repo_name
string
path
string
copies
string
size
string
content
string
license
string
DerTeufel/cm7
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
762
4912
/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/kernel.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include "ipoib.h" static void ipoib_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); } static u32 ipoib_get_rx_csum(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); return test_bit(IPOIB_FLAG_CSUM, &priv->flags) && !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); } static int ipoib_set_tso(struct net_device *dev, u32 data) { struct ipoib_dev_priv *priv = netdev_priv(dev); if (data) { if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && (dev->features & NETIF_F_SG) && (priv->hca_caps & IB_DEVICE_UD_TSO)) { dev->features |= NETIF_F_TSO; } else { ipoib_warn(priv, "can't set TSO on\n"); return -EOPNOTSUPP; } } else dev->features &= ~NETIF_F_TSO; return 0; } static int ipoib_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct ipoib_dev_priv *priv = netdev_priv(dev); coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs; coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; return 0; } static int ipoib_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; /* * These values are saved in the private data and returned * when ipoib_get_coalesce() is called */ if (coal->rx_coalesce_usecs > 0xffff || coal->rx_max_coalesced_frames > 0xffff) return -EINVAL; ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames, coal->rx_coalesce_usecs); if (ret && ret != -ENOSYS) { ipoib_warn(priv, "failed modifying CQ (%d)\n", ret); return ret; } priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs; priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames; return 0; } static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = { "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no desc" }; static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 
{ switch (stringset) { case ETH_SS_STATS: memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys)); break; } } static int ipoib_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ipoib_stats_keys); default: return -EOPNOTSUPP; } } static void ipoib_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) { struct ipoib_dev_priv *priv = netdev_priv(dev); int index = 0; /* Get LRO statistics */ data[index++] = priv->lro.lro_mgr.stats.aggregated; data[index++] = priv->lro.lro_mgr.stats.flushed; if (priv->lro.lro_mgr.stats.flushed) data[index++] = priv->lro.lro_mgr.stats.aggregated / priv->lro.lro_mgr.stats.flushed; else data[index++] = 0; data[index++] = priv->lro.lro_mgr.stats.no_desc; } static const struct ethtool_ops ipoib_ethtool_ops = { .get_drvinfo = ipoib_get_drvinfo, .get_rx_csum = ipoib_get_rx_csum, .set_tso = ipoib_set_tso, .get_coalesce = ipoib_get_coalesce, .set_coalesce = ipoib_set_coalesce, .get_flags = ethtool_op_get_flags, .set_flags = ethtool_op_set_flags, .get_strings = ipoib_get_strings, .get_sset_count = ipoib_get_sset_count, .get_ethtool_stats = ipoib_get_ethtool_stats, }; void ipoib_set_ethtool_ops(struct net_device *dev) { SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops); }
gpl-2.0
ZTE-BLADE/ZTE-BLADE-2.6.32
arch/m68k/kernel/dma.c
762
3086
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #undef DEBUG #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> #include <asm/pgalloc.h> void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t flag) { struct page *page, **map; pgprot_t pgprot; void *addr; int i, order; pr_debug("dma_alloc_coherent: %d,%x\n", size, flag); size = PAGE_ALIGN(size); order = get_order(size); page = alloc_pages(flag, order); if (!page) return NULL; *handle = page_to_phys(page); map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA); if (!map) { __free_pages(page, order); return NULL; } split_page(page, order); order = 1 << order; size >>= PAGE_SHIFT; map[0] = page; for (i = 1; i < size; i++) map[i] = page + i; for (; i < order; i++) __free_page(page + i); pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); if (CPU_IS_040_OR_060) pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S; else pgprot_val(pgprot) |= _PAGE_NOCACHE030; addr = vmap(map, size, VM_MAP, pgprot); kfree(map); return addr; } EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *addr, dma_addr_t handle) { pr_debug("dma_free_coherent: %p, %x\n", addr, handle); vfree(addr); } EXPORT_SYMBOL(dma_free_coherent); void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_TO_DEVICE: cache_push(handle, size); break; case DMA_FROM_DEVICE: cache_clear(handle, size); break; default: if (printk_ratelimit()) printk("dma_sync_single_for_device: unsupported dir %u\n", dir); break; } } EXPORT_SYMBOL(dma_sync_single_for_device); void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { int i; for (i 
= 0; i < nents; sg++, i++) dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); } EXPORT_SYMBOL(dma_sync_sg_for_device); dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction dir) { dma_addr_t handle = virt_to_bus(addr); dma_sync_single_for_device(dev, handle, size, dir); return handle; } EXPORT_SYMBOL(dma_map_single); dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { dma_addr_t handle = page_to_phys(page) + offset; dma_sync_single_for_device(dev, handle, size, dir); return handle; } EXPORT_SYMBOL(dma_map_page); int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { int i; for (i = 0; i < nents; sg++, i++) { sg->dma_address = sg_phys(sg); dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); } return nents; } EXPORT_SYMBOL(dma_map_sg);
gpl-2.0
lategoodbye/linux-mxs-power
drivers/net/ethernet/smsc/smc911x.c
1018
57828
/* * smc911x.c * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices. * * Copyright (C) 2005 Sensoria Corp * Derived from the unified SMC91x driver by Nicolas Pitre * and the smsc911x.c reference driver by SMSC * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Arguments: * watchdog = TX watchdog timeout * tx_fifo_kb = Size of TX FIFO in KB * * History: * 04/16/05 Dustin McIntire Initial version */ static const char version[] = "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n"; /* Debugging options */ #define ENABLE_SMC_DEBUG_RX 0 #define ENABLE_SMC_DEBUG_TX 0 #define ENABLE_SMC_DEBUG_DMA 0 #define ENABLE_SMC_DEBUG_PKTS 0 #define ENABLE_SMC_DEBUG_MISC 0 #define ENABLE_SMC_DEBUG_FUNC 0 #define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0) #define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1) #define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2) #define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3) #define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4) #define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 
1 : 0) << 5) #ifndef SMC_DEBUG #define SMC_DEBUG ( SMC_DEBUG_RX | \ SMC_DEBUG_TX | \ SMC_DEBUG_DMA | \ SMC_DEBUG_PKTS | \ SMC_DEBUG_MISC | \ SMC_DEBUG_FUNC \ ) #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/io.h> #include "smc911x.h" /* * Transmit timeout, default 5 seconds. */ static int watchdog = 5000; module_param(watchdog, int, 0400); MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); static int tx_fifo_kb=8; module_param(tx_fifo_kb, int, 0400); MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:smc911x"); /* * The internal workings of the driver. If you are changing anything * here with the SMC stuff, you should have the datasheet and know * what you are doing. */ #define CARDNAME "smc911x" /* * Use power-down feature of the chip */ #define POWER_DOWN 1 #if SMC_DEBUG > 0 #define DBG(n, dev, args...) \ do { \ if (SMC_DEBUG & (n)) \ netdev_dbg(dev, args); \ } while (0) #define PRINTK(dev, args...) netdev_info(dev, args) #else #define DBG(n, dev, args...) do { } while (0) #define PRINTK(dev, args...) 
netdev_dbg(dev, args) #endif #if SMC_DEBUG_PKTS > 0 static void PRINT_PKT(u_char *buf, int length) { int i; int remainder; int lines; lines = length / 16; remainder = length % 16; for (i = 0; i < lines ; i ++) { int cur; printk(KERN_DEBUG); for (cur = 0; cur < 8; cur++) { u_char a, b; a = *buf++; b = *buf++; pr_cont("%02x%02x ", a, b); } pr_cont("\n"); } printk(KERN_DEBUG); for (i = 0; i < remainder/2 ; i++) { u_char a, b; a = *buf++; b = *buf++; pr_cont("%02x%02x ", a, b); } pr_cont("\n"); } #else #define PRINT_PKT(x...) do { } while (0) #endif /* this enables an interrupt in the interrupt mask register */ #define SMC_ENABLE_INT(lp, x) do { \ unsigned int __mask; \ __mask = SMC_GET_INT_EN((lp)); \ __mask |= (x); \ SMC_SET_INT_EN((lp), __mask); \ } while (0) /* this disables an interrupt from the interrupt mask register */ #define SMC_DISABLE_INT(lp, x) do { \ unsigned int __mask; \ __mask = SMC_GET_INT_EN((lp)); \ __mask &= ~(x); \ SMC_SET_INT_EN((lp), __mask); \ } while (0) /* * this does a soft reset on the device */ static void smc911x_reset(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int reg, timeout=0, resets=1, irq_cfg; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* Take out of PM setting first */ if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { /* Write to the bytetest will take out of powerdown */ SMC_SET_BYTE_TEST(lp, 0); timeout=10; do { udelay(10); reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; } while (--timeout && !reg); if (timeout == 0) { PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n"); return; } } /* Disable all interrupts */ spin_lock_irqsave(&lp->lock, flags); SMC_SET_INT_EN(lp, 0); spin_unlock_irqrestore(&lp->lock, flags); while (resets--) { SMC_SET_HW_CFG(lp, HW_CFG_SRST_); timeout=10; do { udelay(10); reg = SMC_GET_HW_CFG(lp); /* If chip indicates reset timeout then try again */ if (reg & HW_CFG_SRST_TO_) { PRINTK(dev, "chip reset timeout, retrying...\n"); resets++; 
break; } } while (--timeout && (reg & HW_CFG_SRST_)); } if (timeout == 0) { PRINTK(dev, "smc911x_reset timeout waiting for reset\n"); return; } /* make sure EEPROM has finished loading before setting GPIO_CFG */ timeout=1000; while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) udelay(10); if (timeout == 0){ PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n"); return; } /* Initialize interrupts */ SMC_SET_INT_EN(lp, 0); SMC_ACK_INT(lp, -1); /* Reset the FIFO level and flow control settings */ SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16); //TODO: Figure out what appropriate pause time is SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_); SMC_SET_AFC_CFG(lp, lp->afc_cfg); /* Set to LED outputs */ SMC_SET_GPIO_CFG(lp, 0x70070000); /* * Deassert IRQ for 1*10us for edge type interrupts * and drive IRQ pin push-pull */ irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; #ifdef SMC_DYNAMIC_BUS_CONFIG if (lp->cfg.irq_polarity) irq_cfg |= INT_CFG_IRQ_POL_; #endif SMC_SET_IRQ_CFG(lp, irq_cfg); /* clear anything saved */ if (lp->pending_tx_skb != NULL) { dev_kfree_skb (lp->pending_tx_skb); lp->pending_tx_skb = NULL; dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; } } /* * Enable Interrupts, Receive, and Transmit */ static void smc911x_enable(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned mask, cfg, cr; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); SMC_SET_MAC_ADDR(lp, dev->dev_addr); /* Enable TX */ cfg = SMC_GET_HW_CFG(lp); cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; cfg |= HW_CFG_SF_; SMC_SET_HW_CFG(lp, cfg); SMC_SET_FIFO_TDA(lp, 0xFF); /* Update TX stats on every 64 packets received or every 1 sec */ SMC_SET_FIFO_TSL(lp, 64); SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); SMC_GET_MAC_CR(lp, cr); cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; SMC_SET_MAC_CR(lp, cr); SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); /* Add 2 byte padding to start of packets */ SMC_SET_RX_CFG(lp, (2<<8) 
& RX_CFG_RXDOFF_); /* Turn on receiver and enable RX */ if (cr & MAC_CR_RXEN_) DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n"); SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); /* Interrupt on every received packet */ SMC_SET_FIFO_RSA(lp, 0x01); SMC_SET_FIFO_RSL(lp, 0x00); /* now, enable interrupts */ mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | INT_EN_PHY_INT_EN_; if (IS_REV_A(lp->revision)) mask|=INT_EN_RDFL_EN_; else { mask|=INT_EN_RDFO_EN_; } SMC_ENABLE_INT(lp, mask); spin_unlock_irqrestore(&lp->lock, flags); } /* * this puts the device in an inactive state */ static void smc911x_shutdown(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned cr; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__); /* Disable IRQ's */ SMC_SET_INT_EN(lp, 0); /* Turn of Rx and TX */ spin_lock_irqsave(&lp->lock, flags); SMC_GET_MAC_CR(lp, cr); cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); SMC_SET_MAC_CR(lp, cr); SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_); spin_unlock_irqrestore(&lp->lock, flags); } static inline void smc911x_drop_pkt(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int fifo_count, timeout, reg; DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n", CARDNAME, __func__); fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; if (fifo_count <= 4) { /* Manually dump the packet data */ while (fifo_count--) SMC_GET_RX_FIFO(lp); } else { /* Fast forward through the bad packet */ SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_); timeout=50; do { udelay(10); reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; } while (--timeout && reg); if (timeout == 0) { PRINTK(dev, "timeout waiting for RX fast forward\n"); } } } /* * This is the procedure to handle the receipt of a packet. * It should be called after checking for packet presence in * the RX status FIFO. It must be called with the spin lock * already held. 
*/ static inline void smc911x_rcv(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int pkt_len, status; struct sk_buff *skb; unsigned char *data; DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n", __func__); status = SMC_GET_RX_STS_FIFO(lp); DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n", (status & 0x3fff0000) >> 16, status & 0xc000ffff); pkt_len = (status & RX_STS_PKT_LEN_) >> 16; if (status & RX_STS_ES_) { /* Deal with a bad packet */ dev->stats.rx_errors++; if (status & RX_STS_CRC_ERR_) dev->stats.rx_crc_errors++; else { if (status & RX_STS_LEN_ERR_) dev->stats.rx_length_errors++; if (status & RX_STS_MCAST_) dev->stats.multicast++; } /* Remove the bad packet data from the RX FIFO */ smc911x_drop_pkt(dev); } else { /* Receive a valid packet */ /* Alloc a buffer with extra room for DMA alignment */ skb = netdev_alloc_skb(dev, pkt_len+32); if (unlikely(skb == NULL)) { PRINTK(dev, "Low memory, rcvd packet dropped.\n"); dev->stats.rx_dropped++; smc911x_drop_pkt(dev); return; } /* Align IP header to 32 bits * Note that the device is configured to add a 2 * byte padding to the packet start, so we really * want to write to the orignal data pointer */ data = skb->data; skb_reserve(skb, 2); skb_put(skb,pkt_len-4); #ifdef SMC_USE_DMA { unsigned int fifo; /* Lower the FIFO threshold if possible */ fifo = SMC_GET_FIFO_INT(lp); if (fifo & 0xFF) fifo--; DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n", fifo & 0xff); SMC_SET_FIFO_INT(lp, fifo); /* Setup RX DMA */ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); lp->rxdma_active = 1; lp->current_rx_skb = skb; SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15); /* Packet processing deferred to DMA RX interrupt */ } #else SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); SMC_PULL_DATA(lp, data, pkt_len+2+3); DBG(SMC_DEBUG_PKTS, dev, "Received packet\n"); PRINT_PKT(data, ((pkt_len - 4) <= 64) ? 
pkt_len - 4 : 64); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len-4; #endif } } /* * This is called to actually send a packet to the chip. */ static void smc911x_hardware_send_pkt(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); struct sk_buff *skb; unsigned int cmdA, cmdB, len; unsigned char *buf; DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); BUG_ON(lp->pending_tx_skb == NULL); skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; /* cmdA {25:24] data alignment [20:16] start offset [10:0] buffer length */ /* cmdB {31:16] pkt tag [10:0] length */ #ifdef SMC_USE_DMA /* 16 byte buffer alignment mode */ buf = (char*)((u32)(skb->data) & ~0xF); len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF; cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | skb->len; #else buf = (char*)((u32)skb->data & ~0x3); len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3; cmdA = (((u32)skb->data & 0x3) << 16) | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | skb->len; #endif /* tag is packet length so we can use this in stats update later */ cmdB = (skb->len << 16) | (skb->len & 0x7FF); DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", len, len, buf, cmdA, cmdB); SMC_SET_TX_FIFO(lp, cmdA); SMC_SET_TX_FIFO(lp, cmdB); DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); PRINT_PKT(buf, len <= 64 ? 
len : 64); /* Send pkt via PIO or DMA */ #ifdef SMC_USE_DMA lp->current_tx_skb = skb; SMC_PUSH_DATA(lp, buf, len); /* DMA complete IRQ will free buffer and set jiffies */ #else SMC_PUSH_DATA(lp, buf, len); dev->trans_start = jiffies; dev_kfree_skb_irq(skb); #endif if (!lp->tx_throttle) { netif_wake_queue(dev); } SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); } /* * Since I am not sure if I will have enough room in the chip's ram * to store the packet, I call this routine which either sends it * now, or set the card to generates an interrupt when ready * for the packet. */ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; unsigned long flags; DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); BUG_ON(lp->pending_tx_skb != NULL); free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free); /* Turn off the flow when running out of space in FIFO */ if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n", free); /* Reenable when at least 1 packet of size MTU present */ SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); lp->tx_throttle = 1; netif_stop_queue(dev); } /* Drop packets when we run out of space in TX FIFO * Account for overhead required for: * * Tx command words 8 bytes * Start offset 15 bytes * End padding 15 bytes */ if (unlikely(free < (skb->len + 8 + 15 + 15))) { netdev_warn(dev, "No Tx free space %d < %d\n", free, skb->len); lp->pending_tx_skb = NULL; dev->stats.tx_errors++; dev->stats.tx_dropped++; spin_unlock_irqrestore(&lp->lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } #ifdef SMC_USE_DMA { /* If the DMA is already running then defer this packet Tx until * the DMA IRQ starts it */ if (lp->txdma_active) { DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring 
packet\n"); lp->pending_tx_skb = skb; netif_stop_queue(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } else { DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n"); lp->txdma_active = 1; } } #endif lp->pending_tx_skb = skb; smc911x_hardware_send_pkt(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } /* * This handles a TX status interrupt, which is only called when: * - a TX error occurred, or * - TX of a packet completed. */ static void smc911x_tx(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int tx_status; DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); /* Collect the TX status */ while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n", (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); tx_status = SMC_GET_TX_STS_FIFO(lp); dev->stats.tx_packets++; dev->stats.tx_bytes+=tx_status>>16; DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n", (tx_status & 0xffff0000) >> 16, tx_status & 0x0000ffff); /* count Tx errors, but ignore lost carrier errors when in * full-duplex mode */ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && !(tx_status & 0x00000306))) { dev->stats.tx_errors++; } if (tx_status & TX_STS_MANY_COLL_) { dev->stats.collisions+=16; dev->stats.tx_aborted_errors++; } else { dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; } /* carrier error only has meaning for half-duplex communication */ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && !lp->ctl_rfduplx) { dev->stats.tx_carrier_errors++; } if (tx_status & TX_STS_LATE_COLL_) { dev->stats.collisions++; dev->stats.tx_aborted_errors++; } } } /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ /* * Reads a register from the MII Management serial interface */ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) { struct smc911x_local *lp = netdev_priv(dev); unsigned int 
phydata; SMC_GET_MII(lp, phyreg, phyaddr, phydata); DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", __func__, phyaddr, phyreg, phydata); return phydata; } /* * Writes a register to the MII Management serial interface */ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, int phydata) { struct smc911x_local *lp = netdev_priv(dev); DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", __func__, phyaddr, phyreg, phydata); SMC_SET_MII(lp, phyreg, phyaddr, phydata); } /* * Finds and reports the PHY address (115 and 117 have external * PHY interface 118 has internal only */ static void smc911x_phy_detect(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); int phyaddr; unsigned int cfg, id1, id2; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); lp->phy_type = 0; /* * Scan all 32 PHY addresses if necessary, starting at * PHY#1 to PHY#31, and then PHY#0 last. */ switch(lp->version) { case CHIP_9115: case CHIP_9117: case CHIP_9215: case CHIP_9217: cfg = SMC_GET_HW_CFG(lp); if (cfg & HW_CFG_EXT_PHY_DET_) { cfg &= ~HW_CFG_PHY_CLK_SEL_; cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; SMC_SET_HW_CFG(lp, cfg); udelay(10); /* Wait for clocks to stop */ cfg |= HW_CFG_EXT_PHY_EN_; SMC_SET_HW_CFG(lp, cfg); udelay(10); /* Wait for clocks to stop */ cfg &= ~HW_CFG_PHY_CLK_SEL_; cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; SMC_SET_HW_CFG(lp, cfg); udelay(10); /* Wait for clocks to stop */ cfg |= HW_CFG_SMI_SEL_; SMC_SET_HW_CFG(lp, cfg); for (phyaddr = 1; phyaddr < 32; ++phyaddr) { /* Read the PHY identifiers */ SMC_GET_PHY_ID1(lp, phyaddr & 31, id1); SMC_GET_PHY_ID2(lp, phyaddr & 31, id2); /* Make sure it is a valid identifier */ if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) { /* Save the PHY's address */ lp->mii.phy_id = phyaddr & 31; lp->phy_type = id1 << 16 | id2; break; } } if (phyaddr < 32) /* Found an external PHY */ break; } default: /* Internal 
media only */ SMC_GET_PHY_ID1(lp, 1, id1); SMC_GET_PHY_ID2(lp, 1, id2); /* Save the PHY's address */ lp->mii.phy_id = 1; lp->phy_type = id1 << 16 | id2; } DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n", id1, id2, lp->mii.phy_id); } /* * Sets the PHY to a configuration as determined by the user. * Called with spin_lock held. */ static int smc911x_phy_fixed(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); int phyaddr = lp->mii.phy_id; int bmcr; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* Enter Link Disable state */ SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); bmcr |= BMCR_PDOWN; SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); /* * Set our fixed capabilities * Disable auto-negotiation */ bmcr &= ~BMCR_ANENABLE; if (lp->ctl_rfduplx) bmcr |= BMCR_FULLDPLX; if (lp->ctl_rspeed == 100) bmcr |= BMCR_SPEED100; /* Write our capabilities to the phy control register */ SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); /* Re-Configure the Receive/Phy Control register */ bmcr &= ~BMCR_PDOWN; SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); return 1; } /** * smc911x_phy_reset - reset the phy * @dev: net device * @phy: phy address * * Issue a software reset for the specified PHY and * wait up to 100ms for the reset to complete. We should * not access the PHY for 50ms after issuing the reset. * * The time to wait appears to be dependent on the PHY. 
* */ static int smc911x_phy_reset(struct net_device *dev, int phy) { struct smc911x_local *lp = netdev_priv(dev); int timeout; unsigned long flags; unsigned int reg; DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); spin_lock_irqsave(&lp->lock, flags); reg = SMC_GET_PMT_CTRL(lp); reg &= ~0xfffff030; reg |= PMT_CTRL_PHY_RST_; SMC_SET_PMT_CTRL(lp, reg); spin_unlock_irqrestore(&lp->lock, flags); for (timeout = 2; timeout; timeout--) { msleep(50); spin_lock_irqsave(&lp->lock, flags); reg = SMC_GET_PMT_CTRL(lp); spin_unlock_irqrestore(&lp->lock, flags); if (!(reg & PMT_CTRL_PHY_RST_)) { /* extra delay required because the phy may * not be completed with its reset * when PHY_BCR_RESET_ is cleared. 256us * should suffice, but use 500us to be safe */ udelay(500); break; } } return reg & PMT_CTRL_PHY_RST_; } /** * smc911x_phy_powerdown - powerdown phy * @dev: net device * @phy: phy address * * Power down the specified PHY */ static void smc911x_phy_powerdown(struct net_device *dev, int phy) { struct smc911x_local *lp = netdev_priv(dev); unsigned int bmcr; /* Enter Link Disable state */ SMC_GET_PHY_BMCR(lp, phy, bmcr); bmcr |= BMCR_PDOWN; SMC_SET_PHY_BMCR(lp, phy, bmcr); } /** * smc911x_phy_check_media - check the media status and adjust BMCR * @dev: net device * @init: set true for initialisation * * Select duplex mode depending on negotiation state. This * also updates our carrier state. 
 */
/*
 * Re-check link/duplex via the generic mii layer and, on a duplex change,
 * mirror the new duplex setting into both the PHY BMCR and the MAC control
 * register (MAC_CR_RCVOWN_ tracks full-duplex).  'init' is forwarded to
 * mii_check_media() to force an initial report.
 */
static void smc911x_phy_check_media(struct net_device *dev, int init)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	unsigned int bmcr, cr;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
		/* duplex state has changed */
		SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_GET_MAC_CR(lp, cr);
		if (lp->mii.full_duplex) {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
			bmcr |= BMCR_FULLDPLX;
			cr |= MAC_CR_RCVOWN_;
		} else {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
			bmcr &= ~BMCR_FULLDPLX;
			cr &= ~MAC_CR_RCVOWN_;
		}
		SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_SET_MAC_CR(lp, cr);
	}
}

/*
 * Configures the specified PHY through the MII management interface
 * using Autonegotiation.
 * Calls smc911x_phy_fixed() if the user has requested a certain config.
 * If RPC ANEG bit is set, the media selection is dependent purely on
 * the selection by the MII (either in the MII BMCR reg or the result
 * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
 * is controlled by the RPC SPEED and RPC DPLX bits.
 *
 * Runs from a workqueue (see smc911x_timeout) because the PHY reset
 * path can sleep; must not be called from atomic context.
 */
static void smc911x_phy_configure(struct work_struct *work)
{
	struct smc911x_local *lp = container_of(work, struct smc911x_local, phy_configure);
	struct net_device *dev = lp->netdev;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps;	/* My PHY capabilities */
	int my_ad_caps;		/* My Advertised capabilities */
	int status;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);

	/*
	 * We should not be called if phy_type is zero.
	 */
	if (lp->phy_type == 0)
		return;

	if (smc911x_phy_reset(dev, phyaddr)) {
		netdev_info(dev, "PHY reset timed out\n");
		return;
	}
	spin_lock_irqsave(&lp->lock, flags);

	/*
	 * Enable PHY Interrupts (for register 18)
	 * Interrupts listed here are enabled
	 */
	SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
		PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
		PHY_INT_MASK_LINK_DOWN_);

	/* If the user requested no auto neg, then go set his request */
	if (lp->mii.force_media) {
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
	SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		netdev_info(dev, "Auto negotiation NOT supported\n");
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* CSMA capable w/ both pauses */
	my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* Disable capabilities not selected by our user */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL | ADVERTISE_10FULL);

	/* Update our Auto-Neg Advertisement Register */
	SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/*
	 * Read the register back. Without this, it appears that when
	 * auto-negotiation is restarted, sometimes it isn't ready and
	 * the link does not come up.
	 */
	udelay(10);
	SMC_GET_PHY_MII_ADV(lp, phyaddr, status);

	DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
	DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);

	smc911x_phy_check_media(dev, 1);

smc911x_phy_configure_exit:
	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * smc911x_phy_interrupt
 *
 * Purpose: Handle interrupts relating to PHY register 18. This is
 * called from the "hard" interrupt handler under our private spinlock.
 */
static void smc911x_phy_interrupt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int status;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (lp->phy_type == 0)
		return;

	smc911x_phy_check_media(dev, 0);
	/* read to clear status bits */
	SMC_GET_PHY_INT_SRC(lp, phyaddr, status);
	DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
	    status & 0xffff);
	DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n", SMC_GET_AFC_CFG(lp));
}

/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/

/*
 * This is the main routine of the driver, to handle the device when
 * it needs some attention.
*/ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct smc911x_local *lp = netdev_priv(dev); unsigned int status, mask, timeout; unsigned int rx_overrun=0, cr, pkts; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); /* Spurious interrupt check */ if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { spin_unlock_irqrestore(&lp->lock, flags); return IRQ_NONE; } mask = SMC_GET_INT_EN(lp); SMC_SET_INT_EN(lp, 0); /* set a timeout value, so I don't stay here forever */ timeout = 8; do { status = SMC_GET_INT(lp); DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", status, mask, status & ~mask); status &= mask; if (!status) break; /* Handle SW interrupt condition */ if (status & INT_STS_SW_INT_) { SMC_ACK_INT(lp, INT_STS_SW_INT_); mask &= ~INT_EN_SW_INT_EN_; } /* Handle various error conditions */ if (status & INT_STS_RXE_) { SMC_ACK_INT(lp, INT_STS_RXE_); dev->stats.rx_errors++; } if (status & INT_STS_RXDFH_INT_) { SMC_ACK_INT(lp, INT_STS_RXDFH_INT_); dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp); } /* Undocumented interrupt-what is the right thing to do here? 
*/ if (status & INT_STS_RXDF_INT_) { SMC_ACK_INT(lp, INT_STS_RXDF_INT_); } /* Rx Data FIFO exceeds set level */ if (status & INT_STS_RDFL_) { if (IS_REV_A(lp->revision)) { rx_overrun=1; SMC_GET_MAC_CR(lp, cr); cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(lp, cr); DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); dev->stats.rx_errors++; dev->stats.rx_fifo_errors++; } SMC_ACK_INT(lp, INT_STS_RDFL_); } if (status & INT_STS_RDFO_) { if (!IS_REV_A(lp->revision)) { SMC_GET_MAC_CR(lp, cr); cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(lp, cr); rx_overrun=1; DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); dev->stats.rx_errors++; dev->stats.rx_fifo_errors++; } SMC_ACK_INT(lp, INT_STS_RDFO_); } /* Handle receive condition */ if ((status & INT_STS_RSFL_) || rx_overrun) { unsigned int fifo; DBG(SMC_DEBUG_RX, dev, "RX irq\n"); fifo = SMC_GET_RX_FIFO_INF(lp); pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n", pkts, fifo & 0xFFFF); if (pkts != 0) { #ifdef SMC_USE_DMA unsigned int fifo; if (lp->rxdma_active){ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA active\n"); /* The DMA is already running so up the IRQ threshold */ fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; fifo |= pkts & 0xFF; DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n", fifo & 0xff); SMC_SET_FIFO_INT(lp, fifo); } else #endif smc911x_rcv(dev); } SMC_ACK_INT(lp, INT_STS_RSFL_); } /* Handle transmit FIFO available */ if (status & INT_STS_TDFA_) { DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n"); SMC_SET_FIFO_TDA(lp, 0xFF); lp->tx_throttle = 0; #ifdef SMC_USE_DMA if (!lp->txdma_active) #endif netif_wake_queue(dev); SMC_ACK_INT(lp, INT_STS_TDFA_); } /* Handle transmit done condition */ #if 1 if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev, "Tx stat FIFO limit (%d) /GPT irq\n", (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); smc911x_tx(dev); SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); SMC_ACK_INT(lp, INT_STS_TSFL_); SMC_ACK_INT(lp, INT_STS_TSFL_ | 
INT_STS_GPT_INT_); } #else if (status & INT_STS_TSFL_) { DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?); smc911x_tx(dev); SMC_ACK_INT(lp, INT_STS_TSFL_); } if (status & INT_STS_GPT_INT_) { DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", SMC_GET_IRQ_CFG(lp), SMC_GET_FIFO_INT(lp), SMC_GET_RX_CFG(lp)); DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, SMC_GET_RX_FIFO_INF(lp) & 0xffff, SMC_GET_RX_STS_FIFO_PEEK(lp)); SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); SMC_ACK_INT(lp, INT_STS_GPT_INT_); } #endif /* Handle PHY interrupt condition */ if (status & INT_STS_PHY_INT_) { DBG(SMC_DEBUG_MISC, dev, "PHY irq\n"); smc911x_phy_interrupt(dev); SMC_ACK_INT(lp, INT_STS_PHY_INT_); } } while (--timeout); /* restore mask state */ SMC_SET_INT_EN(lp, mask); DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n", 8-timeout); spin_unlock_irqrestore(&lp->lock, flags); return IRQ_HANDLED; } #ifdef SMC_USE_DMA static void smc911x_tx_dma_irq(int dma, void *data) { struct net_device *dev = (struct net_device *)data; struct smc911x_local *lp = netdev_priv(dev); struct sk_buff *skb = lp->current_tx_skb; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); /* Clear the DMA interrupt sources */ SMC_DMA_ACK_IRQ(dev, dma); BUG_ON(skb == NULL); dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); dev->trans_start = jiffies; dev_kfree_skb_irq(skb); lp->current_tx_skb = NULL; if (lp->pending_tx_skb != NULL) smc911x_hardware_send_pkt(dev); else { DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "No pending Tx packets. 
DMA disabled\n"); spin_lock_irqsave(&lp->lock, flags); lp->txdma_active = 0; if (!lp->tx_throttle) { netif_wake_queue(dev); } spin_unlock_irqrestore(&lp->lock, flags); } DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq completed\n"); } static void smc911x_rx_dma_irq(int dma, void *data) { struct net_device *dev = (struct net_device *)data; struct smc911x_local *lp = netdev_priv(dev); struct sk_buff *skb = lp->current_rx_skb; unsigned long flags; unsigned int pkts; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); /* Clear the DMA interrupt sources */ SMC_DMA_ACK_IRQ(dev, dma); dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); BUG_ON(skb == NULL); lp->current_rx_skb = NULL; PRINT_PKT(skb->data, skb->len); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; netif_rx(skb); spin_lock_irqsave(&lp->lock, flags); pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16; if (pkts != 0) { smc911x_rcv(dev); }else { lp->rxdma_active = 0; } spin_unlock_irqrestore(&lp->lock, flags); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq completed. DMA RX FIFO PKTS %d\n", pkts); } #endif /* SMC_USE_DMA */ #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. */ static void smc911x_poll_controller(struct net_device *dev) { disable_irq(dev->irq); smc911x_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif /* Our watchdog timed out. 
 * Called by the networking layer
 */
static void smc911x_timeout(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int status, mask;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_EN(lp);
	spin_unlock_irqrestore(&lp->lock, flags);
	DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
	    status, mask);

	/* Dump the current TX FIFO contents and restart */
	mask = SMC_GET_TX_CFG(lp);
	SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
	/*
	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
	 * which calls schedule(). Hence we use a work queue.
	 */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void smc911x_set_multicast_list(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int multicast_table[2];
	unsigned int mcr, update_multicast = 0;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	SMC_GET_MAC_CR(lp, mcr);
	spin_unlock_irqrestore(&lp->lock, flags);

	if (dev->flags & IFF_PROMISC) {

		DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
		mcr |= MAC_CR_PRMS_;
	}
	/*
	 * Here, I am setting this to accept all multicast packets.
	 * I don't need to zero the multicast table, because the flag is
	 * checked before the table is
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
		mcr |= MAC_CR_MCPAS_;
	}

	/*
	 * This sets the internal hardware table to filter out unwanted
	 * multicast packets before they take up memory.
	 *
	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
	 * address are the offset into the table. If that bit is 1, then the
	 * multicast packet is accepted. Otherwise, it's dropped silently.
	 *
	 * To use the 6 bits as an offset into the table, the high 1 bit is
	 * the number of the 32 bit register, while the low 5 bits are the bit
	 * within that register.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Set the Hash perfec mode */
		mcr |= MAC_CR_HPFILT_;

		/* start with a table of all zeros: reject all */
		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			u32 position;

			/* upper 6 bits are used as hash index */
			position = ether_crc(ETH_ALEN, ha->addr) >> 26;

			multicast_table[position >> 5] |= 1 << (position & 0x1f);
		}

		/* be sure I get rid of flags I might have set */
		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);

		/* now, the table can be loaded into the chipset */
		update_multicast = 1;
	} else {
		DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);

		/*
		 * since I'm disabling all multicast entirely, I need to
		 * clear the multicast list
		 */
		memset(multicast_table, 0, sizeof(multicast_table));
		update_multicast = 1;
	}

	spin_lock_irqsave(&lp->lock, flags);
	SMC_SET_MAC_CR(lp, mcr);
	if (update_multicast) {
		DBG(SMC_DEBUG_MISC, dev,
		    "update mcast hash table 0x%08x 0x%08x\n",
		    multicast_table[0], multicast_table[1]);
		SMC_SET_HASHL(lp, multicast_table[0]);
		SMC_SET_HASHH(lp, multicast_table[1]);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * Open and Initialize the board
 *
 * Set up everything, reset the card, etc..
 */
static int
smc911x_open(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* reset the hardware */
	smc911x_reset(dev);

	/* Configure the PHY, initialize the link state */
	smc911x_phy_configure(&lp->phy_configure);

	/* Turn on Tx + Rx */
	smc911x_enable(dev);

	netif_start_queue(dev);

	return 0;
}

/*
 * smc911x_close
 *
 * this makes the board clean up everything that it can
 * and not talk to the outside world. Caused by
 * an 'ifconfig ethX down'
 */
static int smc911x_close(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* clear everything */
	smc911x_shutdown(dev);

	if (lp->phy_type != 0) {
		/* We need to ensure that no calls to
		 * smc911x_phy_configure are pending.
		 */
		cancel_work_sync(&lp->phy_configure);
		smc911x_phy_powerdown(dev, lp->mii.phy_id);
	}

	if (lp->pending_tx_skb) {
		dev_kfree_skb(lp->pending_tx_skb);
		lp->pending_tx_skb = NULL;
	}

	return 0;
}

/*
 * Ethtool support
 */
/*
 * Report link settings: via the generic mii layer when a PHY is present,
 * otherwise synthesize fixed 10/100 settings from ctl_rspeed and the
 * PHY special-modes register.
 */
static int
smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret, status;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	if (lp->phy_type != 0) {
		spin_lock_irqsave(&lp->lock, flags);
		ret = mii_ethtool_gset(&lp->mii, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		cmd->supported = SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_TP | SUPPORTED_AUI;

		if (lp->ctl_rspeed == 10)
			ethtool_cmd_speed_set(cmd, SPEED_10);
		else if (lp->ctl_rspeed == 100)
			ethtool_cmd_speed_set(cmd, SPEED_100);

		cmd->autoneg = AUTONEG_DISABLE;
		if (lp->mii.phy_id == 1)
			cmd->transceiver = XCVR_INTERNAL;
		else
			cmd->transceiver = XCVR_EXTERNAL;
		cmd->port = 0;
		SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
		cmd->duplex =
			(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
				DUPLEX_FULL : DUPLEX_HALF;
		ret = 0;
	}

	return ret;
}

/* Apply link settings; without a PHY only fixed 10M half/full on TP/AUI
 * is accepted. */
static int
smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;
	unsigned long flags;

	if (lp->phy_type != 0) {
		spin_lock_irqsave(&lp->lock, flags);
		ret = mii_ethtool_sset(&lp->mii, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		if (cmd->autoneg != AUTONEG_DISABLE ||
		    cmd->speed != SPEED_10 ||
		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
			return -EINVAL;

		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;

		ret = 0;
	}

	return ret;
}

static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, version, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}

/* Restart autonegotiation; -EINVAL when there is no PHY. */
static int smc911x_ethtool_nwayreset(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret = -EINVAL;
	unsigned long flags;

	if (lp->phy_type != 0) {
		spin_lock_irqsave(&lp->lock, flags);
		ret = mii_nway_restart(&lp->mii);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return ret;
}

static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct smc911x_local *lp = netdev_priv(dev);
	lp->msg_enable = level;
}

static int smc911x_ethtool_getregslen(struct net_device *dev)
{
	/* System regs + MAC regs + PHY regs */
	return (((E2P_CMD - ID_REV) / 4 + 1) +
		(WUCSR - MAC_CR) + 1 + 32) * sizeof(u32);
}

/* Dump system registers (direct reads), MAC CSRs and the 32 MII registers
 * into buf, in the order matching smc911x_ethtool_getregslen(). */
static void smc911x_ethtool_getregs(struct net_device *dev,
				    struct ethtool_regs *regs, void *buf)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned long flags;
	u32 reg, i, j = 0;
	u32 *data = (u32 *)buf;

	regs->version = lp->version;
	for (i = ID_REV; i <= E2P_CMD; i += 4) {
		data[j++] = SMC_inl(lp, i);
	}
	for (i = MAC_CR; i <= WUCSR; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MAC_CSR(lp, i, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg;
	}
	for (i = 0; i <= 31; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg & 0xFFFF;
	}
}

/* Poll E2P_CMD until the EEPROM controller goes non-busy (up to ~10ms);
 * -EFAULT on controller timeout flag, -ETIMEDOUT if still busy. */
static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int timeout;
	int e2p_cmd;

	e2p_cmd = SMC_GET_E2P_CMD(lp);
	for (timeout = 10; (e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
			PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
			       __func__);
			return -EFAULT;
		}
		mdelay(1);
		e2p_cmd = SMC_GET_E2P_CMD(lp);
	}
	if (timeout == 0) {
		PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
		       __func__);
		return -ETIMEDOUT;
	}
	return 0;
}

/* Issue one EEPROM controller command for the given byte address. */
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
						   int cmd, int addr)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
		((cmd) & (0x7 << 28)) |
		((addr) & 0xFF));
	return 0;
}

/* Fetch the data byte latched by a previous READ command. */
static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
						   u8 *data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	*data = SMC_GET_E2P_DATA(lp);
	return 0;
}

/* Stage a data byte for a subsequent WRITE command. */
static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
						    u8 data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_DATA(lp, data);
	return 0;
}

/* Read the whole EEPROM a byte at a time, then copy out the requested
 * offset/len window. */
static int smc911x_ethtool_geteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	u8 eebuf[SMC911X_EEPROM_LEN];
	int i, ret;

	for (i = 0; i < SMC911X_EEPROM_LEN; i++) {
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i])) != 0)
			return ret;
	}
	memcpy(data, eebuf + eeprom->offset, eeprom->len);
	return 0;
}

/* Byte-wise erase+write of the requested window (write-enable first). */
static int smc911x_ethtool_seteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	int i, ret;

	/* Enable erase */
	if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0)) != 0)
		return ret;
	for (i = eeprom->offset; i < (eeprom->offset + eeprom->len); i++) {
		/* erase byte */
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i)) != 0)
			return ret;
		/* write byte */
		if ((ret = smc911x_ethtool_write_eeprom_byte(dev, *data)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i)) != 0)
			return ret;
	}
	return 0;
}

static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
{
	return SMC911X_EEPROM_LEN;
}

static const struct ethtool_ops smc911x_ethtool_ops = {
	.get_settings	= smc911x_ethtool_getsettings,
	.set_settings	= smc911x_ethtool_setsettings,
	.get_drvinfo	= smc911x_ethtool_getdrvinfo,
	.get_msglevel	= smc911x_ethtool_getmsglevel,
	.set_msglevel	= smc911x_ethtool_setmsglevel,
	.nway_reset	= smc911x_ethtool_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_regs_len	= smc911x_ethtool_getregslen,
	.get_regs	= smc911x_ethtool_getregs,
	.get_eeprom_len	= smc911x_ethtool_geteeprom_len,
	.get_eeprom	= smc911x_ethtool_geteeprom,
	.set_eeprom	= smc911x_ethtool_seteeprom,
};

/*
 * smc911x_findirq
 *
 * This routine has a simple purpose -- make the SMC chip generate an
 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
 */
static int smc911x_findirq(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int timeout = 20;
	unsigned long cookie;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	cookie = probe_irq_on();

	/*
	 * Force a SW interrupt
	 */

	SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);

	/*
	 * Wait until positive that the interrupt has been generated
	 */
	do {
		int int_status;
		udelay(10);
		int_status = SMC_GET_INT_EN(lp);
		if (int_status & INT_EN_SW_INT_EN_)
			break;		/* got the interrupt */
	} while
	     (--timeout);

	/*
	 * there is really nothing that I can do here if timeout fails,
	 * as autoirq_report will return a 0 anyway, which is what I
	 * want in this case. Plus, the clean up is needed in both
	 * cases.
	 */

	/* and disable all interrupts again */
	SMC_SET_INT_EN(lp, 0);

	/* and return what I found */
	return probe_irq_off(cookie);
}

static const struct net_device_ops smc911x_netdev_ops = {
	.ndo_open		= smc911x_open,
	.ndo_stop		= smc911x_close,
	.ndo_start_xmit		= smc911x_hard_start_xmit,
	.ndo_tx_timeout		= smc911x_timeout,
	.ndo_set_rx_mode	= smc911x_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smc911x_poll_controller,
#endif
};

/*
 * Function: smc911x_probe(unsigned long ioaddr)
 *
 * Purpose:
 *	Tests to see if a given ioaddr points to an SMC911x chip.
 *	Returns a 0 on success
 *
 * Algorithm:
 *	(1) see if the endian word is OK
 *	(1) see if I recognize the chip ID in the appropriate register
 *
 * Here I do typical initialization tasks.
 *
 * o Initialize the structure if needed
 * o print out my vanity message if not done so already
 * o print out what type of hardware is detected
 * o print out the ethernet address
 * o find the IRQ
 * o set up my private data
 * o configure the dev structure with my subroutines
 * o actually GRAB the irq.
 * o GRAB the region
 */
static int smc911x_probe(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int i, retval;
	unsigned int val, chip_id, revision;
	const char *version_string;
	unsigned long irq_flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* First, see if the endian word is recognized */
	val = SMC_GET_BYTE_TEST(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
	    CARDNAME, val);
	if (val != 0x87654321) {
		netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
		retval = -ENODEV;
		goto err_out;
	}

	/*
	 * check if the revision register is something that I
	 * recognize. These might need to be added to later,
	 * as future revisions could be added.
	 */
	chip_id = SMC_GET_PN(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
	    CARDNAME, chip_id);
	for (i = 0; chip_ids[i].id != 0; i++) {
		if (chip_ids[i].id == chip_id)
			break;
	}
	if (!chip_ids[i].id) {
		netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
		retval = -ENODEV;
		goto err_out;
	}
	version_string = chip_ids[i].name;

	revision = SMC_GET_REV(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);

	/* At this point I'll assume that the chip is an SMC911x. */
	DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
	    CARDNAME, chip_ids[i].name);

	/* Validate the TX FIFO size requested */
	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
		netdev_err(dev, "Invalid TX FIFO size requested %d\n",
			   tx_fifo_kb);
		retval = -EINVAL;
		goto err_out;
	}

	/* fill in some of the fields */
	lp->version = chip_ids[i].id;
	lp->revision = revision;
	lp->tx_fifo_kb = tx_fifo_kb;
	/* Reverse calculate the RX FIFO size from the TX */
	lp->tx_fifo_size = (lp->tx_fifo_kb << 10) - 512;
	lp->rx_fifo_size = ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;

	/* Set the automatic flow control values */
	switch (lp->tx_fifo_kb) {
	/*
	 *	 AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
	 *	 AFC_LO is AFC_HI/2
	 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
	case 2:	/* 13440 Rx Data Fifo Size */
		lp->afc_cfg = 0x008C46AF; break;
	case 3:	/* 12480 Rx Data Fifo Size */
		lp->afc_cfg = 0x0082419F; break;
	case 4:	/* 11520 Rx Data Fifo Size */
		lp->afc_cfg = 0x00783C9F; break;
	case 5:	/* 10560 Rx Data Fifo Size */
		lp->afc_cfg = 0x006E374F; break;
	case 6:	/* 9600 Rx Data Fifo Size */
		lp->afc_cfg = 0x0064328F; break;
	case 7:	/* 8640 Rx Data Fifo Size */
		lp->afc_cfg = 0x005A2D7F; break;
	case 8:	/* 7680 Rx Data Fifo Size */
		lp->afc_cfg = 0x0050287F; break;
	case 9:	/* 6720 Rx Data Fifo Size */
		lp->afc_cfg = 0x0046236F; break;
	case 10: /* 5760 Rx Data Fifo Size */
		lp->afc_cfg = 0x003C1E6F; break;
	case 11: /* 4800 Rx Data Fifo Size */
		lp->afc_cfg = 0x0032195F; break;
	/*
	 *	 AFC_HI is ~1520 bytes less than RX Data Fifo Size
	 *	 AFC_LO is AFC_HI/2
	 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
	case 12: /* 3840 Rx Data Fifo Size */
		lp->afc_cfg = 0x0024124F; break;
	case 13: /* 2880 Rx Data Fifo Size */
		lp->afc_cfg = 0x0015073F; break;
	case 14: /* 1920 Rx Data Fifo Size */
		lp->afc_cfg = 0x0006032F; break;
	default:
		PRINTK(dev, "ERROR -- no AFC_CFG setting found");
		break;
	}

	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
	    "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
	    lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);

	spin_lock_init(&lp->lock);

	/* Get the MAC address */
	SMC_GET_MAC_ADDR(lp, dev->dev_addr);

	/* now, reset the chip, and put it into a known state */
	smc911x_reset(dev);

	/*
	 * If dev->irq is 0, then the device has to be banged on to see
	 * what the IRQ is.
	 *
	 * Specifying an IRQ is done with the assumption that the user knows
	 * what (s)he is doing. No checking is done!!!!
	 */
	if (dev->irq < 1) {
		int trials;

		trials = 3;
		while (trials--) {
			dev->irq = smc911x_findirq(dev);
			if (dev->irq)
				break;
			/* kick the card and try again */
			smc911x_reset(dev);
		}
	}
	if (dev->irq == 0) {
		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
		retval = -ENODEV;
		goto err_out;
	}
	dev->irq = irq_canonicalize(dev->irq);

	dev->netdev_ops = &smc911x_netdev_ops;
	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
	dev->ethtool_ops = &smc911x_ethtool_ops;

	INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;
	lp->mii.force_media = 0;
	lp->mii.full_duplex = 0;
	lp->mii.dev = dev;
	lp->mii.mdio_read = smc911x_phy_read;
	lp->mii.mdio_write = smc911x_phy_write;

	/*
	 * Locate the phy, if any.
	 */
	smc911x_phy_detect(dev);

	/* Set default parameters */
	lp->msg_enable = NETIF_MSG_LINK;
	lp->ctl_rfduplx = 1;
	lp->ctl_rspeed = 100;

#ifdef SMC_DYNAMIC_BUS_CONFIG
	irq_flags = lp->cfg.irq_flags;
#else
	irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
#endif

	/* Grab the IRQ */
	retval = request_irq(dev->irq, smc911x_interrupt,
			     irq_flags, dev->name, dev);
	if (retval)
		goto err_out;

#ifdef SMC_USE_DMA
	lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
	lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
	lp->rxdma_active = 0;
	lp->txdma_active = 0;
	dev->dma = lp->rxdma;
#endif

	retval = register_netdev(dev);
	if (retval == 0) {
		/* now, print out the card info, in a short format.. */
		netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
			    version_string, lp->revision,
			    dev->base_addr, dev->irq);

#ifdef SMC_USE_DMA
		if (lp->rxdma != -1)
			pr_cont(" RXDMA %d", lp->rxdma);

		if (lp->txdma != -1)
			pr_cont(" TXDMA %d", lp->txdma);
#endif
		pr_cont("\n");
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
		} else {
			/* Print the Ethernet address */
			netdev_info(dev, "Ethernet addr: %pM\n",
				    dev->dev_addr);
		}

		if (lp->phy_type == 0) {
			PRINTK(dev, "No PHY found\n");
		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
			PRINTK(dev, "LAN911x Internal PHY\n");
		} else {
			PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
		}
	}

err_out:
#ifdef SMC_USE_DMA
	if (retval) {
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	return retval;
}

/*
 * smc911x_drv_probe(void)
 *
 *	  Output:
 *	 0 --> there is a device
 *	 anything else, error
 */
static int smc911x_drv_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct resource *res;
	struct smc911x_local *lp;
	void __iomem *addr;
	int ret;

	/* ndev is not valid yet, so avoid passing it in.
	 */
	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
		ret = -EBUSY;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct smc911x_local));
	if (!ndev) {
		ret = -ENOMEM;
		goto release_1;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->dma = (unsigned char)-1;
	ndev->irq = platform_get_irq(pdev, 0);
	lp = netdev_priv(ndev);
	lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	{
		struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
		if (!pd) {
			ret = -EINVAL;
			goto release_both;
		}
		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
	}
#endif

	addr = ioremap(res->start, SMC911X_IO_EXTENT);
	if (!addr) {
		ret = -ENOMEM;
		goto release_both;
	}

	platform_set_drvdata(pdev, ndev);
	lp->base = addr;
	ndev->base_addr = res->start;
	ret = smc911x_probe(ndev);
	if (ret != 0) {
		/* backwards-chained error unwinding: each label releases
		 * everything acquired before it */
		iounmap(addr);
release_both:
		free_netdev(ndev);
release_1:
		release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
		pr_info("%s: not found (%d).\n", CARDNAME, ret);
	}
#ifdef SMC_USE_DMA
	else {
		lp->physaddr = res->start;
		lp->dev = &pdev->dev;
	}
#endif

	return ret;
}

/* Tear down everything smc911x_drv_probe() set up. */
static int smc911x_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct smc911x_local *lp = netdev_priv(ndev);
	struct resource *res;

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);

	unregister_netdev(ndev);

	free_irq(ndev->irq, ndev);

#ifdef SMC_USE_DMA
	{
		/* NOTE(review): 'dev' is not a local here (locals are ndev/
		 * pdev); this only compiles if SMC_DMA_FREE ignores its first
		 * argument — verify against the SMC_DMA_FREE macro */
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	iounmap(lp->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, SMC911X_IO_EXTENT);

	free_netdev(ndev);
	return 0;
}

/* Detach and power down (D2 energy-detect) on suspend if the interface
 * was running. */
static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct smc911x_local *lp = netdev_priv(ndev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			smc911x_shutdown(ndev);
#if POWER_DOWN
			/* Set D2 - Energy detect only setting */
			SMC_SET_PMT_CTRL(lp, 2 << 12);
#endif
		}
	}
	return 0;
}

/* Re-initialize the chip and PHY and reattach on resume. */
static int smc911x_drv_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		struct smc911x_local *lp = netdev_priv(ndev);

		if (netif_running(ndev)) {
			smc911x_reset(ndev);
			if (lp->phy_type != 0)
				smc911x_phy_configure(&lp->phy_configure);
			smc911x_enable(ndev);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver smc911x_driver = {
	.probe		= smc911x_drv_probe,
	.remove		= smc911x_drv_remove,
	.suspend	= smc911x_drv_suspend,
	.resume		= smc911x_drv_resume,
	.driver		= {
		.name	= CARDNAME,
	},
};

module_platform_driver(smc911x_driver);
gpl-2.0
lookfiresu123/my_linux-3.13.0
drivers/video/pm2fb.c
1274
50272
/*
 * Permedia2 framebuffer driver.
 *
 * 2.5/2.6 driver:
 * Copyright (c) 2003 Jim Hague (jim.hague@acm.org)
 *
 * based on 2.4 driver:
 * Copyright (c) 1998-2000 Ilario Nardinocchi (nardinoc@CS.UniBO.IT)
 * Copyright (c) 1999 Jakub Jelinek (jakub@redhat.com)
 *
 * and additional input from James Simmon's port of Hannu Mallat's tdfx
 * driver.
 *
 * I have a Creative Graphics Blaster Exxtreme card - pm2fb on x86. I
 * have no access to other pm2fb implementations. Sparc (and thus
 * hopefully other big-endian) devices now work, thanks to a lot of
 * testing work by Ron Murray. I have no access to CVision hardware,
 * and therefore for now I am omitting the CVision code.
 *
 * Multiple boards support has been on the TODO list for ages.
 * Don't expect this to change.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 *
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

#include <video/permedia2.h>
#include <video/cvisionppc.h>

#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
#error "The endianness of the target host has not been defined."
#endif

#if !defined(CONFIG_PCI)
#error "Only generic PCI cards supported."
#endif

#undef PM2FB_MASTER_DEBUG
#ifdef PM2FB_MASTER_DEBUG
#define DPRINTK(a, b...)	\
	printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
#else
#define DPRINTK(a, b...)
#endif

#define PM2_PIXMAP_SIZE	(1600 * 4)

/*
 * Driver data
 */
static int hwcursor = 1;
static char *mode_option;

/*
 * The XFree GLINT driver will (I think to implement hardware cursor
 * support on TVP4010 and similar where there is no RAMDAC - see
 * comment in set_video) always request +ve sync regardless of what
 * the mode requires. This screws me because I have a Sun
 * fixed-frequency monitor which absolutely has to have -ve sync. So
 * these flags allow the user to specify that requests for +ve sync
 * should be silently turned in -ve sync.
 */
static bool lowhsync;
static bool lowvsync;
static bool noaccel;

/* mtrr option */
#ifdef CONFIG_MTRR
static bool nomtrr;
#endif

/*
 * The hardware state of the graphics card that isn't part of the
 * screeninfo.
 */
struct pm2fb_par
{
	pm2type_t type;		/* Board type */
	unsigned char __iomem *v_regs;/* virtual address of p_regs */
	u32 memclock;		/* memclock */
	u32 video;		/* video flags before blanking */
	u32 mem_config;		/* MemConfig reg at probe */
	u32 mem_control;	/* MemControl reg at probe */
	u32 boot_address;	/* BootAddress reg at probe */
	u32 palette[16];
	int mtrr_handle;
};

/*
 * Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
 * if we don't use modedb.
 */
static struct fb_fix_screeninfo pm2fb_fix = {
	.id =		"",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_PSEUDOCOLOR,
	.xpanstep =	1,
	.ypanstep =	1,
	.ywrapstep =	0,
	.accel =	FB_ACCEL_3DLABS_PERMEDIA2,
};

/*
 * Default video mode. In case the modedb doesn't work.
 */
static struct fb_var_screeninfo pm2fb_var = {
	/* "640x480, 8 bpp @ 60 Hz */
	.xres =			640,
	.yres =			480,
	.xres_virtual =		640,
	.yres_virtual =		480,
	.bits_per_pixel =	8,
	.red =			{0, 8, 0},
	.blue =			{0, 8, 0},
	.green =		{0, 8, 0},
	.activate =		FB_ACTIVATE_NOW,
	.height =		-1,
	.width =		-1,
	.accel_flags =		0,
	.pixclock =		39721,
	.left_margin =		40,
	.right_margin =		24,
	.upper_margin =		32,
	.lower_margin =		11,
	.hsync_len =		96,
	.vsync_len =		2,
	.vmode =		FB_VMODE_NONINTERLACED
};

/*
 * Utility functions
 */

/* MMIO register accessors (fb_readl/fb_writel handle endianness). */
static inline u32 pm2_RD(struct pm2fb_par *p, s32 off)
{
	return fb_readl(p->v_regs + off);
}

static inline void pm2_WR(struct pm2fb_par *p, s32 off, u32 v)
{
	fb_writel(v, p->v_regs + off);
}

/* Indexed RAMDAC reads/writes: set the index register, barrier, then
 * access the data register. */
static inline u32 pm2_RDAC_RD(struct pm2fb_par *p, s32 idx)
{
	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, idx);
	mb();
	return pm2_RD(p, PM2R_RD_INDEXED_DATA);
}

static inline u32 pm2v_RDAC_RD(struct pm2fb_par *p, s32 idx)
{
	pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
	mb();
	return pm2_RD(p, PM2VR_RD_INDEXED_DATA);
}

static inline void pm2_RDAC_WR(struct pm2fb_par *p, s32 idx, u32 v)
{
	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, idx);
	wmb();
	pm2_WR(p, PM2R_RD_INDEXED_DATA, v);
	wmb();
}

static inline void pm2v_RDAC_WR(struct pm2fb_par *p, s32 idx, u32 v)
{
	pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
	wmb();
	pm2_WR(p, PM2VR_RD_INDEXED_DATA, v);
	wmb();
}

#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
#define WAIT_FIFO(p, a)
#else
static inline void WAIT_FIFO(struct pm2fb_par *p, u32 a)
{
	while (pm2_RD(p, PM2R_IN_FIFO_SPACE) < a)
		cpu_relax();
}
#endif

/*
 * partial products for the supported horizontal resolutions.
*/ #define PACKPP(p0, p1, p2) (((p2) << 6) | ((p1) << 3) | (p0)) static const struct { u16 width; u16 pp; } pp_table[] = { { 32, PACKPP(1, 0, 0) }, { 64, PACKPP(1, 1, 0) }, { 96, PACKPP(1, 1, 1) }, { 128, PACKPP(2, 1, 1) }, { 160, PACKPP(2, 2, 1) }, { 192, PACKPP(2, 2, 2) }, { 224, PACKPP(3, 2, 1) }, { 256, PACKPP(3, 2, 2) }, { 288, PACKPP(3, 3, 1) }, { 320, PACKPP(3, 3, 2) }, { 384, PACKPP(3, 3, 3) }, { 416, PACKPP(4, 3, 1) }, { 448, PACKPP(4, 3, 2) }, { 512, PACKPP(4, 3, 3) }, { 544, PACKPP(4, 4, 1) }, { 576, PACKPP(4, 4, 2) }, { 640, PACKPP(4, 4, 3) }, { 768, PACKPP(4, 4, 4) }, { 800, PACKPP(5, 4, 1) }, { 832, PACKPP(5, 4, 2) }, { 896, PACKPP(5, 4, 3) }, { 1024, PACKPP(5, 4, 4) }, { 1056, PACKPP(5, 5, 1) }, { 1088, PACKPP(5, 5, 2) }, { 1152, PACKPP(5, 5, 3) }, { 1280, PACKPP(5, 5, 4) }, { 1536, PACKPP(5, 5, 5) }, { 1568, PACKPP(6, 5, 1) }, { 1600, PACKPP(6, 5, 2) }, { 1664, PACKPP(6, 5, 3) }, { 1792, PACKPP(6, 5, 4) }, { 2048, PACKPP(6, 5, 5) }, { 0, 0 } }; static u32 partprod(u32 xres) { int i; for (i = 0; pp_table[i].width && pp_table[i].width != xres; i++) ; if (pp_table[i].width == 0) DPRINTK("invalid width %u\n", xres); return pp_table[i].pp; } static u32 to3264(u32 timing, int bpp, int is64) { switch (bpp) { case 24: timing *= 3; case 8: timing >>= 1; case 16: timing >>= 1; case 32: break; } if (is64) timing >>= 1; return timing; } static void pm2_mnp(u32 clk, unsigned char *mm, unsigned char *nn, unsigned char *pp) { unsigned char m; unsigned char n; unsigned char p; u32 f; s32 curr; s32 delta = 100000; *mm = *nn = *pp = 0; for (n = 2; n < 15; n++) { for (m = 2; m; m++) { f = PM2_REFERENCE_CLOCK * m / n; if (f >= 150000 && f <= 300000) { for (p = 0; p < 5; p++, f >>= 1) { curr = (clk > f) ? 
clk - f : f - clk; if (curr < delta) { delta = curr; *mm = m; *nn = n; *pp = p; } } } } } } static void pm2v_mnp(u32 clk, unsigned char *mm, unsigned char *nn, unsigned char *pp) { unsigned char m; unsigned char n; unsigned char p; u32 f; s32 delta = 1000; *mm = *nn = *pp = 0; for (m = 1; m < 128; m++) { for (n = 2 * m + 1; n; n++) { for (p = 0; p < 2; p++) { f = (PM2_REFERENCE_CLOCK >> (p + 1)) * n / m; if (clk > f - delta && clk < f + delta) { delta = (clk > f) ? clk - f : f - clk; *mm = m; *nn = n; *pp = p; } } } } } static void clear_palette(struct pm2fb_par *p) { int i = 256; WAIT_FIFO(p, 1); pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, 0); wmb(); while (i--) { WAIT_FIFO(p, 3); pm2_WR(p, PM2R_RD_PALETTE_DATA, 0); pm2_WR(p, PM2R_RD_PALETTE_DATA, 0); pm2_WR(p, PM2R_RD_PALETTE_DATA, 0); } } static void reset_card(struct pm2fb_par *p) { if (p->type == PM2_TYPE_PERMEDIA2V) pm2_WR(p, PM2VR_RD_INDEX_HIGH, 0); pm2_WR(p, PM2R_RESET_STATUS, 0); mb(); while (pm2_RD(p, PM2R_RESET_STATUS) & PM2F_BEING_RESET) cpu_relax(); mb(); #ifdef CONFIG_FB_PM2_FIFO_DISCONNECT DPRINTK("FIFO disconnect enabled\n"); pm2_WR(p, PM2R_FIFO_DISCON, 1); mb(); #endif /* Restore stashed memory config information from probe */ WAIT_FIFO(p, 3); pm2_WR(p, PM2R_MEM_CONTROL, p->mem_control); pm2_WR(p, PM2R_BOOT_ADDRESS, p->boot_address); wmb(); pm2_WR(p, PM2R_MEM_CONFIG, p->mem_config); } static void reset_config(struct pm2fb_par *p) { WAIT_FIFO(p, 53); pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) & ~(PM2F_VGA_ENABLE | PM2F_VGA_FIXED)); pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L)); pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L)); pm2_WR(p, PM2R_FIFO_CONTROL, 0); pm2_WR(p, PM2R_APERTURE_ONE, 0); pm2_WR(p, PM2R_APERTURE_TWO, 0); pm2_WR(p, PM2R_RASTERIZER_MODE, 0); pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB); pm2_WR(p, PM2R_LB_READ_FORMAT, 0); pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0); pm2_WR(p, PM2R_LB_READ_MODE, 0); pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0); pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0); pm2_WR(p, 
PM2R_FB_PIXEL_OFFSET, 0); pm2_WR(p, PM2R_FB_WINDOW_BASE, 0); pm2_WR(p, PM2R_LB_WINDOW_BASE, 0); pm2_WR(p, PM2R_FB_SOFT_WRITE_MASK, ~(0L)); pm2_WR(p, PM2R_FB_HARD_WRITE_MASK, ~(0L)); pm2_WR(p, PM2R_FB_READ_PIXEL, 0); pm2_WR(p, PM2R_DITHER_MODE, 0); pm2_WR(p, PM2R_AREA_STIPPLE_MODE, 0); pm2_WR(p, PM2R_DEPTH_MODE, 0); pm2_WR(p, PM2R_STENCIL_MODE, 0); pm2_WR(p, PM2R_TEXTURE_ADDRESS_MODE, 0); pm2_WR(p, PM2R_TEXTURE_READ_MODE, 0); pm2_WR(p, PM2R_TEXEL_LUT_MODE, 0); pm2_WR(p, PM2R_YUV_MODE, 0); pm2_WR(p, PM2R_COLOR_DDA_MODE, 0); pm2_WR(p, PM2R_TEXTURE_COLOR_MODE, 0); pm2_WR(p, PM2R_FOG_MODE, 0); pm2_WR(p, PM2R_ALPHA_BLEND_MODE, 0); pm2_WR(p, PM2R_LOGICAL_OP_MODE, 0); pm2_WR(p, PM2R_STATISTICS_MODE, 0); pm2_WR(p, PM2R_SCISSOR_MODE, 0); pm2_WR(p, PM2R_FILTER_MODE, PM2F_SYNCHRONIZATION); pm2_WR(p, PM2R_RD_PIXEL_MASK, 0xff); switch (p->type) { case PM2_TYPE_PERMEDIA2: pm2_RDAC_WR(p, PM2I_RD_MODE_CONTROL, 0); /* no overlay */ pm2_RDAC_WR(p, PM2I_RD_CURSOR_CONTROL, 0); pm2_RDAC_WR(p, PM2I_RD_MISC_CONTROL, PM2F_RD_PALETTE_WIDTH_8); pm2_RDAC_WR(p, PM2I_RD_COLOR_KEY_CONTROL, 0); pm2_RDAC_WR(p, PM2I_RD_OVERLAY_KEY, 0); pm2_RDAC_WR(p, PM2I_RD_RED_KEY, 0); pm2_RDAC_WR(p, PM2I_RD_GREEN_KEY, 0); pm2_RDAC_WR(p, PM2I_RD_BLUE_KEY, 0); break; case PM2_TYPE_PERMEDIA2V: pm2v_RDAC_WR(p, PM2VI_RD_MISC_CONTROL, 1); /* 8bit */ break; } } static void set_aperture(struct pm2fb_par *p, u32 depth) { /* * The hardware is little-endian. When used in big-endian * hosts, the on-chip aperture settings are used where * possible to translate from host to card byte order. */ WAIT_FIFO(p, 2); #ifdef __LITTLE_ENDIAN pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_STANDARD); #else switch (depth) { case 24: /* RGB->BGR */ /* * We can't use the aperture to translate host to * card byte order here, so we switch to BGR mode * in pm2fb_set_par(). 
*/ case 8: /* B->B */ pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_STANDARD); break; case 16: /* HL->LH */ pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_HALFWORDSWAP); break; case 32: /* RGBA->ABGR */ pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_BYTESWAP); break; } #endif /* We don't use aperture two, so this may be superflous */ pm2_WR(p, PM2R_APERTURE_TWO, PM2F_APERTURE_STANDARD); } static void set_color(struct pm2fb_par *p, unsigned char regno, unsigned char r, unsigned char g, unsigned char b) { WAIT_FIFO(p, 4); pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, regno); wmb(); pm2_WR(p, PM2R_RD_PALETTE_DATA, r); wmb(); pm2_WR(p, PM2R_RD_PALETTE_DATA, g); wmb(); pm2_WR(p, PM2R_RD_PALETTE_DATA, b); } static void set_memclock(struct pm2fb_par *par, u32 clk) { int i; unsigned char m, n, p; switch (par->type) { case PM2_TYPE_PERMEDIA2V: pm2v_mnp(clk/2, &m, &n, &p); WAIT_FIFO(par, 12); pm2_WR(par, PM2VR_RD_INDEX_HIGH, PM2VI_RD_MCLK_CONTROL >> 8); pm2v_RDAC_WR(par, PM2VI_RD_MCLK_CONTROL, 0); pm2v_RDAC_WR(par, PM2VI_RD_MCLK_PRESCALE, m); pm2v_RDAC_WR(par, PM2VI_RD_MCLK_FEEDBACK, n); pm2v_RDAC_WR(par, PM2VI_RD_MCLK_POSTSCALE, p); pm2v_RDAC_WR(par, PM2VI_RD_MCLK_CONTROL, 1); rmb(); for (i = 256; i; i--) if (pm2v_RDAC_RD(par, PM2VI_RD_MCLK_CONTROL) & 2) break; pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); break; case PM2_TYPE_PERMEDIA2: pm2_mnp(clk, &m, &n, &p); WAIT_FIFO(par, 10); pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_3, 6); pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_1, m); pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_2, n); pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_3, 8|p); pm2_RDAC_RD(par, PM2I_RD_MEMORY_CLOCK_STATUS); rmb(); for (i = 256; i; i--) if (pm2_RD(par, PM2R_RD_INDEXED_DATA) & PM2F_PLL_LOCKED) break; break; } } static void set_pixclock(struct pm2fb_par *par, u32 clk) { int i; unsigned char m, n, p; switch (par->type) { case PM2_TYPE_PERMEDIA2: pm2_mnp(clk, &m, &n, &p); WAIT_FIFO(par, 10); pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A3, 0); pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A1, m); pm2_RDAC_WR(par, 
PM2I_RD_PIXEL_CLOCK_A2, n); pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A3, 8|p); pm2_RDAC_RD(par, PM2I_RD_PIXEL_CLOCK_STATUS); rmb(); for (i = 256; i; i--) if (pm2_RD(par, PM2R_RD_INDEXED_DATA) & PM2F_PLL_LOCKED) break; break; case PM2_TYPE_PERMEDIA2V: pm2v_mnp(clk/2, &m, &n, &p); WAIT_FIFO(par, 8); pm2_WR(par, PM2VR_RD_INDEX_HIGH, PM2VI_RD_CLK0_PRESCALE >> 8); pm2v_RDAC_WR(par, PM2VI_RD_CLK0_PRESCALE, m); pm2v_RDAC_WR(par, PM2VI_RD_CLK0_FEEDBACK, n); pm2v_RDAC_WR(par, PM2VI_RD_CLK0_POSTSCALE, p); pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); break; } } static void set_video(struct pm2fb_par *p, u32 video) { u32 tmp; u32 vsync = video; DPRINTK("video = 0x%x\n", video); /* * The hardware cursor needs +vsync to recognise vert retrace. * We may not be using the hardware cursor, but the X Glint * driver may well. So always set +hsync/+vsync and then set * the RAMDAC to invert the sync if necessary. */ vsync &= ~(PM2F_HSYNC_MASK | PM2F_VSYNC_MASK); vsync |= PM2F_HSYNC_ACT_HIGH | PM2F_VSYNC_ACT_HIGH; WAIT_FIFO(p, 3); pm2_WR(p, PM2R_VIDEO_CONTROL, vsync); switch (p->type) { case PM2_TYPE_PERMEDIA2: tmp = PM2F_RD_PALETTE_WIDTH_8; if ((video & PM2F_HSYNC_MASK) == PM2F_HSYNC_ACT_LOW) tmp |= 4; /* invert hsync */ if ((video & PM2F_VSYNC_MASK) == PM2F_VSYNC_ACT_LOW) tmp |= 8; /* invert vsync */ pm2_RDAC_WR(p, PM2I_RD_MISC_CONTROL, tmp); break; case PM2_TYPE_PERMEDIA2V: tmp = 0; if ((video & PM2F_HSYNC_MASK) == PM2F_HSYNC_ACT_LOW) tmp |= 1; /* invert hsync */ if ((video & PM2F_VSYNC_MASK) == PM2F_VSYNC_ACT_LOW) tmp |= 4; /* invert vsync */ pm2v_RDAC_WR(p, PM2VI_RD_SYNC_CONTROL, tmp); break; } } /* * pm2fb_check_var - Optional function. Validates a var passed in. * @var: frame buffer variable screen structure * @info: frame buffer structure that represents a single frame buffer * * Checks to see if the hardware supports the state requested by * var passed in. * * Returns negative errno on error, or zero on success. 
*/ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u32 lpitch; if (var->bits_per_pixel != 8 && var->bits_per_pixel != 16 && var->bits_per_pixel != 24 && var->bits_per_pixel != 32) { DPRINTK("depth not supported: %u\n", var->bits_per_pixel); return -EINVAL; } if (var->xres != var->xres_virtual) { DPRINTK("virtual x resolution != " "physical x resolution not supported\n"); return -EINVAL; } if (var->yres > var->yres_virtual) { DPRINTK("virtual y resolution < " "physical y resolution not possible\n"); return -EINVAL; } /* permedia cannot blit over 2048 */ if (var->yres_virtual > 2047) { var->yres_virtual = 2047; } if (var->xoffset) { DPRINTK("xoffset not supported\n"); return -EINVAL; } if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { DPRINTK("interlace not supported\n"); return -EINVAL; } var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */ lpitch = var->xres * ((var->bits_per_pixel + 7) >> 3); if (var->xres < 320 || var->xres > 1600) { DPRINTK("width not supported: %u\n", var->xres); return -EINVAL; } if (var->yres < 200 || var->yres > 1200) { DPRINTK("height not supported: %u\n", var->yres); return -EINVAL; } if (lpitch * var->yres_virtual > info->fix.smem_len) { DPRINTK("no memory for screen (%ux%ux%u)\n", var->xres, var->yres_virtual, var->bits_per_pixel); return -EINVAL; } if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) { DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock)); return -EINVAL; } var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { case 8: var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 16: var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; break; case 32: var->transp.offset = 24; var->transp.length = 8; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 
24: #ifdef __BIG_ENDIAN var->red.offset = 0; var->blue.offset = 16; #else var->red.offset = 16; var->blue.offset = 0; #endif var->green.offset = 8; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; } var->height = -1; var->width = -1; var->accel_flags = 0; /* Can't mmap if this is on */ DPRINTK("Checking graphics mode at %dx%d depth %d\n", var->xres, var->yres, var->bits_per_pixel); return 0; } /** * pm2fb_set_par - Alters the hardware state. * @info: frame buffer structure that represents a single frame buffer * * Using the fb_var_screeninfo in fb_info we set the resolution of the * this particular framebuffer. */ static int pm2fb_set_par(struct fb_info *info) { struct pm2fb_par *par = info->par; u32 pixclock; u32 width = (info->var.xres_virtual + 7) & ~7; u32 height = info->var.yres_virtual; u32 depth = (info->var.bits_per_pixel + 7) & ~7; u32 hsstart, hsend, hbend, htotal; u32 vsstart, vsend, vbend, vtotal; u32 stride; u32 base; u32 video = 0; u32 clrmode = PM2F_RD_COLOR_MODE_RGB | PM2F_RD_GUI_ACTIVE; u32 txtmap = 0; u32 pixsize = 0; u32 clrformat = 0; u32 misc = 1; /* 8-bit DAC */ u32 xres = (info->var.xres + 31) & ~31; int data64; reset_card(par); reset_config(par); clear_palette(par); if (par->memclock) set_memclock(par, par->memclock); depth = (depth > 32) ? 32 : depth; data64 = depth > 8 || par->type == PM2_TYPE_PERMEDIA2V; pixclock = PICOS2KHZ(info->var.pixclock); if (pixclock > PM2_MAX_PIXCLOCK) { DPRINTK("pixclock too high (%uKHz)\n", pixclock); return -EINVAL; } hsstart = to3264(info->var.right_margin, depth, data64); hsend = hsstart + to3264(info->var.hsync_len, depth, data64); hbend = hsend + to3264(info->var.left_margin, depth, data64); htotal = to3264(xres, depth, data64) + hbend - 1; vsstart = (info->var.lower_margin) ? info->var.lower_margin - 1 : 0; /* FIXME! 
*/ vsend = info->var.lower_margin + info->var.vsync_len - 1; vbend = info->var.lower_margin + info->var.vsync_len + info->var.upper_margin; vtotal = info->var.yres + vbend - 1; stride = to3264(width, depth, 1); base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1); if (data64) video |= PM2F_DATA_64_ENABLE; if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) { if (lowhsync) { DPRINTK("ignoring +hsync, using -hsync.\n"); video |= PM2F_HSYNC_ACT_LOW; } else video |= PM2F_HSYNC_ACT_HIGH; } else video |= PM2F_HSYNC_ACT_LOW; if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) { if (lowvsync) { DPRINTK("ignoring +vsync, using -vsync.\n"); video |= PM2F_VSYNC_ACT_LOW; } else video |= PM2F_VSYNC_ACT_HIGH; } else video |= PM2F_VSYNC_ACT_LOW; if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { DPRINTK("interlaced not supported\n"); return -EINVAL; } if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) video |= PM2F_LINE_DOUBLE; if ((info->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) video |= PM2F_VIDEO_ENABLE; par->video = video; info->fix.visual = (depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = info->var.xres * depth / 8; info->cmap.len = 256; /* * Settings calculated. Now write them out. 
*/ if (par->type == PM2_TYPE_PERMEDIA2V) { WAIT_FIFO(par, 1); pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); } set_aperture(par, depth); mb(); WAIT_FIFO(par, 19); switch (depth) { case 8: pm2_WR(par, PM2R_FB_READ_PIXEL, 0); clrformat = 0x2e; break; case 16: pm2_WR(par, PM2R_FB_READ_PIXEL, 1); clrmode |= PM2F_RD_TRUECOLOR | PM2F_RD_PIXELFORMAT_RGB565; txtmap = PM2F_TEXTEL_SIZE_16; pixsize = 1; clrformat = 0x70; misc |= 8; break; case 32: pm2_WR(par, PM2R_FB_READ_PIXEL, 2); clrmode |= PM2F_RD_TRUECOLOR | PM2F_RD_PIXELFORMAT_RGBA8888; txtmap = PM2F_TEXTEL_SIZE_32; pixsize = 2; clrformat = 0x20; misc |= 8; break; case 24: pm2_WR(par, PM2R_FB_READ_PIXEL, 4); clrmode |= PM2F_RD_TRUECOLOR | PM2F_RD_PIXELFORMAT_RGB888; txtmap = PM2F_TEXTEL_SIZE_24; pixsize = 4; clrformat = 0x20; misc |= 8; break; } pm2_WR(par, PM2R_FB_WRITE_MODE, PM2F_FB_WRITE_ENABLE); pm2_WR(par, PM2R_FB_READ_MODE, partprod(xres)); pm2_WR(par, PM2R_LB_READ_MODE, partprod(xres)); pm2_WR(par, PM2R_TEXTURE_MAP_FORMAT, txtmap | partprod(xres)); pm2_WR(par, PM2R_H_TOTAL, htotal); pm2_WR(par, PM2R_HS_START, hsstart); pm2_WR(par, PM2R_HS_END, hsend); pm2_WR(par, PM2R_HG_END, hbend); pm2_WR(par, PM2R_HB_END, hbend); pm2_WR(par, PM2R_V_TOTAL, vtotal); pm2_WR(par, PM2R_VS_START, vsstart); pm2_WR(par, PM2R_VS_END, vsend); pm2_WR(par, PM2R_VB_END, vbend); pm2_WR(par, PM2R_SCREEN_STRIDE, stride); wmb(); pm2_WR(par, PM2R_WINDOW_ORIGIN, 0); pm2_WR(par, PM2R_SCREEN_SIZE, (height << 16) | width); pm2_WR(par, PM2R_SCISSOR_MODE, PM2F_SCREEN_SCISSOR_ENABLE); wmb(); pm2_WR(par, PM2R_SCREEN_BASE, base); wmb(); set_video(par, video); WAIT_FIFO(par, 10); switch (par->type) { case PM2_TYPE_PERMEDIA2: pm2_RDAC_WR(par, PM2I_RD_COLOR_MODE, clrmode); pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL, (depth == 8) ? 
0 : PM2F_COLOR_KEY_TEST_OFF); break; case PM2_TYPE_PERMEDIA2V: pm2v_RDAC_WR(par, PM2VI_RD_DAC_CONTROL, 0); pm2v_RDAC_WR(par, PM2VI_RD_PIXEL_SIZE, pixsize); pm2v_RDAC_WR(par, PM2VI_RD_COLOR_FORMAT, clrformat); pm2v_RDAC_WR(par, PM2VI_RD_MISC_CONTROL, misc); pm2v_RDAC_WR(par, PM2VI_RD_OVERLAY_KEY, 0); break; } set_pixclock(par, pixclock); DPRINTK("Setting graphics mode at %dx%d depth %d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); return 0; } /** * pm2fb_setcolreg - Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure * * Set a single color register. The values supplied have a 16 bit * magnitude which needs to be scaled in this function for the hardware. * Pretty much a direct lift from tdfxfb.c. * * Returns negative errno on error, or zero on success. */ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct pm2fb_par *par = info->par; if (regno >= info->cmap.len) /* no. of hw registers */ return -EINVAL; /* * Program hardware... do anything you want with transp */ /* grayscale works only partially under directcolor */ /* grayscale = 0.30*R + 0.59*G + 0.11*B */ if (info->var.grayscale) red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; /* Directcolor: * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * {hardwarespecific} contains width of DAC * cmap[X] is programmed to * (X << red.offset) | (X << green.offset) | (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) * * Pseudocolor: * uses offset = 0 && length = DAC register width. 
* var->{color}.offset is 0 * var->{color}.length contains width of DAC * cmap is not used * DAC[X] is programmed to (red, green, blue) * Truecolor: * does not use RAMDAC (usually has 3 of them). * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * cmap is programmed to * (red << red.offset) | (green << green.offset) | * (blue << blue.offset) | (transp << transp.offset) * RAMDAC does not exist */ #define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF -(val)) >> 16) switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: case FB_VISUAL_PSEUDOCOLOR: red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); transp = CNVT_TOHW(transp, info->var.transp.length); break; case FB_VISUAL_DIRECTCOLOR: /* example here assumes 8 bit DAC. Might be different * for your hardware */ red = CNVT_TOHW(red, 8); green = CNVT_TOHW(green, 8); blue = CNVT_TOHW(blue, 8); /* hey, there is bug in transp handling... */ transp = CNVT_TOHW(transp, 8); break; } #undef CNVT_TOHW /* Truecolor has hardware independent palette */ if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 v; if (regno >= 16) return -EINVAL; v = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); switch (info->var.bits_per_pixel) { case 8: break; case 16: case 24: case 32: par->palette[regno] = v; break; } return 0; } else if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) set_color(par, regno, red, green, blue); return 0; } /** * pm2fb_pan_display - Pans the display. * @var: frame buffer variable screen structure * @info: frame buffer structure that represents a single frame buffer * * Pan (or wrap, depending on the `vmode' field) the display using the * `xoffset' and `yoffset' fields of the `var' structure. * If the values don't fit, return -EINVAL. * * Returns negative errno on error, or zero on success. 
* */ static int pm2fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct pm2fb_par *p = info->par; u32 base; u32 depth = (info->var.bits_per_pixel + 7) & ~7; u32 xres = (info->var.xres + 31) & ~31; depth = (depth > 32) ? 32 : depth; base = to3264(var->yoffset * xres + var->xoffset, depth, 1); WAIT_FIFO(p, 1); pm2_WR(p, PM2R_SCREEN_BASE, base); return 0; } /** * pm2fb_blank - Blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer * * Blank the screen if blank_mode != 0, else unblank. Return 0 if * blanking succeeded, != 0 if un-/blanking failed due to e.g. a * video mode which doesn't support it. Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown * * Returns negative errno on error, or zero on success. * */ static int pm2fb_blank(int blank_mode, struct fb_info *info) { struct pm2fb_par *par = info->par; u32 video = par->video; DPRINTK("blank_mode %d\n", blank_mode); switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen: On */ video |= PM2F_VIDEO_ENABLE; break; case FB_BLANK_NORMAL: /* Screen: Off */ video &= ~PM2F_VIDEO_ENABLE; break; case FB_BLANK_VSYNC_SUSPEND: /* VSync: Off */ video &= ~(PM2F_VSYNC_MASK | PM2F_BLANK_LOW); break; case FB_BLANK_HSYNC_SUSPEND: /* HSync: Off */ video &= ~(PM2F_HSYNC_MASK | PM2F_BLANK_LOW); break; case FB_BLANK_POWERDOWN: /* HSync: Off, VSync: Off */ video &= ~(PM2F_VSYNC_MASK | PM2F_HSYNC_MASK | PM2F_BLANK_LOW); break; } set_video(par, video); return 0; } static int pm2fb_sync(struct fb_info *info) { struct pm2fb_par *par = info->par; WAIT_FIFO(par, 1); pm2_WR(par, PM2R_SYNC, 0); mb(); do { while (pm2_RD(par, PM2R_OUT_FIFO_WORDS) == 0) cpu_relax(); } while (pm2_RD(par, PM2R_OUT_FIFO) != PM2TAG(PM2R_SYNC)); return 0; } static void pm2fb_fillrect(struct fb_info *info, const struct fb_fillrect *region) { 
struct pm2fb_par *par = info->par; struct fb_fillrect modded; int vxres, vyres; u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ? ((u32 *)info->pseudo_palette)[region->color] : region->color; if (info->state != FBINFO_STATE_RUNNING) return; if ((info->flags & FBINFO_HWACCEL_DISABLED) || region->rop != ROP_COPY ) { cfb_fillrect(info, region); return; } vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; memcpy(&modded, region, sizeof(struct fb_fillrect)); if (!modded.width || !modded.height || modded.dx >= vxres || modded.dy >= vyres) return; if (modded.dx + modded.width > vxres) modded.width = vxres - modded.dx; if (modded.dy + modded.height > vyres) modded.height = vyres - modded.dy; if (info->var.bits_per_pixel == 8) color |= color << 8; if (info->var.bits_per_pixel <= 16) color |= color << 16; WAIT_FIFO(par, 3); pm2_WR(par, PM2R_CONFIG, PM2F_CONFIG_FB_WRITE_ENABLE); pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (modded.dy << 16) | modded.dx); pm2_WR(par, PM2R_RECTANGLE_SIZE, (modded.height << 16) | modded.width); if (info->var.bits_per_pixel != 24) { WAIT_FIFO(par, 2); pm2_WR(par, PM2R_FB_BLOCK_COLOR, color); wmb(); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_RENDER_FASTFILL); } else { WAIT_FIFO(par, 4); pm2_WR(par, PM2R_COLOR_DDA_MODE, 1); pm2_WR(par, PM2R_CONSTANT_COLOR, color); wmb(); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_INCREASE_X | PM2F_INCREASE_Y ); pm2_WR(par, PM2R_COLOR_DDA_MODE, 0); } } static void pm2fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct pm2fb_par *par = info->par; struct fb_copyarea modded; u32 vxres, vyres; if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_copyarea(info, area); return; } memcpy(&modded, area, sizeof(struct fb_copyarea)); vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; if (!modded.width || !modded.height || modded.sx >= vxres || modded.sy >= vyres || modded.dx >= vxres || modded.dy >= vyres) return; if 
(modded.sx + modded.width > vxres) modded.width = vxres - modded.sx; if (modded.dx + modded.width > vxres) modded.width = vxres - modded.dx; if (modded.sy + modded.height > vyres) modded.height = vyres - modded.sy; if (modded.dy + modded.height > vyres) modded.height = vyres - modded.dy; WAIT_FIFO(par, 5); pm2_WR(par, PM2R_CONFIG, PM2F_CONFIG_FB_WRITE_ENABLE | PM2F_CONFIG_FB_READ_SOURCE_ENABLE); pm2_WR(par, PM2R_FB_SOURCE_DELTA, ((modded.sy - modded.dy) & 0xfff) << 16 | ((modded.sx - modded.dx) & 0xfff)); pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (modded.dy << 16) | modded.dx); pm2_WR(par, PM2R_RECTANGLE_SIZE, (modded.height << 16) | modded.width); wmb(); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | (modded.dx < modded.sx ? PM2F_INCREASE_X : 0) | (modded.dy < modded.sy ? PM2F_INCREASE_Y : 0)); } static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image) { struct pm2fb_par *par = info->par; u32 height = image->height; u32 fgx, bgx; const u32 *src = (const u32 *)image->data; u32 xres = (info->var.xres + 31) & ~31; int raster_mode = 1; /* invert bits */ #ifdef __LITTLE_ENDIAN raster_mode |= 3 << 7; /* reverse byte order */ #endif if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1) { cfb_imageblit(info, image); return; } switch (info->fix.visual) { case FB_VISUAL_PSEUDOCOLOR: fgx = image->fg_color; bgx = image->bg_color; break; case FB_VISUAL_TRUECOLOR: default: fgx = par->palette[image->fg_color]; bgx = par->palette[image->bg_color]; break; } if (info->var.bits_per_pixel == 8) { fgx |= fgx << 8; bgx |= bgx << 8; } if (info->var.bits_per_pixel <= 16) { fgx |= fgx << 16; bgx |= bgx << 16; } WAIT_FIFO(par, 13); pm2_WR(par, PM2R_FB_READ_MODE, partprod(xres)); pm2_WR(par, PM2R_SCISSOR_MIN_XY, ((image->dy & 0xfff) << 16) | (image->dx & 0x0fff)); pm2_WR(par, PM2R_SCISSOR_MAX_XY, (((image->dy + image->height) & 0x0fff) << 16) | ((image->dx + image->width) & 0x0fff)); pm2_WR(par, PM2R_SCISSOR_MODE, 
1); /* GXcopy & UNIT_ENABLE */ pm2_WR(par, PM2R_LOGICAL_OP_MODE, (0x3 << 1) | 1); pm2_WR(par, PM2R_RECTANGLE_ORIGIN, ((image->dy & 0xfff) << 16) | (image->dx & 0x0fff)); pm2_WR(par, PM2R_RECTANGLE_SIZE, ((image->height & 0x0fff) << 16) | ((image->width) & 0x0fff)); if (info->var.bits_per_pixel == 24) { pm2_WR(par, PM2R_COLOR_DDA_MODE, 1); /* clear area */ pm2_WR(par, PM2R_CONSTANT_COLOR, bgx); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_INCREASE_X | PM2F_INCREASE_Y); /* BitMapPackEachScanline */ pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode | (1 << 9)); pm2_WR(par, PM2R_CONSTANT_COLOR, fgx); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_INCREASE_X | PM2F_INCREASE_Y | PM2F_RENDER_SYNC_ON_BIT_MASK); } else { pm2_WR(par, PM2R_COLOR_DDA_MODE, 0); /* clear area */ pm2_WR(par, PM2R_FB_BLOCK_COLOR, bgx); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_RENDER_FASTFILL | PM2F_INCREASE_X | PM2F_INCREASE_Y); pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode); pm2_WR(par, PM2R_FB_BLOCK_COLOR, fgx); pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE | PM2F_INCREASE_X | PM2F_INCREASE_Y | PM2F_RENDER_FASTFILL | PM2F_RENDER_SYNC_ON_BIT_MASK); } while (height--) { int width = ((image->width + 7) >> 3) + info->pixmap.scan_align - 1; width >>= 2; WAIT_FIFO(par, width); while (width--) { pm2_WR(par, PM2R_BIT_MASK_PATTERN, *src); src++; } } WAIT_FIFO(par, 3); pm2_WR(par, PM2R_RASTERIZER_MODE, 0); pm2_WR(par, PM2R_COLOR_DDA_MODE, 0); pm2_WR(par, PM2R_SCISSOR_MODE, 0); } /* * Hardware cursor support. 
*/ static const u8 cursor_bits_lookup[16] = { 0x00, 0x40, 0x10, 0x50, 0x04, 0x44, 0x14, 0x54, 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55 }; static int pm2vfb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct pm2fb_par *par = info->par; u8 mode = PM2F_CURSORMODE_TYPE_X; int x = cursor->image.dx - info->var.xoffset; int y = cursor->image.dy - info->var.yoffset; if (cursor->enable) mode |= PM2F_CURSORMODE_CURSOR_ENABLE; pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_MODE, mode); if (!cursor->enable) x = 2047; /* push it outside display */ pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_X_LOW, x & 0xff); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_X_HIGH, (x >> 8) & 0xf); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_Y_LOW, y & 0xff); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_Y_HIGH, (y >> 8) & 0xf); /* * If the cursor is not be changed this means either we want the * current cursor state (if enable is set) or we want to query what * we can do with the cursor (if enable is not set) */ if (!cursor->set) return 0; if (cursor->set & FB_CUR_SETHOT) { pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_X_HOT, cursor->hot.x & 0x3f); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_Y_HOT, cursor->hot.y & 0x3f); } if (cursor->set & FB_CUR_SETCMAP) { u32 fg_idx = cursor->image.fg_color; u32 bg_idx = cursor->image.bg_color; struct fb_cmap cmap = info->cmap; /* the X11 driver says one should use these color registers */ pm2_WR(par, PM2VR_RD_INDEX_HIGH, PM2VI_RD_CURSOR_PALETTE >> 8); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 0, cmap.red[bg_idx] >> 8 ); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 1, cmap.green[bg_idx] >> 8 ); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 2, cmap.blue[bg_idx] >> 8 ); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 3, cmap.red[fg_idx] >> 8 ); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 4, cmap.green[fg_idx] >> 8 ); pm2v_RDAC_WR(par, PM2VI_RD_CURSOR_PALETTE + 5, cmap.blue[fg_idx] >> 8 ); pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); } if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { u8 *bitmap = (u8 *)cursor->image.data; u8 
*mask = (u8 *)cursor->mask; int i; int pos = PM2VI_RD_CURSOR_PATTERN; for (i = 0; i < cursor->image.height; i++) { int j = (cursor->image.width + 7) >> 3; int k = 8 - j; pm2_WR(par, PM2VR_RD_INDEX_HIGH, pos >> 8); for (; j > 0; j--) { u8 data = *bitmap ^ *mask; if (cursor->rop == ROP_COPY) data = *mask & *bitmap; /* Upper 4 bits of bitmap data */ pm2v_RDAC_WR(par, pos++, cursor_bits_lookup[data >> 4] | (cursor_bits_lookup[*mask >> 4] << 1)); /* Lower 4 bits of bitmap */ pm2v_RDAC_WR(par, pos++, cursor_bits_lookup[data & 0xf] | (cursor_bits_lookup[*mask & 0xf] << 1)); bitmap++; mask++; } for (; k > 0; k--) { pm2v_RDAC_WR(par, pos++, 0); pm2v_RDAC_WR(par, pos++, 0); } } while (pos < (1024 + PM2VI_RD_CURSOR_PATTERN)) { pm2_WR(par, PM2VR_RD_INDEX_HIGH, pos >> 8); pm2v_RDAC_WR(par, pos++, 0); } pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); } return 0; } static int pm2fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct pm2fb_par *par = info->par; u8 mode; if (!hwcursor) return -EINVAL; /* just to force soft_cursor() call */ /* Too large of a cursor or wrong bpp :-( */ if (cursor->image.width > 64 || cursor->image.height > 64 || cursor->image.depth > 1) return -EINVAL; if (par->type == PM2_TYPE_PERMEDIA2V) return pm2vfb_cursor(info, cursor); mode = 0x40; if (cursor->enable) mode = 0x43; pm2_RDAC_WR(par, PM2I_RD_CURSOR_CONTROL, mode); /* * If the cursor is not be changed this means either we want the * current cursor state (if enable is set) or we want to query what * we can do with the cursor (if enable is not set) */ if (!cursor->set) return 0; if (cursor->set & FB_CUR_SETPOS) { int x = cursor->image.dx - info->var.xoffset + 63; int y = cursor->image.dy - info->var.yoffset + 63; WAIT_FIFO(par, 4); pm2_WR(par, PM2R_RD_CURSOR_X_LSB, x & 0xff); pm2_WR(par, PM2R_RD_CURSOR_X_MSB, (x >> 8) & 0x7); pm2_WR(par, PM2R_RD_CURSOR_Y_LSB, y & 0xff); pm2_WR(par, PM2R_RD_CURSOR_Y_MSB, (y >> 8) & 0x7); } if (cursor->set & FB_CUR_SETCMAP) { u32 fg_idx = cursor->image.fg_color; u32 
bg_idx = cursor->image.bg_color; WAIT_FIFO(par, 7); pm2_WR(par, PM2R_RD_CURSOR_COLOR_ADDRESS, 1); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.red[bg_idx] >> 8); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.green[bg_idx] >> 8); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.blue[bg_idx] >> 8); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.red[fg_idx] >> 8); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.green[fg_idx] >> 8); pm2_WR(par, PM2R_RD_CURSOR_COLOR_DATA, info->cmap.blue[fg_idx] >> 8); } if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { u8 *bitmap = (u8 *)cursor->image.data; u8 *mask = (u8 *)cursor->mask; int i; WAIT_FIFO(par, 1); pm2_WR(par, PM2R_RD_PALETTE_WRITE_ADDRESS, 0); for (i = 0; i < cursor->image.height; i++) { int j = (cursor->image.width + 7) >> 3; int k = 8 - j; WAIT_FIFO(par, 8); for (; j > 0; j--) { u8 data = *bitmap ^ *mask; if (cursor->rop == ROP_COPY) data = *mask & *bitmap; /* bitmap data */ pm2_WR(par, PM2R_RD_CURSOR_DATA, data); bitmap++; mask++; } for (; k > 0; k--) pm2_WR(par, PM2R_RD_CURSOR_DATA, 0); } for (; i < 64; i++) { int j = 8; WAIT_FIFO(par, 8); while (j-- > 0) pm2_WR(par, PM2R_RD_CURSOR_DATA, 0); } mask = (u8 *)cursor->mask; for (i = 0; i < cursor->image.height; i++) { int j = (cursor->image.width + 7) >> 3; int k = 8 - j; WAIT_FIFO(par, 8); for (; j > 0; j--) { /* mask */ pm2_WR(par, PM2R_RD_CURSOR_DATA, *mask); mask++; } for (; k > 0; k--) pm2_WR(par, PM2R_RD_CURSOR_DATA, 0); } for (; i < 64; i++) { int j = 8; WAIT_FIFO(par, 8); while (j-- > 0) pm2_WR(par, PM2R_RD_CURSOR_DATA, 0); } } return 0; } /* ------------ Hardware Independent Functions ------------ */ /* * Frame buffer operations */ static struct fb_ops pm2fb_ops = { .owner = THIS_MODULE, .fb_check_var = pm2fb_check_var, .fb_set_par = pm2fb_set_par, .fb_setcolreg = pm2fb_setcolreg, .fb_blank = pm2fb_blank, .fb_pan_display = pm2fb_pan_display, .fb_fillrect = pm2fb_fillrect, .fb_copyarea = pm2fb_copyarea, .fb_imageblit = pm2fb_imageblit, 
.fb_sync = pm2fb_sync, .fb_cursor = pm2fb_cursor, }; /* * PCI stuff */ /** * Device initialisation * * Initialise and allocate resource for PCI device. * * @param pdev PCI device. * @param id PCI device ID. */ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pm2fb_par *default_par; struct fb_info *info; int err; int retval = -ENXIO; err = pci_enable_device(pdev); if (err) { printk(KERN_WARNING "pm2fb: Can't enable pdev: %d\n", err); return err; } info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev); if (!info) return -ENOMEM; default_par = info->par; switch (pdev->device) { case PCI_DEVICE_ID_TI_TVP4020: strcpy(pm2fb_fix.id, "TVP4020"); default_par->type = PM2_TYPE_PERMEDIA2; break; case PCI_DEVICE_ID_3DLABS_PERMEDIA2: strcpy(pm2fb_fix.id, "Permedia2"); default_par->type = PM2_TYPE_PERMEDIA2; break; case PCI_DEVICE_ID_3DLABS_PERMEDIA2V: strcpy(pm2fb_fix.id, "Permedia2v"); default_par->type = PM2_TYPE_PERMEDIA2V; break; } pm2fb_fix.mmio_start = pci_resource_start(pdev, 0); pm2fb_fix.mmio_len = PM2_REGS_SIZE; #if defined(__BIG_ENDIAN) /* * PM2 has a 64k register file, mapped twice in 128k. Lower * map is little-endian, upper map is big-endian. */ pm2fb_fix.mmio_start += PM2_REGS_SIZE; DPRINTK("Adjusting register base for big-endian.\n"); #endif DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start); /* Registers - request region and map it. 
*/ if (!request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len, "pm2fb regbase")) { printk(KERN_WARNING "pm2fb: Can't reserve regbase.\n"); goto err_exit_neither; } default_par->v_regs = ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); if (!default_par->v_regs) { printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n", pm2fb_fix.id); release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); goto err_exit_neither; } /* Stash away memory register info for use when we reset the board */ default_par->mem_control = pm2_RD(default_par, PM2R_MEM_CONTROL); default_par->boot_address = pm2_RD(default_par, PM2R_BOOT_ADDRESS); default_par->mem_config = pm2_RD(default_par, PM2R_MEM_CONFIG); DPRINTK("MemControl 0x%x BootAddress 0x%x MemConfig 0x%x\n", default_par->mem_control, default_par->boot_address, default_par->mem_config); if (default_par->mem_control == 0 && default_par->boot_address == 0x31 && default_par->mem_config == 0x259fffff) { default_par->memclock = CVPPC_MEMCLOCK; default_par->mem_control = 0; default_par->boot_address = 0x20; default_par->mem_config = 0xe6002021; if (pdev->subsystem_vendor == 0x1048 && pdev->subsystem_device == 0x0a31) { DPRINTK("subsystem_vendor: %04x, " "subsystem_device: %04x\n", pdev->subsystem_vendor, pdev->subsystem_device); DPRINTK("We have not been initialized by VGA BIOS and " "are running on an Elsa Winner 2000 Office\n"); DPRINTK("Initializing card timings manually...\n"); default_par->memclock = 100000; } if (pdev->subsystem_vendor == 0x3d3d && pdev->subsystem_device == 0x0100) { DPRINTK("subsystem_vendor: %04x, " "subsystem_device: %04x\n", pdev->subsystem_vendor, pdev->subsystem_device); DPRINTK("We have not been initialized by VGA BIOS and " "are running on an 3dlabs reference board\n"); DPRINTK("Initializing card timings manually...\n"); default_par->memclock = 74894; } } /* Now work out how big lfb is going to be. 
*/ switch (default_par->mem_config & PM2F_MEM_CONFIG_RAM_MASK) { case PM2F_MEM_BANKS_1: pm2fb_fix.smem_len = 0x200000; break; case PM2F_MEM_BANKS_2: pm2fb_fix.smem_len = 0x400000; break; case PM2F_MEM_BANKS_3: pm2fb_fix.smem_len = 0x600000; break; case PM2F_MEM_BANKS_4: pm2fb_fix.smem_len = 0x800000; break; } pm2fb_fix.smem_start = pci_resource_start(pdev, 1); /* Linear frame buffer - request region and map it. */ if (!request_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len, "pm2fb smem")) { printk(KERN_WARNING "pm2fb: Can't reserve smem.\n"); goto err_exit_mmio; } info->screen_base = ioremap_nocache(pm2fb_fix.smem_start, pm2fb_fix.smem_len); if (!info->screen_base) { printk(KERN_WARNING "pm2fb: Can't ioremap smem area.\n"); release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len); goto err_exit_mmio; } #ifdef CONFIG_MTRR default_par->mtrr_handle = -1; if (!nomtrr) default_par->mtrr_handle = mtrr_add(pm2fb_fix.smem_start, pm2fb_fix.smem_len, MTRR_TYPE_WRCOMB, 1); #endif info->fbops = &pm2fb_ops; info->fix = pm2fb_fix; info->pseudo_palette = default_par->palette; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT; info->pixmap.addr = kmalloc(PM2_PIXMAP_SIZE, GFP_KERNEL); if (!info->pixmap.addr) { retval = -ENOMEM; goto err_exit_pixmap; } info->pixmap.size = PM2_PIXMAP_SIZE; info->pixmap.buf_align = 4; info->pixmap.scan_align = 4; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; if (noaccel) { printk(KERN_DEBUG "disabling acceleration\n"); info->flags |= FBINFO_HWACCEL_DISABLED; info->pixmap.scan_align = 1; } if (!mode_option) mode_option = "640x480@60"; err = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8); if (!err || err == 4) info->var = pm2fb_var; retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) goto err_exit_both; retval = register_framebuffer(info); if (retval < 0) goto err_exit_all; fb_info(info, "%s frame buffer device, 
memory = %dK\n", info->fix.id, pm2fb_fix.smem_len / 1024); /* * Our driver data */ pci_set_drvdata(pdev, info); return 0; err_exit_all: fb_dealloc_cmap(&info->cmap); err_exit_both: kfree(info->pixmap.addr); err_exit_pixmap: iounmap(info->screen_base); release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len); err_exit_mmio: iounmap(default_par->v_regs); release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); err_exit_neither: framebuffer_release(info); return retval; } /** * Device removal. * * Release all device resources. * * @param pdev PCI device to clean up. */ static void pm2fb_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct fb_fix_screeninfo *fix = &info->fix; struct pm2fb_par *par = info->par; unregister_framebuffer(info); #ifdef CONFIG_MTRR if (par->mtrr_handle >= 0) mtrr_del(par->mtrr_handle, info->fix.smem_start, info->fix.smem_len); #endif /* CONFIG_MTRR */ iounmap(info->screen_base); release_mem_region(fix->smem_start, fix->smem_len); iounmap(par->v_regs); release_mem_region(fix->mmio_start, fix->mmio_len); fb_dealloc_cmap(&info->cmap); kfree(info->pixmap.addr); framebuffer_release(info); } static struct pci_device_id pm2fb_id_table[] = { { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TVP4020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_3DLABS, PCI_DEVICE_ID_3DLABS_PERMEDIA2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_3DLABS, PCI_DEVICE_ID_3DLABS_PERMEDIA2V, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; static struct pci_driver pm2fb_driver = { .name = "pm2fb", .id_table = pm2fb_id_table, .probe = pm2fb_probe, .remove = pm2fb_remove, }; MODULE_DEVICE_TABLE(pci, pm2fb_id_table); #ifndef MODULE /** * Parse user specified options. * * This is, comma-separated options following `video=pm2fb:'. 
*/ static int __init pm2fb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strcmp(this_opt, "lowhsync")) lowhsync = 1; else if (!strcmp(this_opt, "lowvsync")) lowvsync = 1; else if (!strncmp(this_opt, "hwcursor=", 9)) hwcursor = simple_strtoul(this_opt + 9, NULL, 0); #ifdef CONFIG_MTRR else if (!strncmp(this_opt, "nomtrr", 6)) nomtrr = 1; #endif else if (!strncmp(this_opt, "noaccel", 7)) noaccel = 1; else mode_option = this_opt; } return 0; } #endif static int __init pm2fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("pm2fb", &option)) return -ENODEV; pm2fb_setup(option); #endif return pci_register_driver(&pm2fb_driver); } module_init(pm2fb_init); #ifdef MODULE /* * Cleanup */ static void __exit pm2fb_exit(void) { pci_unregister_driver(&pm2fb_driver); } #endif #ifdef MODULE module_exit(pm2fb_exit); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '648x480-8@60'"); module_param_named(mode, mode_option, charp, 0); MODULE_PARM_DESC(mode, "Initial video mode e.g. '648x480-8@60' (deprecated)"); module_param(lowhsync, bool, 0); MODULE_PARM_DESC(lowhsync, "Force horizontal sync low regardless of mode"); module_param(lowvsync, bool, 0); MODULE_PARM_DESC(lowvsync, "Force vertical sync low regardless of mode"); module_param(noaccel, bool, 0); MODULE_PARM_DESC(noaccel, "Disable acceleration"); module_param(hwcursor, int, 0644); MODULE_PARM_DESC(hwcursor, "Enable hardware cursor " "(1=enable, 0=disable, default=1)"); #ifdef CONFIG_MTRR module_param(nomtrr, bool, 0); MODULE_PARM_DESC(nomtrr, "Disable MTRR support (0 or 1=disabled) (default=0)"); #endif MODULE_AUTHOR("Jim Hague <jim.hague@acm.org>"); MODULE_DESCRIPTION("Permedia2 framebuffer device driver"); MODULE_LICENSE("GPL"); #endif
gpl-2.0
Pafcholini/linux-3.10.y
drivers/clk/samsung/clk-exynos4.c
1530
45051
/* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Copyright (c) 2013 Linaro Ltd. * Author: Thomas Abraham <thomas.ab@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Common Clock Framework support for all Exynos4 SoCs. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> #include "clk.h" #include "clk-pll.h" /* Exynos4 clock controller register offsets */ #define SRC_LEFTBUS 0x4200 #define DIV_LEFTBUS 0x4500 #define GATE_IP_LEFTBUS 0x4800 #define E4X12_GATE_IP_IMAGE 0x4930 #define SRC_RIGHTBUS 0x8200 #define DIV_RIGHTBUS 0x8500 #define GATE_IP_RIGHTBUS 0x8800 #define E4X12_GATE_IP_PERIR 0x8960 #define EPLL_LOCK 0xc010 #define VPLL_LOCK 0xc020 #define EPLL_CON0 0xc110 #define EPLL_CON1 0xc114 #define EPLL_CON2 0xc118 #define VPLL_CON0 0xc120 #define VPLL_CON1 0xc124 #define VPLL_CON2 0xc128 #define SRC_TOP0 0xc210 #define SRC_TOP1 0xc214 #define SRC_CAM 0xc220 #define SRC_TV 0xc224 #define SRC_MFC 0xc228 #define SRC_G3D 0xc22c #define E4210_SRC_IMAGE 0xc230 #define SRC_LCD0 0xc234 #define E4210_SRC_LCD1 0xc238 #define E4X12_SRC_ISP 0xc238 #define SRC_MAUDIO 0xc23c #define SRC_FSYS 0xc240 #define SRC_PERIL0 0xc250 #define SRC_PERIL1 0xc254 #define E4X12_SRC_CAM1 0xc258 #define SRC_MASK_TOP 0xc310 #define SRC_MASK_CAM 0xc320 #define SRC_MASK_TV 0xc324 #define SRC_MASK_LCD0 0xc334 #define E4210_SRC_MASK_LCD1 0xc338 #define E4X12_SRC_MASK_ISP 0xc338 #define SRC_MASK_MAUDIO 0xc33c #define SRC_MASK_FSYS 0xc340 #define SRC_MASK_PERIL0 0xc350 #define SRC_MASK_PERIL1 0xc354 #define DIV_TOP 0xc510 #define DIV_CAM 0xc520 #define DIV_TV 0xc524 #define DIV_MFC 0xc528 #define DIV_G3D 0xc52c #define DIV_IMAGE 0xc530 #define DIV_LCD0 0xc534 #define E4210_DIV_LCD1 0xc538 #define E4X12_DIV_ISP 0xc538 #define DIV_MAUDIO 0xc53c #define DIV_FSYS0 
0xc540 #define DIV_FSYS1 0xc544 #define DIV_FSYS2 0xc548 #define DIV_FSYS3 0xc54c #define DIV_PERIL0 0xc550 #define DIV_PERIL1 0xc554 #define DIV_PERIL2 0xc558 #define DIV_PERIL3 0xc55c #define DIV_PERIL4 0xc560 #define DIV_PERIL5 0xc564 #define E4X12_DIV_CAM1 0xc568 #define GATE_SCLK_CAM 0xc820 #define GATE_IP_CAM 0xc920 #define GATE_IP_TV 0xc924 #define GATE_IP_MFC 0xc928 #define GATE_IP_G3D 0xc92c #define E4210_GATE_IP_IMAGE 0xc930 #define GATE_IP_LCD0 0xc934 #define E4210_GATE_IP_LCD1 0xc938 #define E4X12_GATE_IP_ISP 0xc938 #define E4X12_GATE_IP_MAUDIO 0xc93c #define GATE_IP_FSYS 0xc940 #define GATE_IP_GPS 0xc94c #define GATE_IP_PERIL 0xc950 #define E4210_GATE_IP_PERIR 0xc960 #define GATE_BLOCK 0xc970 #define E4X12_MPLL_CON0 0x10108 #define SRC_DMC 0x10200 #define SRC_MASK_DMC 0x10300 #define DIV_DMC0 0x10500 #define DIV_DMC1 0x10504 #define GATE_IP_DMC 0x10900 #define APLL_CON0 0x14100 #define E4210_MPLL_CON0 0x14108 #define SRC_CPU 0x14200 #define DIV_CPU0 0x14500 #define DIV_CPU1 0x14504 #define GATE_SCLK_CPU 0x14800 #define GATE_IP_CPU 0x14900 #define E4X12_DIV_ISP0 0x18300 #define E4X12_DIV_ISP1 0x18304 #define E4X12_GATE_ISP0 0x18800 #define E4X12_GATE_ISP1 0x18804 /* the exynos4 soc type */ enum exynos4_soc { EXYNOS4210, EXYNOS4X12, }; /* * Let each supported clock get a unique id. This id is used to lookup the clock * for device tree based platforms. The clocks are categorized into three * sections: core, sclk gate and bus interface gate clocks. * * When adding a new clock to this list, it is advised to choose a clock * category and add it to the end of that category. That is because the the * device tree source file is referring to these ids and any change in the * sequence number of existing clocks will require corresponding change in the * device tree files. This limitation would go away when pre-processor support * for dtc would be available. 
*/ enum exynos4_clks { none, /* core clocks */ xxti, xusbxti, fin_pll, fout_apll, fout_mpll, fout_epll, fout_vpll, sclk_apll, sclk_mpll, sclk_epll, sclk_vpll, arm_clk, aclk200, aclk100, aclk160, aclk133, mout_mpll_user_t, mout_mpll_user_c, mout_core, mout_apll, /* 20 */ /* gate for special clocks (sclk) */ sclk_fimc0 = 128, sclk_fimc1, sclk_fimc2, sclk_fimc3, sclk_cam0, sclk_cam1, sclk_csis0, sclk_csis1, sclk_hdmi, sclk_mixer, sclk_dac, sclk_pixel, sclk_fimd0, sclk_mdnie0, sclk_mdnie_pwm0, sclk_mipi0, sclk_audio0, sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_mmc4, sclk_sata, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_uart4, sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2, sclk_slimbus, sclk_fimd1, sclk_mipi1, sclk_pcm1, sclk_pcm2, sclk_i2s1, sclk_i2s2, sclk_mipihsi, sclk_mfc, sclk_pcm0, sclk_g3d, sclk_pwm_isp, sclk_spi0_isp, sclk_spi1_isp, sclk_uart_isp, /* gate clocks */ fimc0 = 256, fimc1, fimc2, fimc3, csis0, csis1, jpeg, smmu_fimc0, smmu_fimc1, smmu_fimc2, smmu_fimc3, smmu_jpeg, vp, mixer, tvenc, hdmi, smmu_tv, mfc, smmu_mfcl, smmu_mfcr, g3d, g2d, rotator, mdma, smmu_g2d, smmu_rotator, smmu_mdma, fimd0, mie0, mdnie0, dsim0, smmu_fimd0, fimd1, mie1, dsim1, smmu_fimd1, pdma0, pdma1, pcie_phy, sata_phy, tsi, sdmmc0, sdmmc1, sdmmc2, sdmmc3, sdmmc4, sata, sromc, usb_host, usb_device, pcie, onenand, nfcon, smmu_pcie, gps, smmu_gps, uart0, uart1, uart2, uart3, uart4, i2c0, i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc, spi0, spi1, spi2, i2s1, i2s2, pcm0, i2s0, pcm1, pcm2, pwm, slimbus, spdif, ac97, modemif, chipid, sysreg, hdmi_cec, mct, wdt, rtc, keyif, audss, mipi_hsi, mdma2, pixelasyncm0, pixelasyncm1, fimc_lite0, fimc_lite1, ppmuispx, ppmuispmx, fimc_isp, fimc_drc, fimc_fd, mcuisp, gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp, mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp, asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk, spi1_isp_sclk, 
uart_isp_sclk, /* mux clocks */ mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0, mout_cam1, mout_csis0, mout_csis1, mout_g3d0, mout_g3d1, mout_g3d, aclk400_mcuisp, /* div clocks */ div_isp0 = 450, div_isp1, div_mcuisp0, div_mcuisp1, div_aclk200, div_aclk400_mcuisp, nr_clks, }; /* * list of controller registers to be saved and restored during a * suspend/resume cycle. */ static __initdata unsigned long exynos4210_clk_save[] = { E4210_SRC_IMAGE, E4210_SRC_LCD1, E4210_SRC_MASK_LCD1, E4210_DIV_LCD1, E4210_GATE_IP_IMAGE, E4210_GATE_IP_LCD1, E4210_GATE_IP_PERIR, E4210_MPLL_CON0, }; static __initdata unsigned long exynos4x12_clk_save[] = { E4X12_GATE_IP_IMAGE, E4X12_GATE_IP_PERIR, E4X12_SRC_CAM1, E4X12_DIV_ISP, E4X12_DIV_CAM1, E4X12_MPLL_CON0, }; static __initdata unsigned long exynos4_clk_regs[] = { SRC_LEFTBUS, DIV_LEFTBUS, GATE_IP_LEFTBUS, SRC_RIGHTBUS, DIV_RIGHTBUS, GATE_IP_RIGHTBUS, EPLL_CON0, EPLL_CON1, EPLL_CON2, VPLL_CON0, VPLL_CON1, VPLL_CON2, SRC_TOP0, SRC_TOP1, SRC_CAM, SRC_TV, SRC_MFC, SRC_G3D, SRC_LCD0, SRC_MAUDIO, SRC_FSYS, SRC_PERIL0, SRC_PERIL1, SRC_MASK_TOP, SRC_MASK_CAM, SRC_MASK_TV, SRC_MASK_LCD0, SRC_MASK_MAUDIO, SRC_MASK_FSYS, SRC_MASK_PERIL0, SRC_MASK_PERIL1, DIV_TOP, DIV_CAM, DIV_TV, DIV_MFC, DIV_G3D, DIV_IMAGE, DIV_LCD0, DIV_MAUDIO, DIV_FSYS0, DIV_FSYS1, DIV_FSYS2, DIV_FSYS3, DIV_PERIL0, DIV_PERIL1, DIV_PERIL2, DIV_PERIL3, DIV_PERIL4, DIV_PERIL5, GATE_SCLK_CAM, GATE_IP_CAM, GATE_IP_TV, GATE_IP_MFC, GATE_IP_G3D, GATE_IP_LCD0, GATE_IP_FSYS, GATE_IP_GPS, GATE_IP_PERIL, GATE_BLOCK, SRC_MASK_DMC, SRC_DMC, DIV_DMC0, DIV_DMC1, GATE_IP_DMC, APLL_CON0, SRC_CPU, DIV_CPU0, DIV_CPU1, GATE_SCLK_CPU, GATE_IP_CPU, }; /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; PNAME(mout_epll_p) = { "fin_pll", "fout_epll", }; PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi24m", }; PNAME(mout_vpll_p) = { "fin_pll", "fout_vpll", }; PNAME(sclk_evpll_p) = { "sclk_epll", 
"sclk_vpll", }; PNAME(mout_mfc_p) = { "mout_mfc0", "mout_mfc1", }; PNAME(mout_g3d_p) = { "mout_g3d0", "mout_g3d1", }; PNAME(mout_g2d_p) = { "mout_g2d0", "mout_g2d1", }; PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy", }; PNAME(mout_jpeg_p) = { "mout_jpeg0", "mout_jpeg1", }; PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2", "spdif_extclk", }; PNAME(mout_onenand_p) = {"aclk133", "aclk160", }; PNAME(mout_onenand1_p) = {"mout_onenand", "sclk_vpll", }; /* Exynos 4210-specific parent groups */ PNAME(sclk_vpll_p4210) = { "mout_vpllsrc", "fout_vpll", }; PNAME(mout_core_p4210) = { "mout_apll", "sclk_mpll", }; PNAME(sclk_ampll_p4210) = { "sclk_mpll", "sclk_apll", }; PNAME(group1_p4210) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0", "none", "sclk_hdmiphy", "sclk_mpll", "sclk_epll", "sclk_vpll", }; PNAME(mout_audio0_p4210) = { "cdclk0", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll", "sclk_epll", "sclk_vpll" }; PNAME(mout_audio1_p4210) = { "cdclk1", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll", "sclk_epll", "sclk_vpll", }; PNAME(mout_audio2_p4210) = { "cdclk2", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", "sclk_mpll", "sclk_epll", "sclk_vpll", }; PNAME(mout_mixer_p4210) = { "sclk_dac", "sclk_hdmi", }; PNAME(mout_dac_p4210) = { "sclk_vpll", "sclk_hdmiphy", }; /* Exynos 4x12-specific parent groups */ PNAME(mout_mpll_user_p4x12) = { "fin_pll", "sclk_mpll", }; PNAME(mout_core_p4x12) = { "mout_apll", "mout_mpll_user_c", }; PNAME(sclk_ampll_p4x12) = { "mout_mpll_user_t", "sclk_apll", }; PNAME(group1_p4x12) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0", "none", "sclk_hdmiphy", "mout_mpll_user_t", "sclk_epll", "sclk_vpll", }; PNAME(mout_audio0_p4x12) = { "cdclk0", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", "mout_mpll_user_t", "sclk_epll", "sclk_vpll" }; PNAME(mout_audio1_p4x12) = { "cdclk1", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", 
"mout_mpll_user_t", "sclk_epll", "sclk_vpll", }; PNAME(mout_audio2_p4x12) = { "cdclk2", "none", "sclk_hdmi24m", "sclk_usbphy0", "xxti", "xusbxti", "mout_mpll_user_t", "sclk_epll", "sclk_vpll", }; PNAME(aclk_p4412) = { "mout_mpll_user_t", "sclk_apll", }; PNAME(mout_user_aclk400_mcuisp_p4x12) = {"fin_pll", "div_aclk400_mcuisp", }; PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", }; PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", }; /* fixed rate clocks generated outside the soc */ struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = { FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0), FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0), }; /* fixed rate clocks generated inside the soc */ struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = { FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000), FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000), }; struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = { FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000), }; /* list of mux clocks supported in all exynos4 soc's */ struct samsung_mux_clock exynos4_mux_clks[] __initdata = { MUX_F(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, CLK_SET_RATE_PARENT, 0), MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1), MUX(none, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1), MUX(none, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1), MUX_F(mout_g3d1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1, CLK_SET_RATE_PARENT, 0), MUX_F(mout_g3d, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1, CLK_SET_RATE_PARENT, 0), MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2), MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1), MUX_A(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1, "sclk_epll"), MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1), }; /* list of mux clocks supported in exynos4210 soc */ struct samsung_mux_clock exynos4210_mux_clks[] 
__initdata = { MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1), MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1), MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1), MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1), MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1), MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1), MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1), MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1), MUX(none, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1), MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1), MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4), MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4), MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1, "sclk_mpll"), MUX_A(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1, "mout_core"), MUX_A(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1, "sclk_vpll"), MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4), MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4), MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4), MUX(mout_fimc3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4), MUX(mout_cam0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4), MUX(mout_cam1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4), MUX(mout_csis0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4), MUX(mout_csis1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4), MUX(none, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1), MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1, CLK_SET_RATE_PARENT, 0), MUX(none, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4), MUX(none, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4), MUX(none, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4), MUX(none, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4), MUX(none, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4), MUX(none, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4), MUX(none, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4), MUX(none, 
"mout_mmc4", group1_p4210, SRC_FSYS, 16, 4), MUX(none, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1), MUX(none, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4), MUX(none, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4), MUX(none, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4), MUX(none, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4), MUX(none, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4), MUX(none, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4), MUX(none, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4), MUX(none, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4), MUX(none, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4), MUX(none, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4), }; /* list of mux clocks supported in exynos4x12 soc */ struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = { MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12, SRC_CPU, 24, 1), MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1), MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1), MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12, SRC_TOP1, 12, 1), MUX(none, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12, SRC_TOP1, 16, 1), MUX(aclk200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1), MUX(aclk400_mcuisp, "aclk400_mcuisp", mout_user_aclk400_mcuisp_p4x12, SRC_TOP1, 24, 1), MUX(none, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1), MUX(none, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1), MUX(none, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1), MUX(none, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1), MUX(none, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4), MUX(none, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4), MUX(none, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1), MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1), MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1), MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1), MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1, "sclk_mpll"), 
MUX_A(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1, "sclk_vpll"), MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1), MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4), MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4), MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4), MUX(mout_fimc3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4), MUX(mout_cam0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4), MUX(mout_cam1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4), MUX(mout_csis0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4), MUX(mout_csis1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4), MUX(none, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1), MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1, CLK_SET_RATE_PARENT, 0), MUX(none, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4), MUX(none, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4), MUX(none, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4), MUX(none, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4), MUX(none, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4), MUX(none, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4), MUX(none, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4), MUX(none, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4), MUX(none, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1), MUX(none, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4), MUX(none, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4), MUX(none, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4), MUX(none, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4), MUX(none, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4), MUX(none, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4), MUX(none, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4), MUX(none, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4), MUX(none, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4), MUX(none, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4), MUX(none, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4), MUX(none, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4), MUX(none, "mout_spi1_isp", 
group1_p4x12, E4X12_SRC_ISP, 8, 4), MUX(none, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4), }; /* list of divider clocks supported in all exynos4 soc's */ struct samsung_div_clock exynos4_div_clks[] __initdata = { DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3), DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3), DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4), DIV(none, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4), DIV(none, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4), DIV(none, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4), DIV(none, "div_cam0", "mout_cam0", DIV_CAM, 16, 4), DIV(none, "div_cam1", "mout_cam1", DIV_CAM, 20, 4), DIV(none, "div_csis0", "mout_csis0", DIV_CAM, 24, 4), DIV(none, "div_csis1", "mout_csis1", DIV_CAM, 28, 4), DIV(sclk_mfc, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4), DIV_F(none, "div_g3d", "mout_g3d", DIV_G3D, 0, 4, CLK_SET_RATE_PARENT, 0), DIV(none, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4), DIV(none, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4), DIV(none, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4), DIV(sclk_pcm0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8), DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4), DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4), DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4), DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4), DIV(sclk_pixel, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4), DIV(aclk100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4), DIV(aclk160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3), DIV(aclk133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3), DIV(none, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3), DIV(sclk_slimbus, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4), DIV(sclk_pcm1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8), DIV(sclk_pcm2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8), DIV(sclk_i2s1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6), DIV(sclk_i2s2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6), DIV(none, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4), DIV(none, 
"div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8), DIV(none, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4), DIV(none, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4), DIV(none, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4), DIV(none, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4), DIV(none, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4), DIV(none, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4), DIV(none, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8), DIV(none, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4), DIV(none, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8), DIV(none, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4), DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8), DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4), DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4), DIV_A(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3, "arm_clk"), DIV_A(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3, "sclk_apll"), DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4, CLK_SET_RATE_PARENT, 0), DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0), DIV_F(none, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0), DIV_F(none, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0), DIV_F(none, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8, CLK_SET_RATE_PARENT, 0), }; /* list of divider clocks supported in exynos4210 soc */ struct samsung_div_clock exynos4210_div_clks[] __initdata = { DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3), DIV(none, "div_g2d", "mout_g2d", DIV_IMAGE, 0, 4), DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4), DIV(none, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4), DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4), DIV_F(none, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4, CLK_SET_RATE_PARENT, 0), }; /* list of divider clocks supported in exynos4x12 soc */ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { DIV(none, "div_mdnie0", "mout_mdnie0", 
DIV_LCD0, 4, 4), DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4), DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4), DIV(none, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4), DIV(none, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4), DIV(div_aclk200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3), DIV(none, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3), DIV(div_aclk400_mcuisp, "div_aclk400_mcuisp", "mout_aclk400_mcuisp", DIV_TOP, 24, 3), DIV(none, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4), DIV(none, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4), DIV(none, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8), DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), }; /* list of gate clocks supported in all exynos4 soc's */ struct samsung_gate_clock exynos4_gate_clks[] __initdata = { /* * After all Exynos4 based platforms are migrated to use device tree, * the device name and clock alias names specified below for some * of the clocks can be removed. 
*/ GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0), GATE(sclk_spdif, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0, 0), GATE(jpeg, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0), GATE(mie0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0), GATE(dsim0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0), GATE(fimd1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0), GATE(mie1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0), GATE(dsim1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0), GATE(smmu_fimd1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0, 0), GATE(tsi, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0), GATE(sromc, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0), GATE(sclk_g3d, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0, CLK_SET_RATE_PARENT, 0), GATE(usb_device, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0), GATE(onenand, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0), GATE(nfcon, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0), GATE(gps, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0), GATE(smmu_gps, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0), GATE(slimbus, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0), GATE(sclk_cam0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4, CLK_SET_RATE_PARENT, 0), GATE(sclk_cam1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0), GATE(sclk_mipi0, "sclk_mipi0", "div_mipi_pre0", SRC_MASK_LCD0, 12, CLK_SET_RATE_PARENT, 0), GATE(sclk_audio0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0), GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0, CLK_SET_RATE_PARENT, 0), GATE_D(vp, "s5p-mixer", "vp", "aclk160", GATE_IP_TV, 0, 0, 0), GATE_D(mixer, "s5p-mixer", "mixer", "aclk160", GATE_IP_TV, 1, 0, 0), GATE_D(hdmi, "exynos4-hdmi", "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0), GATE_A(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0, "timers"), GATE_A(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0, "biu"), GATE_A(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0, "usbhost"), GATE_DA(sclk_fimc0, "exynos4-fimc.0", 
"sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimc"), GATE_DA(sclk_fimc1, "exynos4-fimc.1", "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4, CLK_SET_RATE_PARENT, 0, "sclk_fimc"), GATE_DA(sclk_fimc2, "exynos4-fimc.2", "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8, CLK_SET_RATE_PARENT, 0, "sclk_fimc"), GATE_DA(sclk_fimc3, "exynos4-fimc.3", "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12, CLK_SET_RATE_PARENT, 0, "sclk_fimc"), GATE_DA(sclk_csis0, "s5p-mipi-csis.0", "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24, CLK_SET_RATE_PARENT, 0, "sclk_csis"), GATE_DA(sclk_csis1, "s5p-mipi-csis.1", "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28, CLK_SET_RATE_PARENT, 0, "sclk_csis"), GATE_DA(sclk_fimd0, "exynos4-fb.0", "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"), GATE_DA(sclk_mmc0, "exynos4-sdhci.0", "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0, "mmc_busclk.2"), GATE_DA(sclk_mmc1, "exynos4-sdhci.1", "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0, "mmc_busclk.2"), GATE_DA(sclk_mmc2, "exynos4-sdhci.2", "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0, "mmc_busclk.2"), GATE_DA(sclk_mmc3, "exynos4-sdhci.3", "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0, "mmc_busclk.2"), GATE_DA(sclk_mmc4, NULL, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16, CLK_SET_RATE_PARENT, 0, "ciu"), GATE_DA(sclk_uart0, "exynos4210-uart.0", "uclk0", "div_uart0", SRC_MASK_PERIL0, 0, CLK_SET_RATE_PARENT, 0, "clk_uart_baud0"), GATE_DA(sclk_uart1, "exynos4210-uart.1", "uclk1", "div_uart1", SRC_MASK_PERIL0, 4, CLK_SET_RATE_PARENT, 0, "clk_uart_baud0"), GATE_DA(sclk_uart2, "exynos4210-uart.2", "uclk2", "div_uart2", SRC_MASK_PERIL0, 8, CLK_SET_RATE_PARENT, 0, "clk_uart_baud0"), GATE_DA(sclk_uart3, "exynos4210-uart.3", "uclk3", "div_uart3", SRC_MASK_PERIL0, 12, CLK_SET_RATE_PARENT, 0, "clk_uart_baud0"), GATE_DA(sclk_uart4, "exynos4210-uart.4", "uclk4", "div_uart4", 
SRC_MASK_PERIL0, 16, CLK_SET_RATE_PARENT, 0, "clk_uart_baud0"), GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4, CLK_SET_RATE_PARENT, 0), GATE_DA(sclk_spi0, "exynos4210-spi.0", "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16, CLK_SET_RATE_PARENT, 0, "spi_busclk0"), GATE_DA(sclk_spi1, "exynos4210-spi.1", "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20, CLK_SET_RATE_PARENT, 0, "spi_busclk0"), GATE_DA(sclk_spi2, "exynos4210-spi.2", "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24, CLK_SET_RATE_PARENT, 0, "spi_busclk0"), GATE_DA(fimc0, "exynos4-fimc.0", "fimc0", "aclk160", GATE_IP_CAM, 0, 0, 0, "fimc"), GATE_DA(fimc1, "exynos4-fimc.1", "fimc1", "aclk160", GATE_IP_CAM, 1, 0, 0, "fimc"), GATE_DA(fimc2, "exynos4-fimc.2", "fimc2", "aclk160", GATE_IP_CAM, 2, 0, 0, "fimc"), GATE_DA(fimc3, "exynos4-fimc.3", "fimc3", "aclk160", GATE_IP_CAM, 3, 0, 0, "fimc"), GATE_DA(csis0, "s5p-mipi-csis.0", "csis0", "aclk160", GATE_IP_CAM, 4, 0, 0, "fimc"), GATE_DA(csis1, "s5p-mipi-csis.1", "csis1", "aclk160", GATE_IP_CAM, 5, 0, 0, "fimc"), GATE_DA(smmu_fimc0, "exynos-sysmmu.5", "smmu_fimc0", "aclk160", GATE_IP_CAM, 7, 0, 0, "sysmmu"), GATE_DA(smmu_fimc1, "exynos-sysmmu.6", "smmu_fimc1", "aclk160", GATE_IP_CAM, 8, 0, 0, "sysmmu"), GATE_DA(smmu_fimc2, "exynos-sysmmu.7", "smmu_fimc2", "aclk160", GATE_IP_CAM, 9, 0, 0, "sysmmu"), GATE_DA(smmu_fimc3, "exynos-sysmmu.8", "smmu_fimc3", "aclk160", GATE_IP_CAM, 10, 0, 0, "sysmmu"), GATE_DA(smmu_jpeg, "exynos-sysmmu.3", "smmu_jpeg", "aclk160", GATE_IP_CAM, 11, 0, 0, "sysmmu"), GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0), GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0), GATE_DA(smmu_tv, "exynos-sysmmu.2", "smmu_tv", "aclk160", GATE_IP_TV, 4, 0, 0, "sysmmu"), GATE_DA(mfc, "s5p-mfc", "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0, "mfc"), GATE_DA(smmu_mfcl, "exynos-sysmmu.0", "smmu_mfcl", "aclk100", GATE_IP_MFC, 1, 0, 0, "sysmmu"), GATE_DA(smmu_mfcr, "exynos-sysmmu.1", "smmu_mfcr", "aclk100", 
GATE_IP_MFC, 2, 0, 0, "sysmmu"), GATE_DA(fimd0, "exynos4-fb.0", "fimd0", "aclk160", GATE_IP_LCD0, 0, 0, 0, "fimd"), GATE_DA(smmu_fimd0, "exynos-sysmmu.10", "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4, 0, 0, "sysmmu"), GATE_DA(pdma0, "dma-pl330.0", "pdma0", "aclk133", GATE_IP_FSYS, 0, 0, 0, "dma"), GATE_DA(pdma1, "dma-pl330.1", "pdma1", "aclk133", GATE_IP_FSYS, 1, 0, 0, "dma"), GATE_DA(sdmmc0, "exynos4-sdhci.0", "sdmmc0", "aclk133", GATE_IP_FSYS, 5, 0, 0, "hsmmc"), GATE_DA(sdmmc1, "exynos4-sdhci.1", "sdmmc1", "aclk133", GATE_IP_FSYS, 6, 0, 0, "hsmmc"), GATE_DA(sdmmc2, "exynos4-sdhci.2", "sdmmc2", "aclk133", GATE_IP_FSYS, 7, 0, 0, "hsmmc"), GATE_DA(sdmmc3, "exynos4-sdhci.3", "sdmmc3", "aclk133", GATE_IP_FSYS, 8, 0, 0, "hsmmc"), GATE_DA(uart0, "exynos4210-uart.0", "uart0", "aclk100", GATE_IP_PERIL, 0, 0, 0, "uart"), GATE_DA(uart1, "exynos4210-uart.1", "uart1", "aclk100", GATE_IP_PERIL, 1, 0, 0, "uart"), GATE_DA(uart2, "exynos4210-uart.2", "uart2", "aclk100", GATE_IP_PERIL, 2, 0, 0, "uart"), GATE_DA(uart3, "exynos4210-uart.3", "uart3", "aclk100", GATE_IP_PERIL, 3, 0, 0, "uart"), GATE_DA(uart4, "exynos4210-uart.4", "uart4", "aclk100", GATE_IP_PERIL, 4, 0, 0, "uart"), GATE_DA(i2c0, "s3c2440-i2c.0", "i2c0", "aclk100", GATE_IP_PERIL, 6, 0, 0, "i2c"), GATE_DA(i2c1, "s3c2440-i2c.1", "i2c1", "aclk100", GATE_IP_PERIL, 7, 0, 0, "i2c"), GATE_DA(i2c2, "s3c2440-i2c.2", "i2c2", "aclk100", GATE_IP_PERIL, 8, 0, 0, "i2c"), GATE_DA(i2c3, "s3c2440-i2c.3", "i2c3", "aclk100", GATE_IP_PERIL, 9, 0, 0, "i2c"), GATE_DA(i2c4, "s3c2440-i2c.4", "i2c4", "aclk100", GATE_IP_PERIL, 10, 0, 0, "i2c"), GATE_DA(i2c5, "s3c2440-i2c.5", "i2c5", "aclk100", GATE_IP_PERIL, 11, 0, 0, "i2c"), GATE_DA(i2c6, "s3c2440-i2c.6", "i2c6", "aclk100", GATE_IP_PERIL, 12, 0, 0, "i2c"), GATE_DA(i2c7, "s3c2440-i2c.7", "i2c7", "aclk100", GATE_IP_PERIL, 13, 0, 0, "i2c"), GATE_DA(i2c_hdmi, "s3c2440-hdmiphy-i2c", "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14, 0, 0, "i2c"), GATE_DA(spi0, "exynos4210-spi.0", "spi0", "aclk100", 
GATE_IP_PERIL, 16, 0, 0, "spi"), GATE_DA(spi1, "exynos4210-spi.1", "spi1", "aclk100", GATE_IP_PERIL, 17, 0, 0, "spi"), GATE_DA(spi2, "exynos4210-spi.2", "spi2", "aclk100", GATE_IP_PERIL, 18, 0, 0, "spi"), GATE_DA(i2s1, "samsung-i2s.1", "i2s1", "aclk100", GATE_IP_PERIL, 20, 0, 0, "iis"), GATE_DA(i2s2, "samsung-i2s.2", "i2s2", "aclk100", GATE_IP_PERIL, 21, 0, 0, "iis"), GATE_DA(pcm1, "samsung-pcm.1", "pcm1", "aclk100", GATE_IP_PERIL, 22, 0, 0, "pcm"), GATE_DA(pcm2, "samsung-pcm.2", "pcm2", "aclk100", GATE_IP_PERIL, 23, 0, 0, "pcm"), GATE_DA(spdif, "samsung-spdif", "spdif", "aclk100", GATE_IP_PERIL, 26, 0, 0, "spdif"), GATE_DA(ac97, "samsung-ac97", "ac97", "aclk100", GATE_IP_PERIL, 27, 0, 0, "ac97"), }; /* list of gate clocks supported in exynos4210 soc */ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = { GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0), GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0), GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0), GATE(mdma, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0), GATE(smmu_g2d, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0), GATE(smmu_mdma, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0, 0), GATE(pcie_phy, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0), GATE(sata_phy, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0), GATE(sata, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0), GATE(pcie, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0), GATE(smmu_rotator, "smmu_rotator", "aclk200", E4210_GATE_IP_IMAGE, 4, 0, 0), GATE(sclk_mipi1, "sclk_mipi1", "div_mipi_pre1", E4210_SRC_MASK_LCD1, 12, CLK_SET_RATE_PARENT, 0), GATE(sclk_sata, "sclk_sata", 
"div_sata", SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0), GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0), GATE_A(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0, "adc"), GATE_A(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0, "mct"), GATE_A(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0, "watchdog"), GATE_A(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0, "rtc"), GATE_A(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0, "keypad"), GATE_DA(sclk_fimd1, "exynos4-fb.1", "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"), }; /* list of gate clocks supported in exynos4x12 soc */ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0), GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0), GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0), GATE(mdma2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0), GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0), GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, CLK_IGNORE_UNUSED, 0), GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0), GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0", SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0), GATE(sclk_mdnie_pwm0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0", SRC_MASK_LCD0, 8, CLK_SET_RATE_PARENT, 0), GATE(sclk_mipihsi, "sclk_mipihsi", "div_mipihsi", SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), GATE(smmu_rotator, "smmu_rotator", "aclk200", E4X12_GATE_IP_IMAGE, 4, 0, 0), GATE_A(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0, "mct"), GATE_A(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0, "rtc"), GATE_A(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0, "keypad"), GATE(sclk_pwm_isp, 
"sclk_pwm_isp", "div_pwm_isp", E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0), GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre", E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0), GATE(sclk_spi1_isp, "sclk_spi1_isp", "div_spi1_isp_pre", E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0), GATE(sclk_uart_isp, "sclk_uart_isp", "div_uart_isp", E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0), GATE(pwm_isp_sclk, "pwm_isp_sclk", "sclk_pwm_isp", E4X12_GATE_IP_ISP, 0, 0, 0), GATE(spi0_isp_sclk, "spi0_isp_sclk", "sclk_spi0_isp", E4X12_GATE_IP_ISP, 1, 0, 0), GATE(spi1_isp_sclk, "spi1_isp_sclk", "sclk_spi1_isp", E4X12_GATE_IP_ISP, 2, 0, 0), GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp", E4X12_GATE_IP_ISP, 3, 0, 0), GATE_A(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0, "watchdog"), GATE_DA(pcm0, "samsung-pcm.0", "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2, 0, 0, "pcm"), GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, CLK_IGNORE_UNUSED, 0), GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, CLK_IGNORE_UNUSED, 0), GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, CLK_IGNORE_UNUSED, 0), GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, CLK_IGNORE_UNUSED, 0), GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, CLK_IGNORE_UNUSED, 0), GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, CLK_IGNORE_UNUSED, 0), GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, CLK_IGNORE_UNUSED, 0), GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, CLK_IGNORE_UNUSED, 0), GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, CLK_IGNORE_UNUSED, 0), GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, CLK_IGNORE_UNUSED, 0), GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, CLK_IGNORE_UNUSED, 0), GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, CLK_IGNORE_UNUSED, 0), GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, 
CLK_IGNORE_UNUSED, 0), GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, CLK_IGNORE_UNUSED, 0), GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, CLK_IGNORE_UNUSED, 0), GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, CLK_IGNORE_UNUSED, 0), GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, CLK_IGNORE_UNUSED, 0), GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, CLK_IGNORE_UNUSED, 0), GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, CLK_IGNORE_UNUSED, 0), GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, CLK_IGNORE_UNUSED, 0), GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, CLK_IGNORE_UNUSED, 0), GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, CLK_IGNORE_UNUSED, 0), GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, CLK_IGNORE_UNUSED, 0), GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, CLK_IGNORE_UNUSED, 0), GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, CLK_IGNORE_UNUSED, 0), GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, CLK_IGNORE_UNUSED, 0), }; /* * The parent of the fin_pll clock is selected by the XOM[0] bit. This bit * resides in chipid register space, outside of the clock controller memory * mapped space. So to determine the parent of fin_pll clock, the chipid * controller is first remapped and the value of XOM[0] bit is read to * determine the parent clock. */ static unsigned long exynos4_get_xom(void) { unsigned long xom = 0; void __iomem *chipid_base; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-chipid"); if (np) { chipid_base = of_iomap(np, 0); if (chipid_base) xom = readl(chipid_base + 8); iounmap(chipid_base); } return xom; } static void __init exynos4_clk_register_finpll(unsigned long xom) { struct samsung_fixed_rate_clock fclk; struct clk *clk; unsigned long finpll_f = 24000000; char *parent_name; parent_name = xom & 1 ? 
"xusbxti" : "xxti";
	clk = clk_get(NULL, parent_name);
	if (IS_ERR(clk)) {
		/* Parent not registered yet (or missing): fall back to the
		 * 24 MHz default set above rather than failing. */
		pr_err("%s: failed to lookup parent clock %s, assuming "
			"fin_pll clock frequency is 24MHz\n", __func__,
			parent_name);
	} else {
		finpll_f = clk_get_rate(clk);
	}

	/* fin_pll is registered as a root fixed-rate clock; its parent
	 * relationship is expressed only through the copied rate. */
	fclk.id = fin_pll;
	fclk.name = "fin_pll";
	fclk.parent_name = NULL;
	fclk.flags = CLK_IS_ROOT;
	fclk.fixed_rate = finpll_f;
	samsung_clk_register_fixed_rate(&fclk, 1);
}

/*
 * This function allows non-dt platforms to specify the clock speed of the
 * xxti and xusbxti clocks. These clocks are then registered with the specified
 * clock speed.
 */
void __init exynos4_clk_register_fixed_ext(unsigned long xxti_f,
		unsigned long xusbxti_f)
{
	/* index 0 is xxti, index 1 is xusbxti (see table definition above) */
	exynos4_fixed_rate_ext_clks[0].fixed_rate = xxti_f;
	exynos4_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
	samsung_clk_register_fixed_rate(exynos4_fixed_rate_ext_clks,
			ARRAY_SIZE(exynos4_fixed_rate_ext_clks));
}

/* DT match table mapping external clock nodes to fixed-rate table indices
 * (.data is the index into exynos4_fixed_rate_ext_clks). */
static __initdata struct of_device_id ext_clk_match[] = {
	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
	{ .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
	{},
};

/*
 * Register exynos4 clocks.
 *
 * @np:		clock controller DT node (NULL on non-dt platforms)
 * @exynos4_soc: EXYNOS4210 or EXYNOS4X12 variant selector
 * @reg_base:	controller base for non-dt platforms; overwritten by
 *		of_iomap() when @np is provided
 * @xom:	XOM strap value selecting the fin_pll parent
 *
 * Registration order matters: the driver core is initialized first, then
 * external fixed clocks, then fin_pll (PLL input), then the PLLs, and only
 * then the mux/div/gate tables that reference them by name.
 */
void __init exynos4_clk_init(struct device_node *np,
		enum exynos4_soc exynos4_soc,
		void __iomem *reg_base, unsigned long xom)
{
	struct clk *apll, *mpll, *epll, *vpll;

	if (np) {
		reg_base = of_iomap(np, 0);
		if (!reg_base)
			/* Clocks are essential; nothing can run without them. */
			panic("%s: failed to map registers\n", __func__);
	}

	/* Suspend/resume register save lists differ per SoC variant. */
	if (exynos4_soc == EXYNOS4210)
		samsung_clk_init(np, reg_base, nr_clks,
			exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
			exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save));
	else
		samsung_clk_init(np, reg_base, nr_clks,
			exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
			exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));

	/* On dt platforms the external clock rates come from the DT nodes
	 * matched by ext_clk_match; non-dt platforms call
	 * exynos4_clk_register_fixed_ext() instead. */
	if (np)
		samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
			ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
			ext_clk_match);

	exynos4_clk_register_finpll(xom);

	/* 4210 uses pll45xx/pll46xx-type PLLs; 4x12 uses pll35xx/pll36xx.
	 * Note vpll's parent also differs: mout_vpllsrc vs fin_pll. */
	if (exynos4_soc == EXYNOS4210) {
		apll = samsung_clk_register_pll45xx("fout_apll", "fin_pll",
					reg_base + APLL_CON0, pll_4508);
		mpll = samsung_clk_register_pll45xx("fout_mpll", "fin_pll",
					reg_base + E4210_MPLL_CON0, pll_4508);
		epll = samsung_clk_register_pll46xx("fout_epll", "fin_pll",
					reg_base + EPLL_CON0, pll_4600);
		vpll = samsung_clk_register_pll46xx("fout_vpll", "mout_vpllsrc",
					reg_base + VPLL_CON0, pll_4650c);
	} else {
		apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
					reg_base + APLL_CON0);
		mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
					reg_base + E4X12_MPLL_CON0);
		epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
					reg_base + EPLL_CON0);
		vpll = samsung_clk_register_pll36xx("fout_vpll", "fin_pll",
					reg_base + VPLL_CON0);
	}

	samsung_clk_add_lookup(apll, fout_apll);
	samsung_clk_add_lookup(mpll, fout_mpll);
	samsung_clk_add_lookup(epll, fout_epll);
	samsung_clk_add_lookup(vpll, fout_vpll);

	/* Common clocks shared by all exynos4 variants. */
	samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
			ARRAY_SIZE(exynos4_fixed_rate_clks));
	samsung_clk_register_mux(exynos4_mux_clks,
			ARRAY_SIZE(exynos4_mux_clks));
	samsung_clk_register_div(exynos4_div_clks,
			ARRAY_SIZE(exynos4_div_clks));
	samsung_clk_register_gate(exynos4_gate_clks,
			ARRAY_SIZE(exynos4_gate_clks));

	/* Variant-specific clocks. */
	if (exynos4_soc == EXYNOS4210) {
		samsung_clk_register_fixed_rate(exynos4210_fixed_rate_clks,
			ARRAY_SIZE(exynos4210_fixed_rate_clks));
		samsung_clk_register_mux(exynos4210_mux_clks,
			ARRAY_SIZE(exynos4210_mux_clks));
		samsung_clk_register_div(exynos4210_div_clks,
			ARRAY_SIZE(exynos4210_div_clks));
		samsung_clk_register_gate(exynos4210_gate_clks,
			ARRAY_SIZE(exynos4210_gate_clks));
	} else {
		samsung_clk_register_mux(exynos4x12_mux_clks,
			ARRAY_SIZE(exynos4x12_mux_clks));
		samsung_clk_register_div(exynos4x12_div_clks,
			ARRAY_SIZE(exynos4x12_div_clks));
		samsung_clk_register_gate(exynos4x12_gate_clks,
			ARRAY_SIZE(exynos4x12_gate_clks));
	}

	pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
		"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
		exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
		_get_rate("sclk_apll"), _get_rate("sclk_mpll"),
		_get_rate("sclk_epll"), _get_rate("sclk_vpll"),
		_get_rate("arm_clk"));
}

/* CLK_OF_DECLARE entry point for Exynos4210; xom is read from chipid. */
static void __init exynos4210_clk_init(struct device_node *np)
{
	exynos4_clk_init(np, EXYNOS4210, NULL, exynos4_get_xom());
}
CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init);

/* CLK_OF_DECLARE entry point for Exynos4412 (4x12 family). */
static void __init exynos4412_clk_init(struct device_node *np)
{
	exynos4_clk_init(np, EXYNOS4X12, NULL, exynos4_get_xom());
}
CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init);
gpl-2.0
k2wl/kernel.org
drivers/media/radio/radio-aztech.c
2042
4350
/* * radio-aztech.c - Aztech radio card driver * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl> * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> * Adapted to support the Video for Linux API by * Russell Kroll <rkroll@exploits.org>. Based on original tuner code by: * * Quay Ly * Donald Song * Jason Lewis (jlewis@twilight.vtc.vsc.edu) * Scott McGrath (smcgrath@twilight.vtc.vsc.edu) * William McGrath (wmcgrath@twilight.vtc.vsc.edu) * * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool. */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/delay.h> /* udelay */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/io.h> /* outb, outb_p */ #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include "radio-isa.h" #include "lm7000.h" MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath"); MODULE_DESCRIPTION("A driver for the Aztech radio card."); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); /* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */ #ifndef CONFIG_RADIO_AZTECH_PORT #define CONFIG_RADIO_AZTECH_PORT -1 #endif #define AZTECH_MAX 2 static int io[AZTECH_MAX] = { [0] = CONFIG_RADIO_AZTECH_PORT, [1 ... (AZTECH_MAX - 1)] = -1 }; static int radio_nr[AZTECH_MAX] = { [0 ... 
(AZTECH_MAX - 1)] = -1 }; static const int radio_wait_time = 1000; module_param_array(io, int, NULL, 0444); MODULE_PARM_DESC(io, "I/O addresses of the Aztech card (0x350 or 0x358)"); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Radio device numbers"); struct aztech { struct radio_isa_card isa; int curvol; }; /* bit definitions for register read */ #define AZTECH_BIT_NOT_TUNED (1 << 0) #define AZTECH_BIT_MONO (1 << 1) /* bit definitions for register write */ #define AZTECH_BIT_TUN_CE (1 << 1) #define AZTECH_BIT_TUN_CLK (1 << 6) #define AZTECH_BIT_TUN_DATA (1 << 7) /* bits 0 and 2 are volume control, bits 3..5 are not connected */ static void aztech_set_pins(void *handle, u8 pins) { struct radio_isa_card *isa = handle; struct aztech *az = container_of(isa, struct aztech, isa); u8 bits = az->curvol; if (pins & LM7000_DATA) bits |= AZTECH_BIT_TUN_DATA; if (pins & LM7000_CLK) bits |= AZTECH_BIT_TUN_CLK; if (pins & LM7000_CE) bits |= AZTECH_BIT_TUN_CE; outb_p(bits, az->isa.io); } static struct radio_isa_card *aztech_alloc(void) { struct aztech *az = kzalloc(sizeof(*az), GFP_KERNEL); return az ? &az->isa : NULL; } static int aztech_s_frequency(struct radio_isa_card *isa, u32 freq) { lm7000_set_freq(freq, isa, aztech_set_pins); return 0; } static u32 aztech_g_rxsubchans(struct radio_isa_card *isa) { if (inb(isa->io) & AZTECH_BIT_MONO) return V4L2_TUNER_SUB_MONO; return V4L2_TUNER_SUB_STEREO; } static u32 aztech_g_signal(struct radio_isa_card *isa) { return (inb(isa->io) & AZTECH_BIT_NOT_TUNED) ? 
0 : 0xffff; } static int aztech_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol) { struct aztech *az = container_of(isa, struct aztech, isa); if (mute) vol = 0; az->curvol = (vol & 1) + ((vol & 2) << 1); outb(az->curvol, isa->io); return 0; } static const struct radio_isa_ops aztech_ops = { .alloc = aztech_alloc, .s_mute_volume = aztech_s_mute_volume, .s_frequency = aztech_s_frequency, .g_rxsubchans = aztech_g_rxsubchans, .g_signal = aztech_g_signal, }; static const int aztech_ioports[] = { 0x350, 0x358 }; static struct radio_isa_driver aztech_driver = { .driver = { .match = radio_isa_match, .probe = radio_isa_probe, .remove = radio_isa_remove, .driver = { .name = "radio-aztech", }, }, .io_params = io, .radio_nr_params = radio_nr, .io_ports = aztech_ioports, .num_of_io_ports = ARRAY_SIZE(aztech_ioports), .region_size = 8, .card = "Aztech Radio", .ops = &aztech_ops, .has_stereo = true, .max_volume = 3, }; static int __init aztech_init(void) { return isa_register_driver(&aztech_driver.driver, AZTECH_MAX); } static void __exit aztech_exit(void) { isa_unregister_driver(&aztech_driver.driver); } module_init(aztech_init); module_exit(aztech_exit);
gpl-2.0
Demon000/libra
drivers/block/swim3.c
2554
32285
/* * Driver for the SWIM3 (Super Woz Integrated Machine 3) * floppy controller found on Power Macintoshes. * * Copyright (C) 1996 Paul Mackerras. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * TODO: * handle 2 drives * handle GCR disks */ #undef DEBUG #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/fd.h> #include <linux/ioctl.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/dbdma.h> #include <asm/prom.h> #include <asm/uaccess.h> #include <asm/mediabay.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #define MAX_FLOPPIES 2 static DEFINE_MUTEX(swim3_mutex); static struct gendisk *disks[MAX_FLOPPIES]; enum swim_state { idle, locating, seeking, settling, do_transfer, jogging, available, revalidating, ejecting }; #define REG(x) unsigned char x; char x ## _pad[15]; /* * The names for these registers mostly represent speculation on my part. * It will be interesting to see how close they are to the names Apple uses. 
*/ struct swim3 { REG(data); REG(timer); /* counts down at 1MHz */ REG(error); REG(mode); REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */ REG(setup); REG(control); /* writing bits clears them */ REG(status); /* writing bits sets them in control */ REG(intr); REG(nseek); /* # tracks to seek */ REG(ctrack); /* current track number */ REG(csect); /* current sector number */ REG(gap3); /* size of gap 3 in track format */ REG(sector); /* sector # to read or write */ REG(nsect); /* # sectors to read or write */ REG(intr_enable); }; #define control_bic control #define control_bis status /* Bits in select register */ #define CA_MASK 7 #define LSTRB 8 /* Bits in control register */ #define DO_SEEK 0x80 #define FORMAT 0x40 #define SELECT 0x20 #define WRITE_SECTORS 0x10 #define DO_ACTION 0x08 #define DRIVE2_ENABLE 0x04 #define DRIVE_ENABLE 0x02 #define INTR_ENABLE 0x01 /* Bits in status register */ #define FIFO_1BYTE 0x80 #define FIFO_2BYTE 0x40 #define ERROR 0x20 #define DATA 0x08 #define RDDATA 0x04 #define INTR_PENDING 0x02 #define MARK_BYTE 0x01 /* Bits in intr and intr_enable registers */ #define ERROR_INTR 0x20 #define DATA_CHANGED 0x10 #define TRANSFER_DONE 0x08 #define SEEN_SECTOR 0x04 #define SEEK_DONE 0x02 #define TIMER_DONE 0x01 /* Bits in error register */ #define ERR_DATA_CRC 0x80 #define ERR_ADDR_CRC 0x40 #define ERR_OVERRUN 0x04 #define ERR_UNDERRUN 0x01 /* Bits in setup register */ #define S_SW_RESET 0x80 #define S_GCR_WRITE 0x40 #define S_IBM_DRIVE 0x20 #define S_TEST_MODE 0x10 #define S_FCLK_DIV2 0x08 #define S_GCR 0x04 #define S_COPY_PROT 0x02 #define S_INV_WDATA 0x01 /* Select values for swim3_action */ #define SEEK_POSITIVE 0 #define SEEK_NEGATIVE 4 #define STEP 1 #define MOTOR_ON 2 #define MOTOR_OFF 6 #define INDEX 3 #define EJECT 7 #define SETMFM 9 #define SETGCR 13 /* Select values for swim3_select and swim3_readbit */ #define STEP_DIR 0 #define STEPPING 1 #define MOTOR_ON 2 #define RELAX 3 /* also eject in progress */ #define READ_DATA_0 4 
#define TWOMEG_DRIVE 5 #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ #define DRIVE_PRESENT 7 #define DISK_IN 8 #define WRITE_PROT 9 #define TRACK_ZERO 10 #define TACHO 11 #define READ_DATA_1 12 #define MFM_MODE 13 #define SEEK_COMPLETE 14 #define ONEMEG_MEDIA 15 /* Definitions of values used in writing and formatting */ #define DATA_ESCAPE 0x99 #define GCR_SYNC_EXC 0x3f #define GCR_SYNC_CONV 0x80 #define GCR_FIRST_MARK 0xd5 #define GCR_SECOND_MARK 0xaa #define GCR_ADDR_MARK "\xd5\xaa\x00" #define GCR_DATA_MARK "\xd5\xaa\x0b" #define GCR_SLIP_BYTE "\x27\xaa" #define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f" #define DATA_99 "\x99\x99" #define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe" #define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc" #define MFM_GAP_LEN 12 struct floppy_state { enum swim_state state; struct swim3 __iomem *swim3; /* hardware registers */ struct dbdma_regs __iomem *dma; /* DMA controller registers */ int swim3_intr; /* interrupt number for SWIM3 */ int dma_intr; /* interrupt number for DMA channel */ int cur_cyl; /* cylinder head is on, or -1 */ int cur_sector; /* last sector we saw go past */ int req_cyl; /* the cylinder for the current r/w request */ int head; /* head number ditto */ int req_sector; /* sector number ditto */ int scount; /* # sectors we're transferring at present */ int retries; int settle_time; int secpercyl; /* disk geometry information */ int secpertrack; int total_secs; int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */ struct dbdma_cmd *dma_cmd; int ref_count; int expect_cyl; struct timer_list timeout; int timeout_pending; int ejected; wait_queue_head_t wait; int wanted; struct macio_dev *mdev; char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; int index; struct request *cur_req; }; #define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg) #define swim3_warn(fmt, arg...) 
dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg) #define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg) #ifdef DEBUG #define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg) #else #define swim3_dbg(fmt, arg...) do { } while(0) #endif static struct floppy_state floppy_states[MAX_FLOPPIES]; static int floppy_count = 0; static DEFINE_SPINLOCK(swim3_lock); static unsigned short write_preamble[] = { 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */ 0, 0, 0, 0, 0, 0, /* sync field */ 0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */ 0x990f /* no escape for 512 bytes */ }; static unsigned short write_postamble[] = { 0x9904, /* insert CRC */ 0x4e4e, 0x4e4e, 0x9908, /* stop writing */ 0, 0, 0, 0, 0, 0 }; static void seek_track(struct floppy_state *fs, int n); static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count); static void act(struct floppy_state *fs); static void scan_timeout(unsigned long data); static void seek_timeout(unsigned long data); static void settle_timeout(unsigned long data); static void xfer_timeout(unsigned long data); static irqreturn_t swim3_interrupt(int irq, void *dev_id); /*static void fd_dma_interrupt(int irq, void *dev_id);*/ static int grab_drive(struct floppy_state *fs, enum swim_state state, int interruptible); static void release_drive(struct floppy_state *fs); static int fd_eject(struct floppy_state *fs); static int floppy_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param); static int floppy_open(struct block_device *bdev, fmode_t mode); static void floppy_release(struct gendisk *disk, fmode_t mode); static unsigned int floppy_check_events(struct gendisk *disk, unsigned int clearing); static int floppy_revalidate(struct gendisk *disk); static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes) { struct request *req = fs->cur_req; int rc; swim3_dbg(" end request, 
err=%d nr_bytes=%d, cur_req=%p\n", err, nr_bytes, req); if (err) nr_bytes = blk_rq_cur_bytes(req); rc = __blk_end_request(req, err, nr_bytes); if (rc) return true; fs->cur_req = NULL; return false; } static void swim3_select(struct floppy_state *fs, int sel) { struct swim3 __iomem *sw = fs->swim3; out_8(&sw->select, RELAX); if (sel & 8) out_8(&sw->control_bis, SELECT); else out_8(&sw->control_bic, SELECT); out_8(&sw->select, sel & CA_MASK); } static void swim3_action(struct floppy_state *fs, int action) { struct swim3 __iomem *sw = fs->swim3; swim3_select(fs, action); udelay(1); out_8(&sw->select, sw->select | LSTRB); udelay(2); out_8(&sw->select, sw->select & ~LSTRB); udelay(1); } static int swim3_readbit(struct floppy_state *fs, int bit) { struct swim3 __iomem *sw = fs->swim3; int stat; swim3_select(fs, bit); udelay(1); stat = in_8(&sw->status); return (stat & DATA) == 0; } static void start_request(struct floppy_state *fs) { struct request *req; unsigned long x; swim3_dbg("start request, initial state=%d\n", fs->state); if (fs->state == idle && fs->wanted) { fs->state = available; wake_up(&fs->wait); return; } while (fs->state == idle) { swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req); if (!fs->cur_req) { fs->cur_req = blk_fetch_request(disks[fs->index]->queue); swim3_dbg(" fetched request %p\n", fs->cur_req); if (!fs->cur_req) break; } req = fs->cur_req; if (fs->mdev->media_bay && check_media_bay(fs->mdev->media_bay) != MB_FD) { swim3_dbg("%s", " media bay absent, dropping req\n"); swim3_end_request(fs, -ENODEV, 0); continue; } #if 0 /* This is really too verbose */ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", req->rq_disk->disk_name, req->cmd, (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer); swim3_dbg(" errors=%d current_nr_sectors=%u\n", req->errors, blk_rq_cur_sectors(req)); #endif if (blk_rq_pos(req) >= fs->total_secs) { swim3_dbg(" pos out of bounds (%ld, max is %ld)\n", (long)blk_rq_pos(req), 
(long)fs->total_secs); swim3_end_request(fs, -EIO, 0); continue; } if (fs->ejected) { swim3_dbg("%s", " disk ejected\n"); swim3_end_request(fs, -EIO, 0); continue; } if (rq_data_dir(req) == WRITE) { if (fs->write_prot < 0) fs->write_prot = swim3_readbit(fs, WRITE_PROT); if (fs->write_prot) { swim3_dbg("%s", " try to write, disk write protected\n"); swim3_end_request(fs, -EIO, 0); continue; } } /* Do not remove the cast. blk_rq_pos(req) is now a * sector_t and can be 64 bits, but it will never go * past 32 bits for this driver anyway, so we can * safely cast it down and not have to do a 64/32 * division */ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl; x = ((long)blk_rq_pos(req)) % fs->secpercyl; fs->head = x / fs->secpertrack; fs->req_sector = x % fs->secpertrack + 1; fs->state = do_transfer; fs->retries = 0; act(fs); } } static void do_fd_request(struct request_queue * q) { start_request(q->queuedata); } static void set_timeout(struct floppy_state *fs, int nticks, void (*proc)(unsigned long)) { if (fs->timeout_pending) del_timer(&fs->timeout); fs->timeout.expires = jiffies + nticks; fs->timeout.function = proc; fs->timeout.data = (unsigned long) fs; add_timer(&fs->timeout); fs->timeout_pending = 1; } static inline void scan_track(struct floppy_state *fs) { struct swim3 __iomem *sw = fs->swim3; swim3_select(fs, READ_DATA_0); in_8(&sw->intr); /* clear SEEN_SECTOR bit */ in_8(&sw->error); out_8(&sw->intr_enable, SEEN_SECTOR); out_8(&sw->control_bis, DO_ACTION); /* enable intr when track found */ set_timeout(fs, HZ, scan_timeout); /* enable timeout */ } static inline void seek_track(struct floppy_state *fs, int n) { struct swim3 __iomem *sw = fs->swim3; if (n >= 0) { swim3_action(fs, SEEK_POSITIVE); sw->nseek = n; } else { swim3_action(fs, SEEK_NEGATIVE); sw->nseek = -n; } fs->expect_cyl = (fs->cur_cyl >= 0)? 
fs->cur_cyl + n: -1; swim3_select(fs, STEP); in_8(&sw->error); /* enable intr when seek finished */ out_8(&sw->intr_enable, SEEK_DONE); out_8(&sw->control_bis, DO_SEEK); set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */ fs->settle_time = 0; } static inline void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count) { st_le16(&cp->req_count, count); st_le16(&cp->command, cmd); st_le32(&cp->phy_addr, virt_to_bus(buf)); cp->xfer_status = 0; } static inline void setup_transfer(struct floppy_state *fs) { int n; struct swim3 __iomem *sw = fs->swim3; struct dbdma_cmd *cp = fs->dma_cmd; struct dbdma_regs __iomem *dr = fs->dma; struct request *req = fs->cur_req; if (blk_rq_cur_sectors(req) <= 0) { swim3_warn("%s", "Transfer 0 sectors ?\n"); return; } if (rq_data_dir(req) == WRITE) n = 1; else { n = fs->secpertrack - fs->req_sector + 1; if (n > blk_rq_cur_sectors(req)) n = blk_rq_cur_sectors(req); } swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n", fs->req_sector, fs->secpertrack, fs->head, n); fs->scount = n; swim3_select(fs, fs->head? 
READ_DATA_1: READ_DATA_0); out_8(&sw->sector, fs->req_sector); out_8(&sw->nsect, n); out_8(&sw->gap3, 0); out_le32(&dr->cmdptr, virt_to_bus(cp)); if (rq_data_dir(req) == WRITE) { /* Set up 3 dma commands: write preamble, data, postamble */ init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); ++cp; init_dma(cp, OUTPUT_MORE, req->buffer, 512); ++cp; init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); } else { init_dma(cp, INPUT_LAST, req->buffer, n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); in_8(&sw->error); out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); if (rq_data_dir(req) == WRITE) out_8(&sw->control_bis, WRITE_SECTORS); in_8(&sw->intr); out_le32(&dr->control, (RUN << 16) | RUN); /* enable intr when transfer complete */ out_8(&sw->intr_enable, TRANSFER_DONE); out_8(&sw->control_bis, DO_ACTION); set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */ } static void act(struct floppy_state *fs) { for (;;) { swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n", fs->state, fs->req_cyl, fs->cur_cyl); switch (fs->state) { case idle: return; /* XXX shouldn't get here */ case locating: if (swim3_readbit(fs, TRACK_ZERO)) { swim3_dbg("%s", " locate track 0\n"); fs->cur_cyl = 0; if (fs->req_cyl == 0) fs->state = do_transfer; else fs->state = seeking; break; } scan_track(fs); return; case seeking: if (fs->cur_cyl < 0) { fs->expect_cyl = -1; fs->state = locating; break; } if (fs->req_cyl == fs->cur_cyl) { swim3_warn("%s", "Whoops, seeking 0\n"); fs->state = do_transfer; break; } seek_track(fs, fs->req_cyl - fs->cur_cyl); return; case settling: /* check for SEEK_COMPLETE after 30ms */ fs->settle_time = (HZ + 32) / 33; set_timeout(fs, fs->settle_time, settle_timeout); return; case do_transfer: if (fs->cur_cyl != fs->req_cyl) { if (fs->retries > 5) { swim3_err("Wrong cylinder in transfer, want: %d got %d\n", fs->req_cyl, fs->cur_cyl); swim3_end_request(fs, -EIO, 0); fs->state = 
idle; return; } fs->state = seeking; break; } setup_transfer(fs); return; case jogging: seek_track(fs, -5); return; default: swim3_err("Unknown state %d\n", fs->state); return; } } } static void scan_timeout(unsigned long data) { struct floppy_state *fs = (struct floppy_state *) data; struct swim3 __iomem *sw = fs->swim3; unsigned long flags; swim3_dbg("* scan timeout, state=%d\n", fs->state); spin_lock_irqsave(&swim3_lock, flags); fs->timeout_pending = 0; out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); out_8(&sw->select, RELAX); out_8(&sw->intr_enable, 0); fs->cur_cyl = -1; if (fs->retries > 5) { swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); } else { fs->state = jogging; act(fs); } spin_unlock_irqrestore(&swim3_lock, flags); } static void seek_timeout(unsigned long data) { struct floppy_state *fs = (struct floppy_state *) data; struct swim3 __iomem *sw = fs->swim3; unsigned long flags; swim3_dbg("* seek timeout, state=%d\n", fs->state); spin_lock_irqsave(&swim3_lock, flags); fs->timeout_pending = 0; out_8(&sw->control_bic, DO_SEEK); out_8(&sw->select, RELAX); out_8(&sw->intr_enable, 0); swim3_err("%s", "Seek timeout\n"); swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); spin_unlock_irqrestore(&swim3_lock, flags); } static void settle_timeout(unsigned long data) { struct floppy_state *fs = (struct floppy_state *) data; struct swim3 __iomem *sw = fs->swim3; unsigned long flags; swim3_dbg("* settle timeout, state=%d\n", fs->state); spin_lock_irqsave(&swim3_lock, flags); fs->timeout_pending = 0; if (swim3_readbit(fs, SEEK_COMPLETE)) { out_8(&sw->select, RELAX); fs->state = locating; act(fs); goto unlock; } out_8(&sw->select, RELAX); if (fs->settle_time < 2*HZ) { ++fs->settle_time; set_timeout(fs, 1, settle_timeout); goto unlock; } swim3_err("%s", "Seek settle timeout\n"); swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); unlock: spin_unlock_irqrestore(&swim3_lock, flags); } static void 
xfer_timeout(unsigned long data) { struct floppy_state *fs = (struct floppy_state *) data; struct swim3 __iomem *sw = fs->swim3; struct dbdma_regs __iomem *dr = fs->dma; unsigned long flags; int n; swim3_dbg("* xfer timeout, state=%d\n", fs->state); spin_lock_irqsave(&swim3_lock, flags); fs->timeout_pending = 0; out_le32(&dr->control, RUN << 16); /* We must wait a bit for dbdma to stop */ for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++) udelay(1); out_8(&sw->intr_enable, 0); out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); out_8(&sw->select, RELAX); swim3_err("Timeout %sing sector %ld\n", (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"), (long)blk_rq_pos(fs->cur_req)); swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); spin_unlock_irqrestore(&swim3_lock, flags); } static irqreturn_t swim3_interrupt(int irq, void *dev_id) { struct floppy_state *fs = (struct floppy_state *) dev_id; struct swim3 __iomem *sw = fs->swim3; int intr, err, n; int stat, resid; struct dbdma_regs __iomem *dr; struct dbdma_cmd *cp; unsigned long flags; struct request *req = fs->cur_req; swim3_dbg("* interrupt, state=%d\n", fs->state); spin_lock_irqsave(&swim3_lock, flags); intr = in_8(&sw->intr); err = (intr & ERROR_INTR)? 
in_8(&sw->error): 0; if ((intr & ERROR_INTR) && fs->state != do_transfer) swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n", fs->state, rq_data_dir(req), intr, err); switch (fs->state) { case locating: if (intr & SEEN_SECTOR) { out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); out_8(&sw->select, RELAX); out_8(&sw->intr_enable, 0); del_timer(&fs->timeout); fs->timeout_pending = 0; if (sw->ctrack == 0xff) { swim3_err("%s", "Seen sector but cyl=ff?\n"); fs->cur_cyl = -1; if (fs->retries > 5) { swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); } else { fs->state = jogging; act(fs); } break; } fs->cur_cyl = sw->ctrack; fs->cur_sector = sw->csect; if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl) swim3_err("Expected cyl %d, got %d\n", fs->expect_cyl, fs->cur_cyl); fs->state = do_transfer; act(fs); } break; case seeking: case jogging: if (sw->nseek == 0) { out_8(&sw->control_bic, DO_SEEK); out_8(&sw->select, RELAX); out_8(&sw->intr_enable, 0); del_timer(&fs->timeout); fs->timeout_pending = 0; if (fs->state == seeking) ++fs->retries; fs->state = settling; act(fs); } break; case settling: out_8(&sw->intr_enable, 0); del_timer(&fs->timeout); fs->timeout_pending = 0; act(fs); break; case do_transfer: if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0) break; out_8(&sw->intr_enable, 0); out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); out_8(&sw->select, RELAX); del_timer(&fs->timeout); fs->timeout_pending = 0; dr = fs->dma; cp = fs->dma_cmd; if (rq_data_dir(req) == WRITE) ++cp; /* * Check that the main data transfer has finished. * On writing, the swim3 sometimes doesn't use * up all the bytes of the postamble, so we can still * see DMA active here. That doesn't matter as long * as all the sector data has been transferred. 
*/ if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) { /* wait a little while for DMA to complete */ for (n = 0; n < 100; ++n) { if (cp->xfer_status != 0) break; udelay(1); barrier(); } } /* turn off DMA */ out_le32(&dr->control, (RUN | PAUSE) << 16); stat = ld_le16(&cp->xfer_status); resid = ld_le16(&cp->res_count); if (intr & ERROR_INTR) { n = fs->scount - 1 - resid / 512; if (n > 0) { blk_update_request(req, 0, n << 9); fs->req_sector += n; } if (fs->retries < 5) { ++fs->retries; act(fs); } else { swim3_err("Error %sing block %ld (err=%x)\n", rq_data_dir(req) == WRITE? "writ": "read", (long)blk_rq_pos(req), err); swim3_end_request(fs, -EIO, 0); fs->state = idle; } } else { if ((stat & ACTIVE) == 0 || resid != 0) { /* musta been an error */ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid); swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n", fs->state, rq_data_dir(req), intr, err); swim3_end_request(fs, -EIO, 0); fs->state = idle; start_request(fs); break; } fs->retries = 0; if (swim3_end_request(fs, 0, fs->scount << 9)) { fs->req_sector += fs->scount; if (fs->req_sector > fs->secpertrack) { fs->req_sector -= fs->secpertrack; if (++fs->head > 1) { fs->head = 0; ++fs->req_cyl; } } act(fs); } else fs->state = idle; } if (fs->state == idle) start_request(fs); break; default: swim3_err("Don't know what to do in state %d\n", fs->state); } spin_unlock_irqrestore(&swim3_lock, flags); return IRQ_HANDLED; } /* static void fd_dma_interrupt(int irq, void *dev_id) { } */ /* Called under the mutex to grab exclusive access to a drive */ static int grab_drive(struct floppy_state *fs, enum swim_state state, int interruptible) { unsigned long flags; swim3_dbg("%s", "-> grab drive\n"); spin_lock_irqsave(&swim3_lock, flags); if (fs->state != idle && fs->state != available) { ++fs->wanted; while (fs->state != available) { spin_unlock_irqrestore(&swim3_lock, flags); if (interruptible && signal_pending(current)) { --fs->wanted; return -EINTR; } 
interruptible_sleep_on(&fs->wait); spin_lock_irqsave(&swim3_lock, flags); } --fs->wanted; } fs->state = state; spin_unlock_irqrestore(&swim3_lock, flags); return 0; } static void release_drive(struct floppy_state *fs) { unsigned long flags; swim3_dbg("%s", "-> release drive\n"); spin_lock_irqsave(&swim3_lock, flags); fs->state = idle; start_request(fs); spin_unlock_irqrestore(&swim3_lock, flags); } static int fd_eject(struct floppy_state *fs) { int err, n; err = grab_drive(fs, ejecting, 1); if (err) return err; swim3_action(fs, EJECT); for (n = 20; n > 0; --n) { if (signal_pending(current)) { err = -EINTR; break; } swim3_select(fs, RELAX); schedule_timeout_interruptible(1); if (swim3_readbit(fs, DISK_IN) == 0) break; } swim3_select(fs, RELAX); udelay(150); fs->ejected = 1; release_drive(fs); return err; } static struct floppy_struct floppy_type = { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { struct floppy_state *fs = bdev->bd_disk->private_data; int err; if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (fs->mdev->media_bay && check_media_bay(fs->mdev->media_bay) != MB_FD) return -ENXIO; switch (cmd) { case FDEJECT: if (fs->ref_count != 1) return -EBUSY; err = fd_eject(fs); return err; case FDGETPRM: if (copy_to_user((void __user *) param, &floppy_type, sizeof(struct floppy_struct))) return -EFAULT; return 0; } return -ENOTTY; } static int floppy_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int ret; mutex_lock(&swim3_mutex); ret = floppy_locked_ioctl(bdev, mode, cmd, param); mutex_unlock(&swim3_mutex); return ret; } static int floppy_open(struct block_device *bdev, fmode_t mode) { struct floppy_state *fs = bdev->bd_disk->private_data; struct swim3 __iomem *sw = fs->swim3; int n, err = 0; if (fs->ref_count == 0) { if (fs->mdev->media_bay && check_media_bay(fs->mdev->media_bay) != 
MB_FD) return -ENXIO; out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); out_8(&sw->control_bic, 0xff); out_8(&sw->mode, 0x95); udelay(10); out_8(&sw->intr_enable, 0); out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE); swim3_action(fs, MOTOR_ON); fs->write_prot = -1; fs->cur_cyl = -1; for (n = 0; n < 2 * HZ; ++n) { if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE)) break; if (signal_pending(current)) { err = -EINTR; break; } swim3_select(fs, RELAX); schedule_timeout_interruptible(1); } if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0 || swim3_readbit(fs, DISK_IN) == 0)) err = -ENXIO; swim3_action(fs, SETMFM); swim3_select(fs, RELAX); } else if (fs->ref_count == -1 || mode & FMODE_EXCL) return -EBUSY; if (err == 0 && (mode & FMODE_NDELAY) == 0 && (mode & (FMODE_READ|FMODE_WRITE))) { check_disk_change(bdev); if (fs->ejected) err = -ENXIO; } if (err == 0 && (mode & FMODE_WRITE)) { if (fs->write_prot < 0) fs->write_prot = swim3_readbit(fs, WRITE_PROT); if (fs->write_prot) err = -EROFS; } if (err) { if (fs->ref_count == 0) { swim3_action(fs, MOTOR_OFF); out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE); swim3_select(fs, RELAX); } return err; } if (mode & FMODE_EXCL) fs->ref_count = -1; else ++fs->ref_count; return 0; } static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; mutex_lock(&swim3_mutex); ret = floppy_open(bdev, mode); mutex_unlock(&swim3_mutex); return ret; } static void floppy_release(struct gendisk *disk, fmode_t mode) { struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw = fs->swim3; mutex_lock(&swim3_mutex); if (fs->ref_count > 0 && --fs->ref_count == 0) { swim3_action(fs, MOTOR_OFF); out_8(&sw->control_bic, 0xff); swim3_select(fs, RELAX); } mutex_unlock(&swim3_mutex); } static unsigned int floppy_check_events(struct gendisk *disk, unsigned int clearing) { struct floppy_state *fs = disk->private_data; return fs->ejected ? 
DISK_EVENT_MEDIA_CHANGE : 0; } static int floppy_revalidate(struct gendisk *disk) { struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw; int ret, n; if (fs->mdev->media_bay && check_media_bay(fs->mdev->media_bay) != MB_FD) return -ENXIO; sw = fs->swim3; grab_drive(fs, revalidating, 0); out_8(&sw->intr_enable, 0); out_8(&sw->control_bis, DRIVE_ENABLE); swim3_action(fs, MOTOR_ON); /* necessary? */ fs->write_prot = -1; fs->cur_cyl = -1; mdelay(1); for (n = HZ; n > 0; --n) { if (swim3_readbit(fs, SEEK_COMPLETE)) break; if (signal_pending(current)) break; swim3_select(fs, RELAX); schedule_timeout_interruptible(1); } ret = swim3_readbit(fs, SEEK_COMPLETE) == 0 || swim3_readbit(fs, DISK_IN) == 0; if (ret) swim3_action(fs, MOTOR_OFF); else { fs->ejected = 0; swim3_action(fs, SETMFM); } swim3_select(fs, RELAX); release_drive(fs); return ret; } static const struct block_device_operations floppy_fops = { .open = floppy_unlocked_open, .release = floppy_release, .ioctl = floppy_ioctl, .check_events = floppy_check_events, .revalidate_disk= floppy_revalidate, }; static void swim3_mb_event(struct macio_dev* mdev, int mb_state) { struct floppy_state *fs = macio_get_drvdata(mdev); struct swim3 __iomem *sw; if (!fs) return; sw = fs->swim3; if (mb_state != MB_FD) return; /* Clear state */ out_8(&sw->intr_enable, 0); in_8(&sw->intr); in_8(&sw->error); } static int swim3_add_device(struct macio_dev *mdev, int index) { struct device_node *swim = mdev->ofdev.dev.of_node; struct floppy_state *fs = &floppy_states[index]; int rc = -EBUSY; /* Do this first for message macros */ memset(fs, 0, sizeof(*fs)); fs->mdev = mdev; fs->index = index; /* Check & Request resources */ if (macio_resource_count(mdev) < 2) { swim3_err("%s", "No address in device-tree\n"); return -ENXIO; } if (macio_irq_count(mdev) < 1) { swim3_err("%s", "No interrupt in device-tree\n"); return -ENXIO; } if (macio_request_resource(mdev, 0, "swim3 (mmio)")) { swim3_err("%s", "Can't request mmio resource\n"); 
return -EBUSY; } if (macio_request_resource(mdev, 1, "swim3 (dma)")) { swim3_err("%s", "Can't request dma resource\n"); macio_release_resource(mdev, 0); return -EBUSY; } dev_set_drvdata(&mdev->ofdev.dev, fs); if (mdev->media_bay == NULL) pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); fs->state = idle; fs->swim3 = (struct swim3 __iomem *) ioremap(macio_resource_start(mdev, 0), 0x200); if (fs->swim3 == NULL) { swim3_err("%s", "Couldn't map mmio registers\n"); rc = -ENOMEM; goto out_release; } fs->dma = (struct dbdma_regs __iomem *) ioremap(macio_resource_start(mdev, 1), 0x200); if (fs->dma == NULL) { swim3_err("%s", "Couldn't map dma registers\n"); iounmap(fs->swim3); rc = -ENOMEM; goto out_release; } fs->swim3_intr = macio_irq(mdev, 0); fs->dma_intr = macio_irq(mdev, 1); fs->cur_cyl = -1; fs->cur_sector = -1; fs->secpercyl = 36; fs->secpertrack = 18; fs->total_secs = 2880; init_waitqueue_head(&fs->wait); fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd)); st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD) swim3_mb_event(mdev, MB_FD); if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) { swim3_err("%s", "Couldn't request interrupt\n"); pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0); goto out_unmap; return -EBUSY; } init_timer(&fs->timeout); swim3_info("SWIM3 floppy controller %s\n", mdev->media_bay ? "in media bay" : ""); return 0; out_unmap: iounmap(fs->dma); iounmap(fs->swim3); out_release: macio_release_resource(mdev, 0); macio_release_resource(mdev, 1); return rc; } static int swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) { struct gendisk *disk; int index, rc; index = floppy_count++; if (index >= MAX_FLOPPIES) return -ENXIO; /* Add the drive */ rc = swim3_add_device(mdev, index); if (rc) return rc; /* Now register that disk. 
Same comment about failure handling */ disk = disks[index] = alloc_disk(1); if (disk == NULL) return -ENOMEM; disk->queue = blk_init_queue(do_fd_request, &swim3_lock); if (disk->queue == NULL) { put_disk(disk); return -ENOMEM; } disk->queue->queuedata = &floppy_states[index]; if (index == 0) { /* If we failed, there isn't much we can do as the driver is still * too dumb to remove the device, just bail out */ if (register_blkdev(FLOPPY_MAJOR, "fd")) return 0; } disk->major = FLOPPY_MAJOR; disk->first_minor = index; disk->fops = &floppy_fops; disk->private_data = &floppy_states[index]; disk->flags |= GENHD_FL_REMOVABLE; sprintf(disk->disk_name, "fd%d", index); set_capacity(disk, 2880); add_disk(disk); return 0; } static struct of_device_id swim3_match[] = { { .name = "swim3", }, { .compatible = "ohare-swim3" }, { .compatible = "swim3" }, { /* end of list */ } }; static struct macio_driver swim3_driver = { .driver = { .name = "swim3", .of_match_table = swim3_match, }, .probe = swim3_attach, #ifdef CONFIG_PMAC_MEDIABAY .mediabay_event = swim3_mb_event, #endif #if 0 .suspend = swim3_suspend, .resume = swim3_resume, #endif }; int swim3_init(void) { macio_register_driver(&swim3_driver); return 0; } module_init(swim3_init) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul Mackerras"); MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
gpl-2.0
Snepsts/e980_stock
net/mac80211/work.c
3322
8669
/* * mac80211 work implementation * * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/slab.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "rate.h" #include "driver-ops.h" enum work_action { WORK_ACT_NONE, WORK_ACT_TIMEOUT, }; /* utils */ static inline void ASSERT_WORK_MTX(struct ieee80211_local *local) { lockdep_assert_held(&local->mtx); } /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was * asked for before, or if it's not pending right now. This * function ensures that. Note that it then is required to * run this function for all timeouts after the first one * has happened -- the work that runs from this timer will * do that. */ static void run_again(struct ieee80211_local *local, unsigned long timeout) { ASSERT_WORK_MTX(local); if (!timer_pending(&local->work_timer) || time_before(timeout, local->work_timer.expires)) mod_timer(&local->work_timer, timeout); } void free_work(struct ieee80211_work *wk) { kfree_rcu(wk, rcu_head); } static enum work_action __must_check ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) { /* * First time we run, do nothing -- the generic code will * have switched to the right channel etc. 
*/ if (!wk->started) { wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration); cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk, wk->chan, wk->chan_type, wk->remain.duration, GFP_KERNEL); return WORK_ACT_NONE; } return WORK_ACT_TIMEOUT; } static enum work_action __must_check ieee80211_offchannel_tx(struct ieee80211_work *wk) { if (!wk->started) { wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait); /* * After this, offchan_tx.frame remains but now is no * longer a valid pointer -- we still need it as the * cookie for canceling this work/status matching. */ ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame); return WORK_ACT_NONE; } return WORK_ACT_TIMEOUT; } static void ieee80211_work_timer(unsigned long data) { struct ieee80211_local *local = (void *) data; if (local->quiescing) return; ieee80211_queue_work(&local->hw, &local->work_work); } static void ieee80211_work_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, work_work); struct ieee80211_work *wk, *tmp; LIST_HEAD(free_work); enum work_action rma; bool remain_off_channel = false; if (local->scanning) return; /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. 
*/ if (WARN(local->suspended, "work scheduled while going to suspend\n")) return; mutex_lock(&local->mtx); ieee80211_recalc_idle(local); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { bool started = wk->started; /* mark work as started if it's on the current off-channel */ if (!started && local->tmp_channel && wk->chan == local->tmp_channel && wk->chan_type == local->tmp_channel_type) { started = true; wk->timeout = jiffies; } if (!started && !local->tmp_channel) { ieee80211_offchannel_stop_vifs(local, true); local->tmp_channel = wk->chan; local->tmp_channel_type = wk->chan_type; ieee80211_hw_config(local, 0); started = true; wk->timeout = jiffies; } /* don't try to work with items that aren't started */ if (!started) continue; if (time_is_after_jiffies(wk->timeout)) { /* * This work item isn't supposed to be worked on * right now, but take care to adjust the timer * properly. */ run_again(local, wk->timeout); continue; } switch (wk->type) { default: WARN_ON(1); /* nothing */ rma = WORK_ACT_NONE; break; case IEEE80211_WORK_ABORT: rma = WORK_ACT_TIMEOUT; break; case IEEE80211_WORK_REMAIN_ON_CHANNEL: rma = ieee80211_remain_on_channel_timeout(wk); break; case IEEE80211_WORK_OFFCHANNEL_TX: rma = ieee80211_offchannel_tx(wk); break; } wk->started = started; switch (rma) { case WORK_ACT_NONE: /* might have changed the timeout */ run_again(local, wk->timeout); break; case WORK_ACT_TIMEOUT: list_del_rcu(&wk->list); synchronize_rcu(); list_add(&wk->list, &free_work); break; default: WARN(1, "unexpected: %d", rma); } } list_for_each_entry(wk, &local->work_list, list) { if (!wk->started) continue; if (wk->chan != local->tmp_channel || wk->chan_type != local->tmp_channel_type) continue; remain_off_channel = true; } if (!remain_off_channel && local->tmp_channel) { local->tmp_channel = NULL; ieee80211_hw_config(local, 0); ieee80211_offchannel_return(local, true); /* give connection some time to breathe */ run_again(local, jiffies + HZ/2); } if 
(list_empty(&local->work_list) && local->scan_req && !local->scanning) ieee80211_queue_delayed_work(&local->hw, &local->scan_work, round_jiffies_relative(0)); ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); list_for_each_entry_safe(wk, tmp, &free_work, list) { wk->done(wk, NULL); list_del(&wk->list); kfree(wk); } } void ieee80211_add_work(struct ieee80211_work *wk) { struct ieee80211_local *local; if (WARN_ON(!wk->chan)) return; if (WARN_ON(!wk->sdata)) return; if (WARN_ON(!wk->done)) return; if (WARN_ON(!ieee80211_sdata_running(wk->sdata))) return; wk->started = false; local = wk->sdata->local; mutex_lock(&local->mtx); list_add_tail(&wk->list, &local->work_list); mutex_unlock(&local->mtx); ieee80211_queue_work(&local->hw, &local->work_work); } void ieee80211_work_init(struct ieee80211_local *local) { INIT_LIST_HEAD(&local->work_list); setup_timer(&local->work_timer, ieee80211_work_timer, (unsigned long)local); INIT_WORK(&local->work_work, ieee80211_work_work); } void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_work *wk; bool cleanup = false; mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; cleanup = true; wk->type = IEEE80211_WORK_ABORT; wk->started = true; wk->timeout = jiffies; } mutex_unlock(&local->mtx); /* run cleanups etc. */ if (cleanup) ieee80211_work_work(&local->work_work); mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; WARN_ON(1); break; } mutex_unlock(&local->mtx); } static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk, struct sk_buff *skb) { /* * We are done serving the remain-on-channel command. 
*/ cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk, wk->chan, wk->chan_type, GFP_KERNEL); return WORK_DONE_DESTROY; } int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, unsigned int duration, u64 *cookie) { struct ieee80211_work *wk; wk = kzalloc(sizeof(*wk), GFP_KERNEL); if (!wk) return -ENOMEM; wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL; wk->chan = chan; wk->chan_type = channel_type; wk->sdata = sdata; wk->done = ieee80211_remain_done; wk->remain.duration = duration; *cookie = (unsigned long) wk; ieee80211_add_work(wk); return 0; } int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata, u64 cookie) { struct ieee80211_local *local = sdata->local; struct ieee80211_work *wk, *tmp; bool found = false; mutex_lock(&local->mtx); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { if ((unsigned long) wk == cookie) { wk->timeout = jiffies; found = true; break; } } mutex_unlock(&local->mtx); if (!found) return -ENOENT; ieee80211_queue_work(&local->hw, &local->work_work); return 0; }
gpl-2.0
kenkit/htc-kernel-saga
drivers/lguest/lguest_device.c
3578
15434
/*P:050 * Lguest guests use a very simple method to describe devices. It's a * series of device descriptors contained just above the top of normal Guest * memory. * * We use the standard "virtio" device infrastructure, which provides us with a * console, a network and a block driver. Each one expects some configuration * information and a "virtqueue" or two to send and receive data. :*/ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/lguest_launcher.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/interrupt.h> #include <linux/virtio_ring.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/paravirt.h> #include <asm/lguest_hcall.h> /* The pointer to our (page) of device descriptions. */ static void *lguest_devices; /* * For Guests, device memory can be used as normal memory, so we cast away the * __iomem to quieten sparse. */ static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) { return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); } static inline void lguest_unmap(void *addr) { iounmap((__force void __iomem *)addr); } /*D:100 * Each lguest device is just a virtio device plus a pointer to its entry * in the lguest_devices page. */ struct lguest_device { struct virtio_device vdev; /* The entry in the lguest_devices page for this device. */ struct lguest_device_desc *desc; }; /* * Since the virtio infrastructure hands us a pointer to the virtio_device all * the time, it helps to have a curt macro to get a pointer to the struct * lguest_device it's enclosed in. */ #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) /*D:130 * Device configurations * * The configuration information for a device consists of one or more * virtqueues, a feature bitmap, and some configuration bytes. The * configuration bytes don't really matter to us: the Launcher sets them up, and * the driver will look at them during setup. 
* * A convenient routine to return the device's virtqueue config array: * immediately after the descriptor. */ static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) { return (void *)(desc + 1); } /* The features come immediately after the virtqueues. */ static u8 *lg_features(const struct lguest_device_desc *desc) { return (void *)(lg_vq(desc) + desc->num_vq); } /* The config space comes after the two feature bitmasks. */ static u8 *lg_config(const struct lguest_device_desc *desc) { return lg_features(desc) + desc->feature_len * 2; } /* The total size of the config page used by this device (incl. desc) */ static unsigned desc_size(const struct lguest_device_desc *desc) { return sizeof(*desc) + desc->num_vq * sizeof(struct lguest_vqconfig) + desc->feature_len * 2 + desc->config_len; } /* This gets the device's feature bits. */ static u32 lg_get_features(struct virtio_device *vdev) { unsigned int i; u32 features = 0; struct lguest_device_desc *desc = to_lgdev(vdev)->desc; u8 *in_features = lg_features(desc); /* We do this the slow but generic way. */ for (i = 0; i < min(desc->feature_len * 8, 32); i++) if (in_features[i / 8] & (1 << (i % 8))) features |= (1 << i); return features; } /* * The virtio core takes the features the Host offers, and copies the ones * supported by the driver into the vdev->features array. Once that's all * sorted out, this routine is called so we can tell the Host which features we * understand and accept. */ static void lg_finalize_features(struct virtio_device *vdev) { unsigned int i, bits; struct lguest_device_desc *desc = to_lgdev(vdev)->desc; /* Second half of bitmap is features we accept. */ u8 *out_features = lg_features(desc) + desc->feature_len; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* * The vdev->feature array is a Linux bitmask: this isn't the same as a * the simple array of bits used by lguest devices for features. 
So we * do this slow, manual conversion which is completely general. */ memset(out_features, 0, desc->feature_len); bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; for (i = 0; i < bits; i++) { if (test_bit(i, vdev->features)) out_features[i / 8] |= (1 << (i % 8)); } } /* Once they've found a field, getting a copy of it is easy. */ static void lg_get(struct virtio_device *vdev, unsigned int offset, void *buf, unsigned len) { struct lguest_device_desc *desc = to_lgdev(vdev)->desc; /* Check they didn't ask for more than the length of the config! */ BUG_ON(offset + len > desc->config_len); memcpy(buf, lg_config(desc) + offset, len); } /* Setting the contents is also trivial. */ static void lg_set(struct virtio_device *vdev, unsigned int offset, const void *buf, unsigned len) { struct lguest_device_desc *desc = to_lgdev(vdev)->desc; /* Check they didn't ask for more than the length of the config! */ BUG_ON(offset + len > desc->config_len); memcpy(lg_config(desc) + offset, buf, len); } /* * The operations to get and set the status word just access the status field * of the device descriptor. */ static u8 lg_get_status(struct virtio_device *vdev) { return to_lgdev(vdev)->desc->status; } /* * To notify on status updates, we (ab)use the NOTIFY hypercall, with the * descriptor address of the device. A zero status means "reset". */ static void set_status(struct virtio_device *vdev, u8 status) { unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; /* We set the status. 
*/ to_lgdev(vdev)->desc->status = status; hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); } static void lg_set_status(struct virtio_device *vdev, u8 status) { BUG_ON(!status); set_status(vdev, status); } static void lg_reset(struct virtio_device *vdev) { set_status(vdev, 0); } /* * Virtqueues * * The other piece of infrastructure virtio needs is a "virtqueue": a way of * the Guest device registering buffers for the other side to read from or * write into (ie. send and receive buffers). Each device can have multiple * virtqueues: for example the console driver uses one queue for sending and * another for receiving. * * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue * already exists in virtio_ring.c. We just need to connect it up. * * We start with the information we need to keep about each virtqueue. */ /*D:140 This is the information we remember about each virtqueue. */ struct lguest_vq_info { /* A copy of the information contained in the device config. */ struct lguest_vqconfig config; /* The address where we mapped the virtio ring, so we can unmap it. */ void *pages; }; /* * When the virtio_ring code wants to prod the Host, it calls us here and we * make a hypercall. We hand the physical address of the virtqueue so the Host * knows which virtqueue we're talking about. */ static void lg_notify(struct virtqueue *vq) { /* * We store our virtqueue information in the "priv" pointer of the * virtqueue structure. */ struct lguest_vq_info *lvq = vq->priv; hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); } /* An extern declaration inside a C file is bad form. Don't do it. */ extern void lguest_setup_irq(unsigned int irq); /* * This routine finds the Nth virtqueue described in the configuration of * this device and sets it up. * * This is kind of an ugly duckling. It'd be nicer to have a standard * representation of a virtqueue in the configuration space, but it seems that * everyone wants to do it differently. 
The KVM coders want the Guest to * allocate its own pages and tell the Host where they are, but for lguest it's * simpler for the Host to simply tell us where the pages are. */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name) { struct lguest_device *ldev = to_lgdev(vdev); struct lguest_vq_info *lvq; struct virtqueue *vq; int err; /* We must have this many virtqueues. */ if (index >= ldev->desc->num_vq) return ERR_PTR(-ENOENT); lvq = kmalloc(sizeof(*lvq), GFP_KERNEL); if (!lvq) return ERR_PTR(-ENOMEM); /* * Make a copy of the "struct lguest_vqconfig" entry, which sits after * the descriptor. We need a copy because the config space might not * be aligned correctly. */ memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); printk("Mapping virtqueue %i addr %lx\n", index, (unsigned long)lvq->config.pfn << PAGE_SHIFT); /* Figure out how many pages the ring will take, and map that memory */ lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, DIV_ROUND_UP(vring_size(lvq->config.num, LGUEST_VRING_ALIGN), PAGE_SIZE)); if (!lvq->pages) { err = -ENOMEM; goto free_lvq; } /* * OK, tell virtio_ring.c to set up a virtqueue now we know its size * and we've got a pointer to its pages. */ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev, lvq->pages, lg_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; } /* Make sure the interrupt is allocated. */ lguest_setup_irq(lvq->config.irq); /* * Tell the interrupt for this virtqueue to go to the virtio_ring * interrupt handler. * * FIXME: We used to have a flag for the Host to tell us we could use * the interrupt as a source of randomness: it'd be nice to have that * back. */ err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vq); if (err) goto destroy_vring; /* * Last of all we hook up our 'struct lguest_vq_info" to the * virtqueue's priv pointer. 
*/ vq->priv = lvq; return vq; destroy_vring: vring_del_virtqueue(vq); unmap: lguest_unmap(lvq->pages); free_lvq: kfree(lvq); return ERR_PTR(err); } /*:*/ /* Cleaning up a virtqueue is easy */ static void lg_del_vq(struct virtqueue *vq) { struct lguest_vq_info *lvq = vq->priv; /* Release the interrupt */ free_irq(lvq->config.irq, vq); /* Tell virtio_ring.c to free the virtqueue. */ vring_del_virtqueue(vq); /* Unmap the pages containing the ring. */ lguest_unmap(lvq->pages); /* Free our own queue information. */ kfree(lvq); } static void lg_del_vqs(struct virtio_device *vdev) { struct virtqueue *vq, *n; list_for_each_entry_safe(vq, n, &vdev->vqs, list) lg_del_vq(vq); } static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { struct lguest_device *ldev = to_lgdev(vdev); int i; /* We must have this many virtqueues. */ if (nvqs > ldev->desc->num_vq) return -ENOENT; for (i = 0; i < nvqs; ++i) { vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); if (IS_ERR(vqs[i])) goto error; } return 0; error: lg_del_vqs(vdev); return PTR_ERR(vqs[i]); } /* The ops structure which hooks everything together. */ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, .finalize_features = lg_finalize_features, .get = lg_get, .set = lg_set, .get_status = lg_get_status, .set_status = lg_set_status, .reset = lg_reset, .find_vqs = lg_find_vqs, .del_vqs = lg_del_vqs, }; /* * The root device for the lguest virtio devices. This makes them appear as * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */ static struct device *lguest_root; /*D:120 * This is the core of the lguest bus: actually adding a new device. * It's a separate function because it's neater that way, and because an * earlier version of the code supported hotplug and unplug. They were removed * early on because they were never used. * * As Andrew Tridgell says, "Untested code is buggy code". 
* * It's worth reading this carefully: we start with a pointer to the new device * descriptor in the "lguest_devices" page, and the offset into the device * descriptor page so we can uniquely identify it if things go badly wrong. */ static void add_lguest_device(struct lguest_device_desc *d, unsigned int offset) { struct lguest_device *ldev; /* Start with zeroed memory; Linux's device layer counts on it. */ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", offset, d->type); return; } /* This devices' parent is the lguest/ dir. */ ldev->vdev.dev.parent = lguest_root; /* * The device type comes straight from the descriptor. There's also a * device vendor field in the virtio_device struct, which we leave as * 0. */ ldev->vdev.id.device = d->type; /* * We have a simple set of routines for querying the device's * configuration information and setting its status. */ ldev->vdev.config = &lguest_config_ops; /* And we remember the device's descriptor for lguest_config_ops. */ ldev->desc = d; /* * register_virtio_device() sets up the generic fields for the struct * virtio_device and calls device_register(). This makes the bus * infrastructure look for a matching driver. */ if (register_virtio_device(&ldev->vdev) != 0) { printk(KERN_ERR "Failed to register lguest dev %u type %u\n", offset, d->type); kfree(ldev); } } /*D:110 * scan_devices() simply iterates through the device page. The type 0 is * reserved to mean "end of devices". */ static void scan_devices(void) { unsigned int i; struct lguest_device_desc *d; /* We start at the page beginning, and skip over each entry. */ for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { d = lguest_devices + i; /* Once we hit a zero, stop. */ if (d->type == 0) break; printk("Device at %i has size %u\n", i, desc_size(d)); add_lguest_device(d, i); } } /*D:105 * Fairly early in boot, lguest_devices_init() is called to set up the * lguest device infrastructure. 
We check that we are a Guest by checking * pv_info.name: there are other ways of checking, but this seems most * obvious to me. * * So we can access the "struct lguest_device_desc"s easily, we map that memory * and store the pointer in the global "lguest_devices". Then we register a * root device from which all our devices will hang (this seems to be the * correct sysfs incantation). * * Finally we call scan_devices() which adds all the devices found in the * lguest_devices page. */ static int __init lguest_devices_init(void) { if (strcmp(pv_info.name, "lguest") != 0) return 0; lguest_root = root_device_register("lguest"); if (IS_ERR(lguest_root)) panic("Could not register lguest root"); /* Devices are in a single page above top of "normal" mem */ lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1); scan_devices(); return 0; } /* We do this after core stuff, but before the drivers. */ postcore_initcall(lguest_devices_init); /*D:150 * At this point in the journey we used to now wade through the lguest * devices themselves: net, block and console. Since they're all now virtio * devices rather than lguest-specific, I've decided to ignore them. Mostly, * they're kind of boring. But this does mean you'll never experience the * thrill of reading the forbidden love scene buried deep in the block driver. * * "make Launcher" beckons, where we answer questions like "Where do Guests * come from?", and "What do you do when someone asks for optimization?". */
gpl-2.0
JmzTaylor/android_kernel_htc_a32e
arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
4090
3201
/* * arch/arm/mach-orion5x/rd88f6183-ap-ge-setup.c * * Marvell Orion-1-90 AP GE Reference Design Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = { .phy_addr = -1, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = { .port_names[0] = "lan1", .port_names[1] = "lan2", .port_names[2] = "lan3", .port_names[3] = "lan4", .port_names[4] = "wan", .port_names[5] = "cpu", }; static struct dsa_platform_data rd88f6183ap_ge_switch_plat_data = { .nr_chips = 1, .chip = &rd88f6183ap_ge_switch_chip_data, }; static struct mtd_partition rd88f6183ap_ge_partitions[] = { { .name = "kernel", .offset = 0x00000000, .size = 0x00200000, }, { .name = "rootfs", .offset = 0x00200000, .size = 0x00500000, }, { .name = "nvram", .offset = 0x00700000, .size = 0x00080000, }, }; static struct flash_platform_data rd88f6183ap_ge_spi_slave_data = { .type = "m25p64", .nr_parts = ARRAY_SIZE(rd88f6183ap_ge_partitions), .parts = rd88f6183ap_ge_partitions, }; static struct spi_board_info __initdata rd88f6183ap_ge_spi_slave_info[] = { { .modalias = "m25p80", .platform_data = &rd88f6183ap_ge_spi_slave_data, .irq = NO_IRQ, .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 0, }, }; static void __init rd88f6183ap_ge_init(void) { /* * Setup basic Orion functions. Need to be called early. 
*/ orion5x_init(); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f6183ap_ge_eth_data); orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data, gpio_to_irq(3)); spi_register_board_info(rd88f6183ap_ge_spi_slave_info, ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); orion5x_spi_init(); orion5x_uart0_init(); } static struct hw_pci rd88f6183ap_ge_pci __initdata = { .nr_controllers = 2, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = orion5x_pci_map_irq, }; static int __init rd88f6183ap_ge_pci_init(void) { if (machine_is_rd88f6183ap_ge()) { orion5x_pci_disable(); pci_common_init(&rd88f6183ap_ge_pci); } return 0; } subsys_initcall(rd88f6183ap_ge_pci_init); MACHINE_START(RD88F6183AP_GE, "Marvell Orion-1-90 AP GE Reference Design") /* Maintainer: Lennert Buytenhek <buytenh@marvell.com> */ .atag_offset = 0x100, .init_machine = rd88f6183ap_ge_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .init_time = orion5x_timer_init, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
SlimRoms/kernel_motorola_msm8960dt-common
fs/udf/partition.c
8698
8854
/* * partition.c * * PURPOSE * Partition handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998-2001 Ben Fennema * * HISTORY * * 12/06/98 blf Created file. * */ #include "udfdecl.h" #include "udf_sb.h" #include "udf_i.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/mutex.h> uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; if (partition >= sbi->s_partitions) { udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n", block, partition, offset); return 0xFFFFFFFF; } map = &sbi->s_partmaps[partition]; if (map->s_partition_func) return map->s_partition_func(sb, block, partition, offset); else return map->s_partition_root + block + offset; } uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct buffer_head *bh = NULL; uint32_t newblock; uint32_t index; uint32_t loc; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_virtual_data *vdata; struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode); map = &sbi->s_partmaps[partition]; vdata = &map->s_type_specific.s_virtual; if (block > vdata->s_num_entries) { udf_debug("Trying to access block beyond end of VAT (%d max %d)\n", block, vdata->s_num_entries); return 0xFFFFFFFF; } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data + vdata->s_start_offset))[block]); goto translate; } index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t); if (block >= index) { block -= index; newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t))); index = block % 
(sb->s_blocksize / sizeof(uint32_t)); } else { newblock = 0; index = vdata->s_start_offset / sizeof(uint32_t) + block; } loc = udf_block_map(sbi->s_vat_inode, newblock); bh = sb_bread(sb, loc); if (!bh) { udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n", sb, block, partition, loc, index); return 0xFFFFFFFF; } loc = le32_to_cpu(((__le32 *)bh->b_data)[index]); brelse(bh); translate: if (iinfo->i_location.partitionReferenceNum == partition) { udf_debug("recursive call to udf_get_pblock!\n"); return 0xFFFFFFFF; } return udf_get_pblock(sb, loc, iinfo->i_location.partitionReferenceNum, offset); } inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { return udf_get_pblock_virt15(sb, block, partition, offset); } uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { int i; struct sparingTable *st = NULL; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; uint32_t packet; struct udf_sparing_data *sdata; map = &sbi->s_partmaps[partition]; sdata = &map->s_type_specific.s_sparing; packet = (block + offset) & ~(sdata->s_packet_len - 1); for (i = 0; i < 4; i++) { if (sdata->s_spar_map[i] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[i]->b_data; break; } } if (st) { for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { struct sparingEntry *entry = &st->mapEntry[i]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc >= 0xFFFFFFF0) break; else if (origLoc == packet) return le32_to_cpu(entry->mappedLocation) + ((block + offset) & (sdata->s_packet_len - 1)); else if (origLoc > packet) break; } } return map->s_partition_root + block + offset; } int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) { struct udf_sparing_data *sdata; struct sparingTable *st = NULL; struct sparingEntry mapEntry; uint32_t packet; int i, j, k, l; struct udf_sb_info *sbi = UDF_SB(sb); u16 reallocationTableLen; 
struct buffer_head *bh; int ret = 0; mutex_lock(&sbi->s_alloc_mutex); for (i = 0; i < sbi->s_partitions; i++) { struct udf_part_map *map = &sbi->s_partmaps[i]; if (old_block > map->s_partition_root && old_block < map->s_partition_root + map->s_partition_len) { sdata = &map->s_type_specific.s_sparing; packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1); for (j = 0; j < 4; j++) if (sdata->s_spar_map[j] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[j]->b_data; break; } if (!st) { ret = 1; goto out; } reallocationTableLen = le16_to_cpu(st->reallocationTableLen); for (k = 0; k < reallocationTableLen; k++) { struct sparingEntry *entry = &st->mapEntry[k]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc == 0xFFFFFFFF) { for (; j < 4; j++) { int len; bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *) bh->b_data; entry->origLocation = cpu_to_le32(packet); len = sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct sparingEntry); udf_update_tag((char *)st, len); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc == packet) { *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc > packet) break; } for (l = k; l < reallocationTableLen; l++) { struct sparingEntry *entry = &st->mapEntry[l]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc != 0xFFFFFFFF) continue; for (; j < 4; j++) { bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *)bh->b_data; mapEntry = st->mapEntry[l]; mapEntry.origLocation = cpu_to_le32(packet); memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry)); st->mapEntry[k] = mapEntry; udf_update_tag((char *)st, sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct 
sparingEntry)); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( st->mapEntry[k].mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } ret = 1; goto out; } /* if old_block */ } if (i == sbi->s_partitions) { /* outside of partitions */ /* for now, fail =) */ ret = 1; } out: mutex_unlock(&sbi->s_alloc_mutex); return ret; } static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block, uint16_t partition, uint32_t offset) { struct super_block *sb = inode->i_sb; struct udf_part_map *map; struct kernel_lb_addr eloc; uint32_t elen; sector_t ext_offset; struct extent_position epos = {}; uint32_t phyblock; if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) != (EXT_RECORDED_ALLOCATED >> 30)) phyblock = 0xFFFFFFFF; else { map = &UDF_SB(sb)->s_partmaps[partition]; /* map to sparable/physical partition desc */ phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, map->s_partition_num, ext_offset + offset); } brelse(epos.bh); return phyblock; } uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_meta_data *mdata; uint32_t retblk; struct inode *inode; udf_debug("READING from METADATA\n"); map = &sbi->s_partmaps[partition]; mdata = &map->s_type_specific.s_metadata; inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; /* We shouldn't mount such media... */ BUG_ON(!inode); retblk = udf_try_read_meta(inode, block, partition, offset); if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, map->s_partition_num); mdata->s_flags |= MF_MIRROR_FE_LOADED; } inode = mdata->s_mirror_fe; if (!inode) return 0xFFFFFFFF; retblk = udf_try_read_meta(inode, block, partition, offset); } return retblk; }
gpl-2.0
estiko/android_lenovo_a706_kinglplite
lib/cpu-notifier-error-inject.c
9210
1565
#include <linux/kernel.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/notifier.h> static int priority; static int cpu_up_prepare_error; static int cpu_down_prepare_error; module_param(priority, int, 0); MODULE_PARM_DESC(priority, "specify cpu notifier priority"); module_param(cpu_up_prepare_error, int, 0644); MODULE_PARM_DESC(cpu_up_prepare_error, "specify error code to inject CPU_UP_PREPARE action"); module_param(cpu_down_prepare_error, int, 0644); MODULE_PARM_DESC(cpu_down_prepare_error, "specify error code to inject CPU_DOWN_PREPARE action"); static int err_inject_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int err = 0; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: err = cpu_up_prepare_error; break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: err = cpu_down_prepare_error; break; } if (err) printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err); return notifier_from_errno(err); } static struct notifier_block err_inject_cpu_notifier = { .notifier_call = err_inject_cpu_callback, }; static int err_inject_init(void) { err_inject_cpu_notifier.priority = priority; return register_hotcpu_notifier(&err_inject_cpu_notifier); } static void err_inject_exit(void) { unregister_hotcpu_notifier(&err_inject_cpu_notifier); } module_init(err_inject_init); module_exit(err_inject_exit); MODULE_DESCRIPTION("CPU notifier error injection module"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
gpl-2.0
18712886438/android_kernel_motorola_quark
drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
10490
3865
/* * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver * * Copyright (C) 2007 Domen Puncer, Telargo, Inc. * Copyright (C) 2008 Wolfram Sang, Pengutronix * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/of_mdio.h> #include <asm/io.h> #include <asm/mpc52xx.h> #include "fec_mpc52xx.h" struct mpc52xx_fec_mdio_priv { struct mpc52xx_fec __iomem *regs; int mdio_irqs[PHY_MAX_ADDR]; }; static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, int reg, u32 value) { struct mpc52xx_fec_mdio_priv *priv = bus->priv; struct mpc52xx_fec __iomem *fec = priv->regs; int tries = 3; value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; out_be32(&fec->ievent, FEC_IEVENT_MII); out_be32(&fec->mii_data, value); /* wait for it to finish, this takes about 23 us on lite5200b */ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) msleep(1); if (!tries) return -ETIMEDOUT; return value & FEC_MII_DATA_OP_RD ? 
in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0; } static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) { return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME); } static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, data | FEC_MII_WRITE_FRAME); } static int mpc52xx_fec_mdio_probe(struct platform_device *of) { struct device *dev = &of->dev; struct device_node *np = of->dev.of_node; struct mii_bus *bus; struct mpc52xx_fec_mdio_priv *priv; struct resource res; int err; bus = mdiobus_alloc(); if (bus == NULL) return -ENOMEM; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { err = -ENOMEM; goto out_free; } bus->name = "mpc52xx MII bus"; bus->read = mpc52xx_fec_mdio_read; bus->write = mpc52xx_fec_mdio_write; /* setup irqs */ bus->irq = priv->mdio_irqs; /* setup registers */ err = of_address_to_resource(np, 0, &res); if (err) goto out_free; priv->regs = ioremap(res.start, resource_size(&res)); if (priv->regs == NULL) { err = -ENOMEM; goto out_free; } snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); bus->priv = priv; bus->parent = dev; dev_set_drvdata(dev, bus); /* set MII speed */ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1); err = of_mdiobus_register(bus, np); if (err) goto out_unmap; return 0; out_unmap: iounmap(priv->regs); out_free: kfree(priv); mdiobus_free(bus); return err; } static int mpc52xx_fec_mdio_remove(struct platform_device *of) { struct device *dev = &of->dev; struct mii_bus *bus = dev_get_drvdata(dev); struct mpc52xx_fec_mdio_priv *priv = bus->priv; mdiobus_unregister(bus); dev_set_drvdata(dev, NULL); iounmap(priv->regs); kfree(priv); mdiobus_free(bus); return 0; } static struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "fsl,mpc5200b-mdio", }, { .compatible = "fsl,mpc5200-mdio", }, { .compatible = "mpc5200b-fec-phy", }, {} }; 
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); struct platform_driver mpc52xx_fec_mdio_driver = { .driver = { .name = "mpc5200b-fec-phy", .owner = THIS_MODULE, .of_match_table = mpc52xx_fec_mdio_match, }, .probe = mpc52xx_fec_mdio_probe, .remove = mpc52xx_fec_mdio_remove, }; /* let fec driver call it, since this has to be registered before it */ EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
artemh/asuswrt-merlin
release/src-rt-7.x.main/src/linux/linux-2.6.36/arch/sparc/prom/bootstr_64.c
12026
1055
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright(C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/string.h> #include <linux/init.h> #include <asm/oplib.h> /* WARNING: The boot loader knows that these next three variables come one right * after another in the .data section. Do not move this stuff into * the .bss section or it will break things. */ #define BARG_LEN 256 struct { int bootstr_len; int bootstr_valid; char bootstr_buf[BARG_LEN]; } bootstr_info = { .bootstr_len = BARG_LEN, #ifdef CONFIG_CMDLINE .bootstr_valid = 1, .bootstr_buf = CONFIG_CMDLINE, #endif }; char * __init prom_getbootargs(void) { /* This check saves us from a panic when bootfd patches args. */ if (bootstr_info.bootstr_valid) return bootstr_info.bootstr_buf; prom_getstring(prom_chosen_node, "bootargs", bootstr_info.bootstr_buf, BARG_LEN); bootstr_info.bootstr_valid = 1; return bootstr_info.bootstr_buf; }
gpl-2.0
souljaboy11792/linux
sound/pci/emu10k1/irq.c
12794
6691
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Creative Labs, Inc.
 *  Routines for IRQ control of EMU10K1 chips
 *
 *  BUGS:
 *    --
 *
 *  TODO:
 *    --
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>

/*
 * Top-level interrupt handler: loop reading the IPR (interrupt pending)
 * register, dispatch each pending source to its registered sub-handler
 * (or disable the source if none is registered), then acknowledge the
 * originally-read bits.  The loop is bounded to 1000 iterations as a
 * guard against a stuck interrupt line.
 */
irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id)
{
	struct snd_emu10k1 *emu = dev_id;
	unsigned int status, status2, orig_status, orig_status2;
	int handled = 0;
	int timeout = 0;

	while (((status = inl(emu->port + IPR)) != 0) && (timeout < 1000)) {
		timeout++;
		/* Remember the full pending set for the final ack write. */
		orig_status = status;
		handled = 1;
		/* All-ones from a PCI read usually means the card is gone. */
		if ((status & 0xffffffff) == 0xffffffff) {
			snd_printk(KERN_INFO "snd-emu10k1: Suspected sound card removal\n");
			break;
		}
		if (status & IPR_PCIERROR) {
			snd_printk(KERN_ERR "interrupt: PCI error\n");
			snd_emu10k1_intr_disable(emu, INTE_PCIERRORENABLE);
			status &= ~IPR_PCIERROR;
		}
		/* Hardware volume buttons (volume up/down, mute). */
		if (status & (IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE)) {
			if (emu->hwvol_interrupt)
				emu->hwvol_interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_VOLINCRENABLE|INTE_VOLDECRENABLE|INTE_MUTEENABLE);
			status &= ~(IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE);
		}
		/* Per-voice loop interrupts: scan the CLIP/HLIP bitmaps. */
		if (status & IPR_CHANNELLOOP) {
			int voice;
			int voice_max = status & IPR_CHANNELNUMBERMASK;
			u32 val;
			struct snd_emu10k1_voice *pvoice = emu->voices;

			/* Full-loop bitmap: low 32 voices first, high 32 at 0x20. */
			val = snd_emu10k1_ptr_read(emu, CLIPL, 0);
			for (voice = 0; voice <= voice_max; voice++) {
				if (voice == 0x20)
					val = snd_emu10k1_ptr_read(emu, CLIPH, 0);
				if (val & 1) {
					if (pvoice->use && pvoice->interrupt != NULL) {
						pvoice->interrupt(emu, pvoice);
						snd_emu10k1_voice_intr_ack(emu, voice);
					} else {
						snd_emu10k1_voice_intr_disable(emu, voice);
					}
				}
				val >>= 1;
				pvoice++;
			}
			/* Same scan for the half-loop bitmap. */
			val = snd_emu10k1_ptr_read(emu, HLIPL, 0);
			for (voice = 0; voice <= voice_max; voice++) {
				if (voice == 0x20)
					val = snd_emu10k1_ptr_read(emu, HLIPH, 0);
				if (val & 1) {
					if (pvoice->use && pvoice->interrupt != NULL) {
						pvoice->interrupt(emu, pvoice);
						snd_emu10k1_voice_half_loop_intr_ack(emu, voice);
					} else {
						snd_emu10k1_voice_half_loop_intr_disable(emu, voice);
					}
				}
				val >>= 1;
				pvoice++;
			}
			status &= ~IPR_CHANNELLOOP;
		}
		status &= ~IPR_CHANNELNUMBERMASK;
		if (status & (IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL)) {
			if (emu->capture_interrupt)
				emu->capture_interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_ADCBUFENABLE);
			status &= ~(IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL);
		}
		if (status & (IPR_MICBUFFULL|IPR_MICBUFHALFFULL)) {
			if (emu->capture_mic_interrupt)
				emu->capture_mic_interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_MICBUFENABLE);
			status &= ~(IPR_MICBUFFULL|IPR_MICBUFHALFFULL);
		}
		if (status & (IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL)) {
			if (emu->capture_efx_interrupt)
				emu->capture_efx_interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_EFXBUFENABLE);
			status &= ~(IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL);
		}
		if (status & (IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY)) {
			if (emu->midi.interrupt)
				emu->midi.interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_MIDITXENABLE|INTE_MIDIRXENABLE);
			status &= ~(IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY);
		}
		/* Second MIDI port (Audigy). */
		if (status & (IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2)) {
			if (emu->midi2.interrupt)
				emu->midi2.interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_A_MIDITXENABLE2|INTE_A_MIDIRXENABLE2);
			status &= ~(IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2);
		}
		if (status & IPR_INTERVALTIMER) {
			if (emu->timer)
				snd_timer_interrupt(emu->timer, emu->timer->sticks);
			else
				snd_emu10k1_intr_disable(emu, INTE_INTERVALTIMERENB);
			status &= ~IPR_INTERVALTIMER;
		}
		if (status & (IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE)) {
			if (emu->spdif_interrupt)
				emu->spdif_interrupt(emu, status);
			else
				snd_emu10k1_intr_disable(emu, INTE_GPSPDIFENABLE|INTE_CDSPDIFENABLE);
			status &= ~(IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE);
		}
		if (status & IPR_FXDSP) {
			if (emu->dsp_interrupt)
				emu->dsp_interrupt(emu);
			else
				snd_emu10k1_intr_disable(emu, INTE_FXDSPENABLE);
			status &= ~IPR_FXDSP;
		}
		/* P16V half: drains its own secondary pending register (IPR2). */
		if (status & IPR_P16V) {
			while ((status2 = inl(emu->port + IPR2)) != 0) {
				u32 mask = INTE2_PLAYBACK_CH_0_LOOP;  /* Full Loop */
				struct snd_emu10k1_voice *pvoice = &(emu->p16v_voices[0]);
				struct snd_emu10k1_voice *cvoice = &(emu->p16v_capture_voice);

				//printk(KERN_INFO "status2=0x%x\n", status2);
				orig_status2 = status2;
				if(status2 & mask) {
					if(pvoice->use) {
						snd_pcm_period_elapsed(pvoice->epcm->substream);
					} else {
						snd_printk(KERN_ERR "p16v: status: 0x%08x, mask=0x%08x, pvoice=%p, use=%d\n", status2, mask, pvoice, pvoice->use);
					}
				}
				/* 0x110000: P16V capture bits — presumably, verify against IPR2 layout. */
				if(status2 & 0x110000) {
					//printk(KERN_INFO "capture int found\n");
					if(cvoice->use) {
						//printk(KERN_INFO "capture period_elapsed\n");
						snd_pcm_period_elapsed(cvoice->epcm->substream);
					}
				}
				outl(orig_status2, emu->port + IPR2); /* ack all */
			}
			status &= ~IPR_P16V;
		}

		/* Anything still set is unexpected: log it and disable the sources. */
		if (status) {
			unsigned int bits;
			snd_printk(KERN_ERR "emu10k1: unhandled interrupt: 0x%08x\n", status);
			//make sure any interrupts we don't handle are disabled:
			bits = INTE_FXDSPENABLE |
				INTE_PCIERRORENABLE |
				INTE_VOLINCRENABLE |
				INTE_VOLDECRENABLE |
				INTE_MUTEENABLE |
				INTE_MICBUFENABLE |
				INTE_ADCBUFENABLE |
				INTE_EFXBUFENABLE |
				INTE_GPSPDIFENABLE |
				INTE_CDSPDIFENABLE |
				INTE_INTERVALTIMERENB |
				INTE_MIDITXENABLE |
				INTE_MIDIRXENABLE;
			if (emu->audigy)
				bits |= INTE_A_MIDITXENABLE2 | INTE_A_MIDIRXENABLE2;
			snd_emu10k1_intr_disable(emu, bits);
		}
		outl(orig_status, emu->port + IPR); /* ack all */
	}
	if (timeout == 1000)
		snd_printk(KERN_INFO "emu10k1 irq routine failure\n");
	return IRQ_RETVAL(handled);
}
gpl-2.0
Chong-Li/VATC-3.3.7
drivers/media/video/ov772x.c
251
34563
/* * ov772x Camera Driver * * Copyright (C) 2008 Renesas Solutions Corp. * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on ov7670 and soc_camera_platform driver, * * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net> * Copyright (C) 2008 Magnus Damm * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> #include <media/ov772x.h> #include <media/soc_camera.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-subdev.h> /* * register offset */ #define GAIN 0x00 /* AGC - Gain control gain setting */ #define BLUE 0x01 /* AWB - Blue channel gain setting */ #define RED 0x02 /* AWB - Red channel gain setting */ #define GREEN 0x03 /* AWB - Green channel gain setting */ #define COM1 0x04 /* Common control 1 */ #define BAVG 0x05 /* U/B Average Level */ #define GAVG 0x06 /* Y/Gb Average Level */ #define RAVG 0x07 /* V/R Average Level */ #define AECH 0x08 /* Exposure Value - AEC MSBs */ #define COM2 0x09 /* Common control 2 */ #define PID 0x0A /* Product ID Number MSB */ #define VER 0x0B /* Product ID Number LSB */ #define COM3 0x0C /* Common control 3 */ #define COM4 0x0D /* Common control 4 */ #define COM5 0x0E /* Common control 5 */ #define COM6 0x0F /* Common control 6 */ #define AEC 0x10 /* Exposure Value */ #define CLKRC 0x11 /* Internal clock */ #define COM7 0x12 /* Common control 7 */ #define COM8 0x13 /* Common control 8 */ #define COM9 0x14 /* Common control 9 */ #define COM10 0x15 /* Common control 10 */ #define REG16 0x16 /* Register 16 */ #define HSTART 0x17 /* Horizontal sensor size */ #define HSIZE 0x18 /* Horizontal frame (HREF 
column) end high 8-bit */ #define VSTART 0x19 /* Vertical frame (row) start high 8-bit */ #define VSIZE 0x1A /* Vertical sensor size */ #define PSHFT 0x1B /* Data format - pixel delay select */ #define MIDH 0x1C /* Manufacturer ID byte - high */ #define MIDL 0x1D /* Manufacturer ID byte - low */ #define LAEC 0x1F /* Fine AEC value */ #define COM11 0x20 /* Common control 11 */ #define BDBASE 0x22 /* Banding filter Minimum AEC value */ #define DBSTEP 0x23 /* Banding filter Maximum Setp */ #define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */ #define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */ #define VPT 0x26 /* AGC/AEC Fast mode operating region */ #define REG28 0x28 /* Register 28 */ #define HOUTSIZE 0x29 /* Horizontal data output size MSBs */ #define EXHCH 0x2A /* Dummy pixel insert MSB */ #define EXHCL 0x2B /* Dummy pixel insert LSB */ #define VOUTSIZE 0x2C /* Vertical data output size MSBs */ #define ADVFL 0x2D /* LSB of insert dummy lines in Vertical direction */ #define ADVFH 0x2E /* MSG of insert dummy lines in Vertical direction */ #define YAVE 0x2F /* Y/G Channel Average value */ #define LUMHTH 0x30 /* Histogram AEC/AGC Luminance high level threshold */ #define LUMLTH 0x31 /* Histogram AEC/AGC Luminance low level threshold */ #define HREF 0x32 /* Image start and size control */ #define DM_LNL 0x33 /* Dummy line low 8 bits */ #define DM_LNH 0x34 /* Dummy line high 8 bits */ #define ADOFF_B 0x35 /* AD offset compensation value for B channel */ #define ADOFF_R 0x36 /* AD offset compensation value for R channel */ #define ADOFF_GB 0x37 /* AD offset compensation value for Gb channel */ #define ADOFF_GR 0x38 /* AD offset compensation value for Gr channel */ #define OFF_B 0x39 /* Analog process B channel offset value */ #define OFF_R 0x3A /* Analog process R channel offset value */ #define OFF_GB 0x3B /* Analog process Gb channel offset value */ #define OFF_GR 0x3C /* Analog process Gr channel offset value */ #define COM12 0x3D 
/* Common control 12 */ #define COM13 0x3E /* Common control 13 */ #define COM14 0x3F /* Common control 14 */ #define COM15 0x40 /* Common control 15*/ #define COM16 0x41 /* Common control 16 */ #define TGT_B 0x42 /* BLC blue channel target value */ #define TGT_R 0x43 /* BLC red channel target value */ #define TGT_GB 0x44 /* BLC Gb channel target value */ #define TGT_GR 0x45 /* BLC Gr channel target value */ /* for ov7720 */ #define LCC0 0x46 /* Lens correction control 0 */ #define LCC1 0x47 /* Lens correction option 1 - X coordinate */ #define LCC2 0x48 /* Lens correction option 2 - Y coordinate */ #define LCC3 0x49 /* Lens correction option 3 */ #define LCC4 0x4A /* Lens correction option 4 - radius of the circular */ #define LCC5 0x4B /* Lens correction option 5 */ #define LCC6 0x4C /* Lens correction option 6 */ /* for ov7725 */ #define LC_CTR 0x46 /* Lens correction control */ #define LC_XC 0x47 /* X coordinate of lens correction center relative */ #define LC_YC 0x48 /* Y coordinate of lens correction center relative */ #define LC_COEF 0x49 /* Lens correction coefficient */ #define LC_RADI 0x4A /* Lens correction radius */ #define LC_COEFB 0x4B /* Lens B channel compensation coefficient */ #define LC_COEFR 0x4C /* Lens R channel compensation coefficient */ #define FIXGAIN 0x4D /* Analog fix gain amplifer */ #define AREF0 0x4E /* Sensor reference control */ #define AREF1 0x4F /* Sensor reference current control */ #define AREF2 0x50 /* Analog reference control */ #define AREF3 0x51 /* ADC reference control */ #define AREF4 0x52 /* ADC reference control */ #define AREF5 0x53 /* ADC reference control */ #define AREF6 0x54 /* Analog reference control */ #define AREF7 0x55 /* Analog reference control */ #define UFIX 0x60 /* U channel fixed value output */ #define VFIX 0x61 /* V channel fixed value output */ #define AWBB_BLK 0x62 /* AWB option for advanced AWB */ #define AWB_CTRL0 0x63 /* AWB control byte 0 */ #define DSP_CTRL1 0x64 /* DSP control byte 1 */ #define 
DSP_CTRL2 0x65 /* DSP control byte 2 */ #define DSP_CTRL3 0x66 /* DSP control byte 3 */ #define DSP_CTRL4 0x67 /* DSP control byte 4 */ #define AWB_BIAS 0x68 /* AWB BLC level clip */ #define AWB_CTRL1 0x69 /* AWB control 1 */ #define AWB_CTRL2 0x6A /* AWB control 2 */ #define AWB_CTRL3 0x6B /* AWB control 3 */ #define AWB_CTRL4 0x6C /* AWB control 4 */ #define AWB_CTRL5 0x6D /* AWB control 5 */ #define AWB_CTRL6 0x6E /* AWB control 6 */ #define AWB_CTRL7 0x6F /* AWB control 7 */ #define AWB_CTRL8 0x70 /* AWB control 8 */ #define AWB_CTRL9 0x71 /* AWB control 9 */ #define AWB_CTRL10 0x72 /* AWB control 10 */ #define AWB_CTRL11 0x73 /* AWB control 11 */ #define AWB_CTRL12 0x74 /* AWB control 12 */ #define AWB_CTRL13 0x75 /* AWB control 13 */ #define AWB_CTRL14 0x76 /* AWB control 14 */ #define AWB_CTRL15 0x77 /* AWB control 15 */ #define AWB_CTRL16 0x78 /* AWB control 16 */ #define AWB_CTRL17 0x79 /* AWB control 17 */ #define AWB_CTRL18 0x7A /* AWB control 18 */ #define AWB_CTRL19 0x7B /* AWB control 19 */ #define AWB_CTRL20 0x7C /* AWB control 20 */ #define AWB_CTRL21 0x7D /* AWB control 21 */ #define GAM1 0x7E /* Gamma Curve 1st segment input end point */ #define GAM2 0x7F /* Gamma Curve 2nd segment input end point */ #define GAM3 0x80 /* Gamma Curve 3rd segment input end point */ #define GAM4 0x81 /* Gamma Curve 4th segment input end point */ #define GAM5 0x82 /* Gamma Curve 5th segment input end point */ #define GAM6 0x83 /* Gamma Curve 6th segment input end point */ #define GAM7 0x84 /* Gamma Curve 7th segment input end point */ #define GAM8 0x85 /* Gamma Curve 8th segment input end point */ #define GAM9 0x86 /* Gamma Curve 9th segment input end point */ #define GAM10 0x87 /* Gamma Curve 10th segment input end point */ #define GAM11 0x88 /* Gamma Curve 11th segment input end point */ #define GAM12 0x89 /* Gamma Curve 12th segment input end point */ #define GAM13 0x8A /* Gamma Curve 13th segment input end point */ #define GAM14 0x8B /* Gamma Curve 14th segment 
input end point */ #define GAM15 0x8C /* Gamma Curve 15th segment input end point */ #define SLOP 0x8D /* Gamma curve highest segment slope */ #define DNSTH 0x8E /* De-noise threshold */ #define EDGE_STRNGT 0x8F /* Edge strength control when manual mode */ #define EDGE_TRSHLD 0x90 /* Edge threshold control when manual mode */ #define DNSOFF 0x91 /* Auto De-noise threshold control */ #define EDGE_UPPER 0x92 /* Edge strength upper limit when Auto mode */ #define EDGE_LOWER 0x93 /* Edge strength lower limit when Auto mode */ #define MTX1 0x94 /* Matrix coefficient 1 */ #define MTX2 0x95 /* Matrix coefficient 2 */ #define MTX3 0x96 /* Matrix coefficient 3 */ #define MTX4 0x97 /* Matrix coefficient 4 */ #define MTX5 0x98 /* Matrix coefficient 5 */ #define MTX6 0x99 /* Matrix coefficient 6 */ #define MTX_CTRL 0x9A /* Matrix control */ #define BRIGHT 0x9B /* Brightness control */ #define CNTRST 0x9C /* Contrast contrast */ #define CNTRST_CTRL 0x9D /* Contrast contrast center */ #define UVAD_J0 0x9E /* Auto UV adjust contrast 0 */ #define UVAD_J1 0x9F /* Auto UV adjust contrast 1 */ #define SCAL0 0xA0 /* Scaling control 0 */ #define SCAL1 0xA1 /* Scaling control 1 */ #define SCAL2 0xA2 /* Scaling control 2 */ #define FIFODLYM 0xA3 /* FIFO manual mode delay control */ #define FIFODLYA 0xA4 /* FIFO auto mode delay control */ #define SDE 0xA6 /* Special digital effect control */ #define USAT 0xA7 /* U component saturation control */ #define VSAT 0xA8 /* V component saturation control */ /* for ov7720 */ #define HUE0 0xA9 /* Hue control 0 */ #define HUE1 0xAA /* Hue control 1 */ /* for ov7725 */ #define HUECOS 0xA9 /* Cosine value */ #define HUESIN 0xAA /* Sine value */ #define SIGN 0xAB /* Sign bit for Hue and contrast */ #define DSPAUTO 0xAC /* DSP auto function ON/OFF control */ /* * register detail */ /* COM2 */ #define SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */ /* Output drive capability */ #define OCAP_1x 0x00 /* 1x */ #define OCAP_2x 0x01 /* 2x */ #define OCAP_3x 0x02 
/* 3x */ #define OCAP_4x 0x03 /* 4x */ /* COM3 */ #define SWAP_MASK (SWAP_RGB | SWAP_YUV | SWAP_ML) #define IMG_MASK (VFLIP_IMG | HFLIP_IMG) #define VFLIP_IMG 0x80 /* Vertical flip image ON/OFF selection */ #define HFLIP_IMG 0x40 /* Horizontal mirror image ON/OFF selection */ #define SWAP_RGB 0x20 /* Swap B/R output sequence in RGB mode */ #define SWAP_YUV 0x10 /* Swap Y/UV output sequence in YUV mode */ #define SWAP_ML 0x08 /* Swap output MSB/LSB */ /* Tri-state option for output clock */ #define NOTRI_CLOCK 0x04 /* 0: Tri-state at this period */ /* 1: No tri-state at this period */ /* Tri-state option for output data */ #define NOTRI_DATA 0x02 /* 0: Tri-state at this period */ /* 1: No tri-state at this period */ #define SCOLOR_TEST 0x01 /* Sensor color bar test pattern */ /* COM4 */ /* PLL frequency control */ #define PLL_BYPASS 0x00 /* 00: Bypass PLL */ #define PLL_4x 0x40 /* 01: PLL 4x */ #define PLL_6x 0x80 /* 10: PLL 6x */ #define PLL_8x 0xc0 /* 11: PLL 8x */ /* AEC evaluate window */ #define AEC_FULL 0x00 /* 00: Full window */ #define AEC_1p2 0x10 /* 01: 1/2 window */ #define AEC_1p4 0x20 /* 10: 1/4 window */ #define AEC_2p3 0x30 /* 11: Low 2/3 window */ /* COM5 */ #define AFR_ON_OFF 0x80 /* Auto frame rate control ON/OFF selection */ #define AFR_SPPED 0x40 /* Auto frame rate control speed selection */ /* Auto frame rate max rate control */ #define AFR_NO_RATE 0x00 /* No reduction of frame rate */ #define AFR_1p2 0x10 /* Max reduction to 1/2 frame rate */ #define AFR_1p4 0x20 /* Max reduction to 1/4 frame rate */ #define AFR_1p8 0x30 /* Max reduction to 1/8 frame rate */ /* Auto frame rate active point control */ #define AF_2x 0x00 /* Add frame when AGC reaches 2x gain */ #define AF_4x 0x04 /* Add frame when AGC reaches 4x gain */ #define AF_8x 0x08 /* Add frame when AGC reaches 8x gain */ #define AF_16x 0x0c /* Add frame when AGC reaches 16x gain */ /* AEC max step control */ #define AEC_NO_LIMIT 0x01 /* 0 : AEC incease step has limit */ /* 1 : No limit to 
AEC increase step */ /* COM7 */ /* SCCB Register Reset */ #define SCCB_RESET 0x80 /* 0 : No change */ /* 1 : Resets all registers to default */ /* Resolution selection */ #define SLCT_MASK 0x40 /* Mask of VGA or QVGA */ #define SLCT_VGA 0x00 /* 0 : VGA */ #define SLCT_QVGA 0x40 /* 1 : QVGA */ #define ITU656_ON_OFF 0x20 /* ITU656 protocol ON/OFF selection */ /* RGB output format control */ #define FMT_MASK 0x0c /* Mask of color format */ #define FMT_GBR422 0x00 /* 00 : GBR 4:2:2 */ #define FMT_RGB565 0x04 /* 01 : RGB 565 */ #define FMT_RGB555 0x08 /* 10 : RGB 555 */ #define FMT_RGB444 0x0c /* 11 : RGB 444 */ /* Output format control */ #define OFMT_MASK 0x03 /* Mask of output format */ #define OFMT_YUV 0x00 /* 00 : YUV */ #define OFMT_P_BRAW 0x01 /* 01 : Processed Bayer RAW */ #define OFMT_RGB 0x02 /* 10 : RGB */ #define OFMT_BRAW 0x03 /* 11 : Bayer RAW */ /* COM8 */ #define FAST_ALGO 0x80 /* Enable fast AGC/AEC algorithm */ /* AEC Setp size limit */ #define UNLMT_STEP 0x40 /* 0 : Step size is limited */ /* 1 : Unlimited step size */ #define BNDF_ON_OFF 0x20 /* Banding filter ON/OFF */ #define AEC_BND 0x10 /* Enable AEC below banding value */ #define AEC_ON_OFF 0x08 /* Fine AEC ON/OFF control */ #define AGC_ON 0x04 /* AGC Enable */ #define AWB_ON 0x02 /* AWB Enable */ #define AEC_ON 0x01 /* AEC Enable */ /* COM9 */ #define BASE_AECAGC 0x80 /* Histogram or average based AEC/AGC */ /* Automatic gain ceiling - maximum AGC value */ #define GAIN_2x 0x00 /* 000 : 2x */ #define GAIN_4x 0x10 /* 001 : 4x */ #define GAIN_8x 0x20 /* 010 : 8x */ #define GAIN_16x 0x30 /* 011 : 16x */ #define GAIN_32x 0x40 /* 100 : 32x */ #define GAIN_64x 0x50 /* 101 : 64x */ #define GAIN_128x 0x60 /* 110 : 128x */ #define DROP_VSYNC 0x04 /* Drop VSYNC output of corrupt frame */ #define DROP_HREF 0x02 /* Drop HREF output of corrupt frame */ /* COM11 */ #define SGLF_ON_OFF 0x02 /* Single frame ON/OFF selection */ #define SGLF_TRIG 0x01 /* Single frame transfer trigger */ /* EXHCH */ #define 
VSIZE_LSB 0x04 /* Vertical data output size LSB */ /* DSP_CTRL1 */ #define FIFO_ON 0x80 /* FIFO enable/disable selection */ #define UV_ON_OFF 0x40 /* UV adjust function ON/OFF selection */ #define YUV444_2_422 0x20 /* YUV444 to 422 UV channel option selection */ #define CLR_MTRX_ON_OFF 0x10 /* Color matrix ON/OFF selection */ #define INTPLT_ON_OFF 0x08 /* Interpolation ON/OFF selection */ #define GMM_ON_OFF 0x04 /* Gamma function ON/OFF selection */ #define AUTO_BLK_ON_OFF 0x02 /* Black defect auto correction ON/OFF */ #define AUTO_WHT_ON_OFF 0x01 /* White define auto correction ON/OFF */ /* DSP_CTRL3 */ #define UV_MASK 0x80 /* UV output sequence option */ #define UV_ON 0x80 /* ON */ #define UV_OFF 0x00 /* OFF */ #define CBAR_MASK 0x20 /* DSP Color bar mask */ #define CBAR_ON 0x20 /* ON */ #define CBAR_OFF 0x00 /* OFF */ /* HSTART */ #define HST_VGA 0x23 #define HST_QVGA 0x3F /* HSIZE */ #define HSZ_VGA 0xA0 #define HSZ_QVGA 0x50 /* VSTART */ #define VST_VGA 0x07 #define VST_QVGA 0x03 /* VSIZE */ #define VSZ_VGA 0xF0 #define VSZ_QVGA 0x78 /* HOUTSIZE */ #define HOSZ_VGA 0xA0 #define HOSZ_QVGA 0x50 /* VOUTSIZE */ #define VOSZ_VGA 0xF0 #define VOSZ_QVGA 0x78 /* DSPAUTO (DSP Auto Function ON/OFF Control) */ #define AWB_ACTRL 0x80 /* AWB auto threshold control */ #define DENOISE_ACTRL 0x40 /* De-noise auto threshold control */ #define EDGE_ACTRL 0x20 /* Edge enhancement auto strength control */ #define UV_ACTRL 0x10 /* UV adjust auto slope control */ #define SCAL0_ACTRL 0x08 /* Auto scaling factor control */ #define SCAL1_2_ACTRL 0x04 /* Auto scaling factor control */ /* * ID */ #define OV7720 0x7720 #define OV7725 0x7721 #define VERSION(pid, ver) ((pid<<8)|(ver&0xFF)) /* * struct */ struct regval_list { unsigned char reg_num; unsigned char value; }; struct ov772x_color_format { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; u8 dsp3; u8 com3; u8 com7; }; struct ov772x_win_size { char *name; __u32 width; __u32 height; unsigned char com7_bit; const 
struct regval_list *regs; }; struct ov772x_priv { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct ov772x_camera_info *info; const struct ov772x_color_format *cfmt; const struct ov772x_win_size *win; int model; unsigned short flag_vflip:1; unsigned short flag_hflip:1; /* band_filter = COM8[5] ? 256 - BDBASE : 0 */ unsigned short band_filter; }; #define ENDMARKER { 0xff, 0xff } /* * register setting for window size */ static const struct regval_list ov772x_qvga_regs[] = { { HSTART, HST_QVGA }, { HSIZE, HSZ_QVGA }, { VSTART, VST_QVGA }, { VSIZE, VSZ_QVGA }, { HOUTSIZE, HOSZ_QVGA }, { VOUTSIZE, VOSZ_QVGA }, ENDMARKER, }; static const struct regval_list ov772x_vga_regs[] = { { HSTART, HST_VGA }, { HSIZE, HSZ_VGA }, { VSTART, VST_VGA }, { VSIZE, VSZ_VGA }, { HOUTSIZE, HOSZ_VGA }, { VOUTSIZE, VOSZ_VGA }, ENDMARKER, }; /* * supported color format list */ static const struct ov772x_color_format ov772x_cfmts[] = { { .code = V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .dsp3 = 0x0, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .dsp3 = UV_ON, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .dsp3 = 0x0, .com3 = 0x0, .com7 = OFMT_YUV, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .colorspace = V4L2_COLORSPACE_SRGB, .dsp3 = 0x0, .com3 = SWAP_RGB, .com7 = FMT_RGB555 | OFMT_RGB, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .colorspace = V4L2_COLORSPACE_SRGB, .dsp3 = 0x0, .com3 = 0x0, .com7 = FMT_RGB555 | OFMT_RGB, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .colorspace = V4L2_COLORSPACE_SRGB, .dsp3 = 0x0, .com3 = SWAP_RGB, .com7 = FMT_RGB565 | OFMT_RGB, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .colorspace = V4L2_COLORSPACE_SRGB, .dsp3 = 0x0, .com3 = 0x0, .com7 = FMT_RGB565 | OFMT_RGB, }, }; /* * window size list */ #define VGA_WIDTH 640 #define VGA_HEIGHT 480 #define QVGA_WIDTH 320 #define QVGA_HEIGHT 240 #define MAX_WIDTH 
VGA_WIDTH #define MAX_HEIGHT VGA_HEIGHT static const struct ov772x_win_size ov772x_win_vga = { .name = "VGA", .width = VGA_WIDTH, .height = VGA_HEIGHT, .com7_bit = SLCT_VGA, .regs = ov772x_vga_regs, }; static const struct ov772x_win_size ov772x_win_qvga = { .name = "QVGA", .width = QVGA_WIDTH, .height = QVGA_HEIGHT, .com7_bit = SLCT_QVGA, .regs = ov772x_qvga_regs, }; /* * general function */ static struct ov772x_priv *to_ov772x(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct ov772x_priv, subdev); } static int ov772x_write_array(struct i2c_client *client, const struct regval_list *vals) { while (vals->reg_num != 0xff) { int ret = i2c_smbus_write_byte_data(client, vals->reg_num, vals->value); if (ret < 0) return ret; vals++; } return 0; } static int ov772x_mask_set(struct i2c_client *client, u8 command, u8 mask, u8 set) { s32 val = i2c_smbus_read_byte_data(client, command); if (val < 0) return val; val &= ~mask; val |= set & mask; return i2c_smbus_write_byte_data(client, command, val); } static int ov772x_reset(struct i2c_client *client) { int ret = i2c_smbus_write_byte_data(client, COM7, SCCB_RESET); msleep(1); return ret; } /* * soc_camera_ops function */ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev); if (!enable) { ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); return 0; } if (!priv->win || !priv->cfmt) { dev_err(&client->dev, "norm or win select error\n"); return -EPERM; } ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); dev_dbg(&client->dev, "format %d, win %s\n", priv->cfmt->code, priv->win->name); return 0; } static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl) { struct ov772x_priv *priv = container_of(ctrl->handler, struct ov772x_priv, hdl); struct v4l2_subdev *sd = &priv->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; u8 val; switch 
(ctrl->id) { case V4L2_CID_VFLIP: val = ctrl->val ? VFLIP_IMG : 0x00; priv->flag_vflip = ctrl->val; if (priv->info->flags & OV772X_FLAG_VFLIP) val ^= VFLIP_IMG; return ov772x_mask_set(client, COM3, VFLIP_IMG, val); case V4L2_CID_HFLIP: val = ctrl->val ? HFLIP_IMG : 0x00; priv->flag_hflip = ctrl->val; if (priv->info->flags & OV772X_FLAG_HFLIP) val ^= HFLIP_IMG; return ov772x_mask_set(client, COM3, HFLIP_IMG, val); case V4L2_CID_BAND_STOP_FILTER: if (!ctrl->val) { /* Switch the filter off, it is on now */ ret = ov772x_mask_set(client, BDBASE, 0xff, 0xff); if (!ret) ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 0); } else { /* Switch the filter on, set AEC low limit */ val = 256 - ctrl->val; ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF); if (!ret) ret = ov772x_mask_set(client, BDBASE, 0xff, val); } if (!ret) priv->band_filter = ctrl->val; return ret; } return -EINVAL; } static int ov772x_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev); id->ident = priv->model; id->revision = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int ov772x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; reg->size = 1; if (reg->reg > 0xff) return -EINVAL; ret = i2c_smbus_read_byte_data(client, reg->reg); if (ret < 0) return ret; reg->val = (__u64)ret; return 0; } static int ov772x_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->reg > 0xff || reg->val > 0xff) return -EINVAL; return i2c_smbus_write_byte_data(client, reg->reg, reg->val); } #endif static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height) { __u32 diff; const struct ov772x_win_size *win; /* default is QVGA */ diff = abs(width - ov772x_win_qvga.width) + abs(height - ov772x_win_qvga.height); win = &ov772x_win_qvga; /* VGA */ if (diff > 
abs(width - ov772x_win_vga.width) + abs(height - ov772x_win_vga.height)) win = &ov772x_win_vga; return win; } static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height, enum v4l2_mbus_pixelcode code) { struct ov772x_priv *priv = to_ov772x(client); int ret = -EINVAL; u8 val; int i; /* * select format */ priv->cfmt = NULL; for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) { if (code == ov772x_cfmts[i].code) { priv->cfmt = ov772x_cfmts + i; break; } } if (!priv->cfmt) goto ov772x_set_fmt_error; /* * select win */ priv->win = ov772x_select_win(*width, *height); /* * reset hardware */ ov772x_reset(client); /* * Edge Ctrl */ if (priv->info->edgectrl.strength & OV772X_MANUAL_EDGE_CTRL) { /* * Manual Edge Control Mode * * Edge auto strength bit is set by default. * Remove it when manual mode. */ ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00); if (ret < 0) goto ov772x_set_fmt_error; ret = ov772x_mask_set(client, EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK, priv->info->edgectrl.threshold); if (ret < 0) goto ov772x_set_fmt_error; ret = ov772x_mask_set(client, EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK, priv->info->edgectrl.strength); if (ret < 0) goto ov772x_set_fmt_error; } else if (priv->info->edgectrl.upper > priv->info->edgectrl.lower) { /* * Auto Edge Control Mode * * set upper and lower limit */ ret = ov772x_mask_set(client, EDGE_UPPER, OV772X_EDGE_UPPER_MASK, priv->info->edgectrl.upper); if (ret < 0) goto ov772x_set_fmt_error; ret = ov772x_mask_set(client, EDGE_LOWER, OV772X_EDGE_LOWER_MASK, priv->info->edgectrl.lower); if (ret < 0) goto ov772x_set_fmt_error; } /* * set size format */ ret = ov772x_write_array(client, priv->win->regs); if (ret < 0) goto ov772x_set_fmt_error; /* * set DSP_CTRL3 */ val = priv->cfmt->dsp3; if (val) { ret = ov772x_mask_set(client, DSP_CTRL3, UV_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; } /* * set COM3 */ val = priv->cfmt->com3; if (priv->info->flags & OV772X_FLAG_VFLIP) val |= VFLIP_IMG; if (priv->info->flags 
& OV772X_FLAG_HFLIP) val |= HFLIP_IMG; if (priv->flag_vflip) val ^= VFLIP_IMG; if (priv->flag_hflip) val ^= HFLIP_IMG; ret = ov772x_mask_set(client, COM3, SWAP_MASK | IMG_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; /* * set COM7 */ val = priv->win->com7_bit | priv->cfmt->com7; ret = ov772x_mask_set(client, COM7, SLCT_MASK | FMT_MASK | OFMT_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; /* * set COM8 */ if (priv->band_filter) { ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1); if (!ret) ret = ov772x_mask_set(client, BDBASE, 0xff, 256 - priv->band_filter); if (ret < 0) goto ov772x_set_fmt_error; } *width = priv->win->width; *height = priv->win->height; return ret; ov772x_set_fmt_error: ov772x_reset(client); priv->win = NULL; priv->cfmt = NULL; return ret; } static int ov772x_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { a->c.left = 0; a->c.top = 0; a->c.width = VGA_WIDTH; a->c.height = VGA_HEIGHT; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { a->bounds.left = 0; a->bounds.top = 0; a->bounds.width = VGA_WIDTH; a->bounds.height = VGA_HEIGHT; a->defrect = a->bounds; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int ov772x_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev); if (!priv->win || !priv->cfmt) { u32 width = VGA_WIDTH, height = VGA_HEIGHT; int ret = ov772x_set_params(client, &width, &height, V4L2_MBUS_FMT_YUYV8_2X8); if (ret < 0) return ret; } mf->width = priv->win->width; mf->height = priv->win->height; mf->code = priv->cfmt->code; mf->colorspace = priv->cfmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct 
ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev); int ret = ov772x_set_params(client, &mf->width, &mf->height, mf->code); if (!ret) mf->colorspace = priv->cfmt->colorspace; return ret; } static int ov772x_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev); const struct ov772x_win_size *win; int i; /* * select suitable win */ win = ov772x_select_win(mf->width, mf->height); mf->width = win->width; mf->height = win->height; mf->field = V4L2_FIELD_NONE; for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) if (mf->code == ov772x_cfmts[i].code) break; if (i == ARRAY_SIZE(ov772x_cfmts)) { /* Unsupported format requested. Propose either */ if (priv->cfmt) { /* the current one or */ mf->colorspace = priv->cfmt->colorspace; mf->code = priv->cfmt->code; } else { /* the default one */ mf->colorspace = ov772x_cfmts[0].colorspace; mf->code = ov772x_cfmts[0].code; } } else { /* Also return the colorspace */ mf->colorspace = ov772x_cfmts[i].colorspace; } return 0; } static int ov772x_video_probe(struct i2c_client *client) { struct ov772x_priv *priv = to_ov772x(client); u8 pid, ver; const char *devname; /* * check and show product ID and manufacturer ID */ pid = i2c_smbus_read_byte_data(client, PID); ver = i2c_smbus_read_byte_data(client, VER); switch (VERSION(pid, ver)) { case OV7720: devname = "ov7720"; priv->model = V4L2_IDENT_OV7720; break; case OV7725: devname = "ov7725"; priv->model = V4L2_IDENT_OV7725; break; default: dev_err(&client->dev, "Product ID error %x:%x\n", pid, ver); return -ENODEV; } dev_info(&client->dev, "%s Product ID %0x:%0x Manufacturer ID %x:%x\n", devname, pid, ver, i2c_smbus_read_byte_data(client, MIDH), i2c_smbus_read_byte_data(client, MIDL)); return v4l2_ctrl_handler_setup(&priv->hdl); } static const struct v4l2_ctrl_ops ov772x_ctrl_ops = { .s_ctrl = ov772x_s_ctrl, }; static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { .g_chip_ident = 
ov772x_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = ov772x_g_register, .s_register = ov772x_s_register, #endif }; static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index >= ARRAY_SIZE(ov772x_cfmts)) return -EINVAL; *code = ov772x_cfmts[index].code; return 0; } static int ov772x_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; cfg->flags = soc_camera_apply_board_flags(icl, cfg); return 0; } static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { .s_stream = ov772x_s_stream, .g_mbus_fmt = ov772x_g_fmt, .s_mbus_fmt = ov772x_s_fmt, .try_mbus_fmt = ov772x_try_fmt, .cropcap = ov772x_cropcap, .g_crop = ov772x_g_crop, .enum_mbus_fmt = ov772x_enum_fmt, .g_mbus_config = ov772x_g_mbus_config, }; static struct v4l2_subdev_ops ov772x_subdev_ops = { .core = &ov772x_subdev_core_ops, .video = &ov772x_subdev_video_ops, }; /* * i2c_driver function */ static int ov772x_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct ov772x_priv *priv; struct soc_camera_link *icl = soc_camera_i2c_to_link(client); struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int ret; if (!icl || !icl->priv) { dev_err(&client->dev, "OV772X: missing platform data!\n"); return -EINVAL; } if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&adapter->dev, "I2C-Adapter doesn't support " "I2C_FUNC_SMBUS_BYTE_DATA\n"); return -EIO; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->info = icl->priv; v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops); v4l2_ctrl_handler_init(&priv->hdl, 3); v4l2_ctrl_new_std(&priv->hdl, 
&ov772x_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops, V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0); priv->subdev.ctrl_handler = &priv->hdl; if (priv->hdl.error) { int err = priv->hdl.error; kfree(priv); return err; } ret = ov772x_video_probe(client); if (ret) { v4l2_ctrl_handler_free(&priv->hdl); kfree(priv); } return ret; } static int ov772x_remove(struct i2c_client *client) { struct ov772x_priv *priv = to_ov772x(client); v4l2_device_unregister_subdev(&priv->subdev); v4l2_ctrl_handler_free(&priv->hdl); kfree(priv); return 0; } static const struct i2c_device_id ov772x_id[] = { { "ov772x", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ov772x_id); static struct i2c_driver ov772x_i2c_driver = { .driver = { .name = "ov772x", }, .probe = ov772x_probe, .remove = ov772x_remove, .id_table = ov772x_id, }; /* * module function */ static int __init ov772x_module_init(void) { return i2c_add_driver(&ov772x_i2c_driver); } static void __exit ov772x_module_exit(void) { i2c_del_driver(&ov772x_i2c_driver); } module_init(ov772x_module_init); module_exit(ov772x_module_exit); MODULE_DESCRIPTION("SoC Camera driver for ov772x"); MODULE_AUTHOR("Kuninori Morimoto"); MODULE_LICENSE("GPL v2");
gpl-2.0
FreeOptimusProject/android_kernel_lge_p970
drivers/acpi/acpica/nsaccess.c
763
19069
/******************************************************************************* * * Module Name: nsaccess - Top-level functions for accessing ACPI namespace * ******************************************************************************/ /* * Copyright (C) 2000 - 2010, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acnamesp.h" #include "acdispat.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsaccess") /******************************************************************************* * * FUNCTION: acpi_ns_root_initialize * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Allocate and initialize the default root named objects * * MUTEX: Locks namespace for entire execution * ******************************************************************************/ acpi_status acpi_ns_root_initialize(void) { acpi_status status; const struct acpi_predefined_names *init_val = NULL; struct acpi_namespace_node *new_node; union acpi_operand_object *obj_desc; acpi_string val = NULL; ACPI_FUNCTION_TRACE(ns_root_initialize); status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * The global root ptr is initially NULL, so a non-NULL value indicates * that acpi_ns_root_initialize() has already been called; just return. 
*/ if (acpi_gbl_root_node) { status = AE_OK; goto unlock_and_exit; } /* * Tell the rest of the subsystem that the root is initialized * (This is OK because the namespace is locked) */ acpi_gbl_root_node = &acpi_gbl_root_node_struct; /* Enter the pre-defined names in the name table */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Entering predefined entries into namespace\n")); for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) { /* _OSI is optional for now, will be permanent later */ if (!ACPI_STRCMP(init_val->name, "_OSI") && !acpi_gbl_create_osi_method) { continue; } status = acpi_ns_lookup(NULL, init_val->name, init_val->type, ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH, NULL, &new_node); if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */ ACPI_EXCEPTION((AE_INFO, status, "Could not create predefined name %s", init_val->name)); } /* * Name entered successfully. If entry in pre_defined_names[] specifies * an initial value, create the initial value. */ if (init_val->val) { status = acpi_os_predefined_override(init_val, &val); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not override predefined %s", init_val->name)); } if (!val) { val = init_val->val; } /* * Entry requests an initial value, allocate a * descriptor for it. */ obj_desc = acpi_ut_create_internal_object(init_val->type); if (!obj_desc) { status = AE_NO_MEMORY; goto unlock_and_exit; } /* * Convert value string from table entry to * internal representation. Only types actually * used for initial values are implemented here. 
*/ switch (init_val->type) { case ACPI_TYPE_METHOD: obj_desc->method.param_count = (u8) ACPI_TO_INTEGER(val); obj_desc->common.flags |= AOPOBJ_DATA_VALID; #if defined (ACPI_ASL_COMPILER) /* Save the parameter count for the i_aSL compiler */ new_node->value = obj_desc->method.param_count; #else /* Mark this as a very SPECIAL method */ obj_desc->method.method_flags = AML_METHOD_INTERNAL_ONLY; obj_desc->method.extra.implementation = acpi_ut_osi_implementation; #endif break; case ACPI_TYPE_INTEGER: obj_desc->integer.value = ACPI_TO_INTEGER(val); break; case ACPI_TYPE_STRING: /* Build an object around the static string */ obj_desc->string.length = (u32) ACPI_STRLEN(val); obj_desc->string.pointer = val; obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; break; case ACPI_TYPE_MUTEX: obj_desc->mutex.node = new_node; obj_desc->mutex.sync_level = (u8) (ACPI_TO_INTEGER(val) - 1); /* Create a mutex */ status = acpi_os_create_mutex(&obj_desc->mutex. os_mutex); if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(obj_desc); goto unlock_and_exit; } /* Special case for ACPI Global Lock */ if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { acpi_gbl_global_lock_mutex = obj_desc; /* Create additional counting semaphore for global lock */ status = acpi_os_create_semaphore(1, 0, &acpi_gbl_global_lock_semaphore); if (ACPI_FAILURE(status)) { acpi_ut_remove_reference (obj_desc); goto unlock_and_exit; } } break; default: ACPI_ERROR((AE_INFO, "Unsupported initial type value 0x%X", init_val->type)); acpi_ut_remove_reference(obj_desc); obj_desc = NULL; continue; } /* Store pointer to value descriptor in the Node */ status = acpi_ns_attach_object(new_node, obj_desc, obj_desc->common.type); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); } } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); /* Save a handle to "_GPE", it is always present */ if (ACPI_SUCCESS(status)) { status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH, 
&acpi_gbl_fadt_gpe_device); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ns_lookup * * PARAMETERS: scope_info - Current scope info block * Pathname - Search pathname, in internal format * (as represented in the AML stream) * Type - Type associated with name * interpreter_mode - IMODE_LOAD_PASS2 => add name if not found * Flags - Flags describing the search restrictions * walk_state - Current state of the walk * return_node - Where the Node is placed (if found * or created successfully) * * RETURN: Status * * DESCRIPTION: Find or enter the passed name in the name space. * Log an error if name not found in Exec mode. * * MUTEX: Assumes namespace is locked. * ******************************************************************************/ acpi_status acpi_ns_lookup(union acpi_generic_state *scope_info, char *pathname, acpi_object_type type, acpi_interpreter_mode interpreter_mode, u32 flags, struct acpi_walk_state *walk_state, struct acpi_namespace_node **return_node) { acpi_status status; char *path = pathname; struct acpi_namespace_node *prefix_node; struct acpi_namespace_node *current_node = NULL; struct acpi_namespace_node *this_node = NULL; u32 num_segments; u32 num_carats; acpi_name simple_name; acpi_object_type type_to_check_for; acpi_object_type this_search_type; u32 search_parent_flag = ACPI_NS_SEARCH_PARENT; u32 local_flags; ACPI_FUNCTION_TRACE(ns_lookup); if (!return_node) { return_ACPI_STATUS(AE_BAD_PARAMETER); } local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT); *return_node = ACPI_ENTRY_NOT_FOUND; acpi_gbl_ns_lookup_count++; if (!acpi_gbl_root_node) { return_ACPI_STATUS(AE_NO_NAMESPACE); } /* Get the prefix scope. 
A null scope means use the root scope */ if ((!scope_info) || (!scope_info->scope.node)) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Null scope prefix, using root node (%p)\n", acpi_gbl_root_node)); prefix_node = acpi_gbl_root_node; } else { prefix_node = scope_info->scope.node; if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) != ACPI_DESC_TYPE_NAMED) { ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]", prefix_node, acpi_ut_get_descriptor_name(prefix_node))); return_ACPI_STATUS(AE_AML_INTERNAL); } if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) { /* * This node might not be a actual "scope" node (such as a * Device/Method, etc.) It could be a Package or other object * node. Backup up the tree to find the containing scope node. */ while (!acpi_ns_opens_scope(prefix_node->type) && prefix_node->type != ACPI_TYPE_ANY) { prefix_node = acpi_ns_get_parent_node(prefix_node); } } } /* Save type. TBD: may be no longer necessary */ type_to_check_for = type; /* * Begin examination of the actual pathname */ if (!pathname) { /* A Null name_path is allowed and refers to the root */ num_segments = 0; this_node = acpi_gbl_root_node; path = ""; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Null Pathname (Zero segments), Flags=%X\n", flags)); } else { /* * Name pointer is valid (and must be in internal name format) * * Check for scope prefixes: * * As represented in the AML stream, a namepath consists of an * optional scope prefix followed by a name segment part. * * If present, the scope prefix is either a Root Prefix (in * which case the name is fully qualified), or one or more * Parent Prefixes (in which case the name's scope is relative * to the current scope). 
*/ if (*path == (u8) AML_ROOT_PREFIX) { /* Pathname is fully qualified, start from the root */ this_node = acpi_gbl_root_node; search_parent_flag = ACPI_NS_NO_UPSEARCH; /* Point to name segment part */ path++; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Path is absolute from root [%p]\n", this_node)); } else { /* Pathname is relative to current scope, start there */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Searching relative to prefix scope [%4.4s] (%p)\n", acpi_ut_get_node_name(prefix_node), prefix_node)); /* * Handle multiple Parent Prefixes (carat) by just getting * the parent node for each prefix instance. */ this_node = prefix_node; num_carats = 0; while (*path == (u8) AML_PARENT_PREFIX) { /* Name is fully qualified, no search rules apply */ search_parent_flag = ACPI_NS_NO_UPSEARCH; /* * Point past this prefix to the name segment * part or the next Parent Prefix */ path++; /* Backup to the parent node */ num_carats++; this_node = acpi_ns_get_parent_node(this_node); if (!this_node) { /* Current scope has no parent scope */ ACPI_ERROR((AE_INFO, "ACPI path has too many parent prefixes (^) " "- reached beyond root node")); return_ACPI_STATUS(AE_NOT_FOUND); } } if (search_parent_flag == ACPI_NS_NO_UPSEARCH) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Search scope is [%4.4s], path has %d carat(s)\n", acpi_ut_get_node_name (this_node), num_carats)); } } /* * Determine the number of ACPI name segments in this pathname. * * The segment part consists of either: * - A Null name segment (0) * - A dual_name_prefix followed by two 4-byte name segments * - A multi_name_prefix followed by a byte indicating the * number of segments and the segments themselves. * - A single 4-byte name segment * * Examine the name prefix opcode, if any, to determine the number of * segments. */ switch (*path) { case 0: /* * Null name after a root or parent prefixes. We already * have the correct target node and there are no name segments. 
*/ num_segments = 0; type = this_node->type; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Prefix-only Pathname (Zero name segments), Flags=%X\n", flags)); break; case AML_DUAL_NAME_PREFIX: /* More than one name_seg, search rules do not apply */ search_parent_flag = ACPI_NS_NO_UPSEARCH; /* Two segments, point to first name segment */ num_segments = 2; path++; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Dual Pathname (2 segments, Flags=%X)\n", flags)); break; case AML_MULTI_NAME_PREFIX_OP: /* More than one name_seg, search rules do not apply */ search_parent_flag = ACPI_NS_NO_UPSEARCH; /* Extract segment count, point to first name segment */ path++; num_segments = (u32) (u8) * path; path++; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Multi Pathname (%d Segments, Flags=%X)\n", num_segments, flags)); break; default: /* * Not a Null name, no Dual or Multi prefix, hence there is * only one name segment and Pathname is already pointing to it. */ num_segments = 1; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Simple Pathname (1 segment, Flags=%X)\n", flags)); break; } ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path)); } /* * Search namespace for each segment of the name. Loop through and * verify (or add to the namespace) each name segment. * * The object type is significant only at the last name * segment. (We don't care about the types along the path, only * the type of the final target object.) 
*/ this_search_type = ACPI_TYPE_ANY; current_node = this_node; while (num_segments && current_node) { num_segments--; if (!num_segments) { /* This is the last segment, enable typechecking */ this_search_type = type; /* * Only allow automatic parent search (search rules) if the caller * requested it AND we have a single, non-fully-qualified name_seg */ if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) && (flags & ACPI_NS_SEARCH_PARENT)) { local_flags |= ACPI_NS_SEARCH_PARENT; } /* Set error flag according to caller */ if (flags & ACPI_NS_ERROR_IF_FOUND) { local_flags |= ACPI_NS_ERROR_IF_FOUND; } } /* Extract one ACPI name from the front of the pathname */ ACPI_MOVE_32_TO_32(&simple_name, path); /* Try to find the single (4 character) ACPI name */ status = acpi_ns_search_and_enter(simple_name, walk_state, current_node, interpreter_mode, this_search_type, local_flags, &this_node); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { /* Name not found in ACPI namespace */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Name [%4.4s] not found in scope [%4.4s] %p\n", (char *)&simple_name, (char *)&current_node->name, current_node)); } *return_node = this_node; return_ACPI_STATUS(status); } /* More segments to follow? */ if (num_segments > 0) { /* * If we have an alias to an object that opens a scope (such as a * device or processor), we need to dereference the alias here so * that we can access any children of the original node (via the * remaining segments). 
*/ if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) { if (!this_node->object) { return_ACPI_STATUS(AE_NOT_EXIST); } if (acpi_ns_opens_scope (((struct acpi_namespace_node *) this_node->object)->type)) { this_node = (struct acpi_namespace_node *) this_node->object; } } } /* Special handling for the last segment (num_segments == 0) */ else { /* * Sanity typecheck of the target object: * * If 1) This is the last segment (num_segments == 0) * 2) And we are looking for a specific type * (Not checking for TYPE_ANY) * 3) Which is not an alias * 4) Which is not a local type (TYPE_SCOPE) * 5) And the type of target object is known (not TYPE_ANY) * 6) And target object does not match what we are looking for * * Then we have a type mismatch. Just warn and ignore it. */ if ((type_to_check_for != ACPI_TYPE_ANY) && (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) && (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS) && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) && (this_node->type != ACPI_TYPE_ANY) && (this_node->type != type_to_check_for)) { /* Complain about a type mismatch */ ACPI_WARNING((AE_INFO, "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)", ACPI_CAST_PTR(char, &simple_name), acpi_ut_get_type_name(this_node-> type), acpi_ut_get_type_name (type_to_check_for))); } /* * If this is the last name segment and we are not looking for a * specific type, but the type of found object is known, use that * type to (later) see if it opens a scope. */ if (type == ACPI_TYPE_ANY) { type = this_node->type; } } /* Point to next name segment and make this node current */ path += ACPI_NAME_SIZE; current_node = this_node; } /* Always check if we need to open a new scope */ if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) { /* * If entry is a type which opens a scope, push the new scope on the * scope stack. 
*/ if (acpi_ns_opens_scope(type)) { status = acpi_ds_scope_stack_push(this_node, type, walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } } *return_node = this_node; return_ACPI_STATUS(AE_OK); }
gpl-2.0
jameskdev/android_kernel_sky_ef30s
fs/ceph/ceph_strings.c
763
6048
/*
 * Ceph string constants
 *
 * Lookup tables mapping the numeric constants of the Ceph wire
 * protocol to human-readable names, used for debug and log output.
 * Every function falls back to "unknown"/"???" for values it does
 * not recognize, so callers may pass untrusted numbers safely.
 */
#include "types.h"

/* Name of a cluster entity type (mds/osd/mon/client/auth). */
const char *ceph_entity_type_name(int type)
{
	switch (type) {
	case CEPH_ENTITY_TYPE_MDS: return "mds";
	case CEPH_ENTITY_TYPE_OSD: return "osd";
	case CEPH_ENTITY_TYPE_MON: return "mon";
	case CEPH_ENTITY_TYPE_CLIENT: return "client";
	case CEPH_ENTITY_TYPE_AUTH: return "auth";
	default: return "unknown";
	}
}

/* Name of an OSD operation code. */
const char *ceph_osd_op_name(int op)
{
	switch (op) {
	case CEPH_OSD_OP_READ: return "read";
	case CEPH_OSD_OP_STAT: return "stat";
	case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
	case CEPH_OSD_OP_WRITE: return "write";
	case CEPH_OSD_OP_DELETE: return "delete";
	case CEPH_OSD_OP_TRUNCATE: return "truncate";
	case CEPH_OSD_OP_ZERO: return "zero";
	case CEPH_OSD_OP_WRITEFULL: return "writefull";
	case CEPH_OSD_OP_APPEND: return "append";
	case CEPH_OSD_OP_STARTSYNC: return "startsync";
	case CEPH_OSD_OP_SETTRUNC: return "settrunc";
	case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
	case CEPH_OSD_OP_TMAPUP: return "tmapup";
	case CEPH_OSD_OP_TMAPGET: return "tmapget";
	case CEPH_OSD_OP_TMAPPUT: return "tmapput";
	case CEPH_OSD_OP_GETXATTR: return "getxattr";
	case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
	case CEPH_OSD_OP_SETXATTR: return "setxattr";
	case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
	case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
	case CEPH_OSD_OP_RMXATTR: return "rmxattr";
	case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
	case CEPH_OSD_OP_PULL: return "pull";
	case CEPH_OSD_OP_PUSH: return "push";
	case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
	case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
	case CEPH_OSD_OP_SCRUB: return "scrub";
	case CEPH_OSD_OP_WRLOCK: return "wrlock";
	case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
	case CEPH_OSD_OP_RDLOCK: return "rdlock";
	case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
	case CEPH_OSD_OP_UPLOCK: return "uplock";
	case CEPH_OSD_OP_DNLOCK: return "dnlock";
	case CEPH_OSD_OP_CALL: return "call";
	case CEPH_OSD_OP_PGLS: return "pgls";
	}
	return "???";
}

/* Name of an MDS state, prefixed "down:" or "up:" by membership. */
const char *ceph_mds_state_name(int s)
{
	switch (s) {
		/* down and out */
	case CEPH_MDS_STATE_DNE: return "down:dne";
	case CEPH_MDS_STATE_STOPPED: return "down:stopped";
		/* up and out */
	case CEPH_MDS_STATE_BOOT: return "up:boot";
	case CEPH_MDS_STATE_STANDBY: return "up:standby";
	case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
	case CEPH_MDS_STATE_CREATING: return "up:creating";
	case CEPH_MDS_STATE_STARTING: return "up:starting";
		/* up and in */
	case CEPH_MDS_STATE_REPLAY: return "up:replay";
	case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
	case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
	case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
	case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
	case CEPH_MDS_STATE_ACTIVE: return "up:active";
	case CEPH_MDS_STATE_STOPPING: return "up:stopping";
	}
	return "???";
}

/* Name of an MDS session operation. */
const char *ceph_session_op_name(int op)
{
	switch (op) {
	case CEPH_SESSION_REQUEST_OPEN: return "request_open";
	case CEPH_SESSION_OPEN: return "open";
	case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
	case CEPH_SESSION_CLOSE: return "close";
	case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
	case CEPH_SESSION_RENEWCAPS: return "renewcaps";
	case CEPH_SESSION_STALE: return "stale";
	case CEPH_SESSION_RECALL_STATE: return "recall_state";
	}
	return "???";
}

/* Name of an MDS request operation. */
const char *ceph_mds_op_name(int op)
{
	switch (op) {
	case CEPH_MDS_OP_LOOKUP: return "lookup";
	case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
	case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
	case CEPH_MDS_OP_GETATTR: return "getattr";
	case CEPH_MDS_OP_SETXATTR: return "setxattr";
	case CEPH_MDS_OP_SETATTR: return "setattr";
	case CEPH_MDS_OP_RMXATTR: return "rmxattr";
	case CEPH_MDS_OP_READDIR: return "readdir";
	case CEPH_MDS_OP_MKNOD: return "mknod";
	case CEPH_MDS_OP_LINK: return "link";
	case CEPH_MDS_OP_UNLINK: return "unlink";
	case CEPH_MDS_OP_RENAME: return "rename";
	case CEPH_MDS_OP_MKDIR: return "mkdir";
	case CEPH_MDS_OP_RMDIR: return "rmdir";
	case CEPH_MDS_OP_SYMLINK: return "symlink";
	case CEPH_MDS_OP_CREATE: return "create";
	case CEPH_MDS_OP_OPEN: return "open";
	case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
	case CEPH_MDS_OP_LSSNAP: return "lssnap";
	case CEPH_MDS_OP_MKSNAP: return "mksnap";
	case CEPH_MDS_OP_RMSNAP: return "rmsnap";
	}
	return "???";
}

/* Name of a capability message operation. */
const char *ceph_cap_op_name(int op)
{
	switch (op) {
	case CEPH_CAP_OP_GRANT: return "grant";
	case CEPH_CAP_OP_REVOKE: return "revoke";
	case CEPH_CAP_OP_TRUNC: return "trunc";
	case CEPH_CAP_OP_EXPORT: return "export";
	case CEPH_CAP_OP_IMPORT: return "import";
	case CEPH_CAP_OP_UPDATE: return "update";
	case CEPH_CAP_OP_DROP: return "drop";
	case CEPH_CAP_OP_FLUSH: return "flush";
	case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
	case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
	case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
	case CEPH_CAP_OP_RELEASE: return "release";
	case CEPH_CAP_OP_RENEW: return "renew";
	}
	return "???";
}

/* Name of a dentry lease operation. */
const char *ceph_lease_op_name(int o)
{
	switch (o) {
	case CEPH_MDS_LEASE_REVOKE: return "revoke";
	case CEPH_MDS_LEASE_RELEASE: return "release";
	case CEPH_MDS_LEASE_RENEW: return "renew";
	case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
	}
	return "???";
}

/* Name of a snapshot map operation. */
const char *ceph_snap_op_name(int o)
{
	switch (o) {
	case CEPH_SNAP_OP_UPDATE: return "update";
	case CEPH_SNAP_OP_CREATE: return "create";
	case CEPH_SNAP_OP_DESTROY: return "destroy";
	case CEPH_SNAP_OP_SPLIT: return "split";
	}
	return "???";
}

/* Name of a pool operation. */
const char *ceph_pool_op_name(int op)
{
	switch (op) {
	case POOL_OP_CREATE: return "create";
	case POOL_OP_DELETE: return "delete";
	case POOL_OP_AUID_CHANGE: return "auid change";
	case POOL_OP_CREATE_SNAP: return "create snap";
	case POOL_OP_DELETE_SNAP: return "delete snap";
	case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
	case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
	}
	return "???";
}
gpl-2.0
lorenzo-stoakes/linux-historical
drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
1275
2249
/* * Copyright (c) 2013 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <brcmu_wifi.h> #include "core.h" #include "bus.h" #include "debug.h" #include "proto.h" #include "bcdc.h" #include "msgbuf.h" int brcmf_proto_attach(struct brcmf_pub *drvr) { struct brcmf_proto *proto; brcmf_dbg(TRACE, "Enter\n"); proto = kzalloc(sizeof(*proto), GFP_ATOMIC); if (!proto) goto fail; drvr->proto = proto; if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) { if (brcmf_proto_bcdc_attach(drvr)) goto fail; } else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) { if (brcmf_proto_msgbuf_attach(drvr)) goto fail; } else { brcmf_err("Unsupported proto type %d\n", drvr->bus_if->proto_type); goto fail; } if ((proto->txdata == NULL) || (proto->hdrpull == NULL) || (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL) || (proto->configure_addr_mode == NULL) || (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL)) { brcmf_err("Not all proto handlers have been installed\n"); goto fail; } return 0; fail: kfree(proto); drvr->proto = NULL; return -ENOMEM; } void brcmf_proto_detach(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, "Enter\n"); if (drvr->proto) { if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) 
brcmf_proto_bcdc_detach(drvr); else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) brcmf_proto_msgbuf_detach(drvr); kfree(drvr->proto); drvr->proto = NULL; } }
gpl-2.0
digetx/picasso_upstream_support
sound/isa/azt2320.c
1275
9816
/*
    card-azt2320.c - driver for Aztech Systems AZT2320 based soundcards.
    Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
*/

/*
    This driver should provide support for most Aztech AZT2320 based cards.
    Several AZT2316 chips are also supported/tested, but autoprobe doesn't
    work: all module option have to be set.

    No docs available for us at Aztech headquarters !!!   Unbelievable ...
    No other help obtained.

    Thanks to Rainer Wiesner <rainer.wiesner@01019freenet.de> for the WSS
    activation method (full-duplex audio!).
*/

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/wss.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>

#define PFX "azt2320: "

MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("Aztech Systems AZT2320");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V},"
		"{Aztech Systems,AZT2320},"
		"{Aztech Systems,AZT3300},"
		"{Aztech Systems,AZT2320},"
		"{Aztech Systems,AZT3000}}");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* Pnp setup */
static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* Pnp setup */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for azt2320 based soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for azt2320 based soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable azt2320 based soundcard.");

/* Per-card private data, hung off snd_card->private_data. */
struct snd_card_azt2320 {
	int dev_no;
	struct pnp_dev *dev;	/* AUDIO function PnP device */
	struct pnp_dev *devmpu;	/* optional MPU401 PnP device */
	struct snd_wss *chip;	/* WSS codec, used by suspend/resume */
};

static struct pnp_card_device_id snd_azt2320_pnpids[] = {
	/* PRO16V */
	{ .id = "AZT1008", .devs = { { "AZT1008" }, { "AZT2001" }, } },
	/* Aztech Sound Galaxy 16 */
	{ .id = "AZT2320", .devs = { { "AZT0001" }, { "AZT0002" }, } },
	/* Packard Bell Sound III 336 AM/SP */
	{ .id = "AZT3000", .devs = { { "AZT1003" }, { "AZT2001" }, } },
	/* AT3300 */
	{ .id = "AZT3002", .devs = { { "AZT1004" }, { "AZT2001" }, } },
	/* --- */
	{ .id = "AZT3005", .devs = { { "AZT1003" }, { "AZT2001" }, } },
	/* --- */
	{ .id = "AZT3011", .devs = { { "AZT1003" }, { "AZT2001" }, } },
	{ .id = "" }	/* end */
};

MODULE_DEVICE_TABLE(pnp_card, snd_azt2320_pnpids);

#define DRIVER_NAME	"snd-card-azt2320"

/*
 * Activate the AUDIO (and, if present, the MPU401) PnP devices and copy
 * the assigned resources into the per-slot module-parameter arrays.
 * An MPU401 activation failure is not fatal: the device is skipped.
 * Returns 0 on success or a negative errno.
 */
static int snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard,
				struct pnp_card_link *card,
				const struct pnp_card_device_id *id)
{
	struct pnp_dev *pdev;
	int err;

	acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
	if (acard->dev == NULL)
		return -ENODEV;

	acard->devmpu = pnp_request_card_device(card, id->devs[1].id, NULL);

	pdev = acard->dev;

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n");
		return err;
	}
	port[dev] = pnp_port_start(pdev, 0);
	fm_port[dev] = pnp_port_start(pdev, 1);
	wss_port[dev] = pnp_port_start(pdev, 2);
	dma1[dev] = pnp_dma(pdev, 0);
	dma2[dev] = pnp_dma(pdev, 1);
	irq[dev] = pnp_irq(pdev, 0);

	pdev = acard->devmpu;
	if (pdev != NULL) {
		err = pnp_activate_dev(pdev);
		if (err < 0)
			goto __mpu_error;
		mpu_port[dev] = pnp_port_start(pdev, 0);
		mpu_irq[dev] = pnp_irq(pdev, 0);
	} else {
	__mpu_error:	/* reached either with pdev == NULL (no MPU401
			 * device) or from the activation failure above */
		if (pdev) {
			pnp_release_card_device(pdev);
			snd_printk(KERN_ERR PFX
				   "MPU401 pnp configure failure, skipping\n");
		}
		acard->devmpu = NULL;
		mpu_port[dev] = -1;
	}

	return 0;
}

/* same of snd_sbdsp_command by Jaroslav Kysela */
static int snd_card_azt2320_command(unsigned long port, unsigned char val)
{
	int i;
	unsigned long limit;

	/* Busy-wait (bounded to ~100ms) for the chip to accept a command. */
	limit = jiffies + HZ / 10;
	for (i = 50000; i && time_after(limit, jiffies); i--)
		if (!(inb(port + 0x0c) & 0x80)) {
			outb(val, port + 0x0c);
			return 0;
		}
	return -EBUSY;
}

/*
 * Switch the card into WSS (full-duplex) mode via the undocumented
 * 0x09/0x00 command sequence (method courtesy of Rainer Wiesner).
 */
static int snd_card_azt2320_enable_wss(unsigned long port)
{
	int error;

	if ((error = snd_card_azt2320_command(port, 0x09)))
		return error;
	if ((error = snd_card_azt2320_command(port, 0x00)))
		return error;

	mdelay(5);
	return 0;
}

/*
 * Create and register one AZT2320 card instance: PnP resource setup,
 * WSS mode activation, WSS codec + PCM/mixer/timer, optional MPU401
 * and OPL3. Returns 0 on success or a negative errno (the card object
 * is freed on every error path).
 */
static int snd_card_azt2320_probe(int dev,
				  struct pnp_card_link *pcard,
				  const struct pnp_card_device_id *pid)
{
	int error;
	struct snd_card *card;
	struct snd_card_azt2320 *acard;
	struct snd_wss *chip;
	struct snd_opl3 *opl3;

	error = snd_card_new(&pcard->card->dev,
			     index[dev], id[dev], THIS_MODULE,
			     sizeof(struct snd_card_azt2320), &card);
	if (error < 0)
		return error;
	acard = card->private_data;

	if ((error = snd_card_azt2320_pnp(dev, acard, pcard, pid))) {
		snd_card_free(card);
		return error;
	}

	if ((error = snd_card_azt2320_enable_wss(port[dev]))) {
		snd_card_free(card);
		return error;
	}

	error = snd_wss_create(card, wss_port[dev], -1,
			       irq[dev],
			       dma1[dev], dma2[dev],
			       WSS_HW_DETECT, 0, &chip);
	if (error < 0) {
		snd_card_free(card);
		return error;
	}
	/*
	 * BUG FIX: store the codec in the private data. The PM callbacks
	 * (snd_azt2320_pnp_suspend/_resume) dereference acard->chip, which
	 * was never assigned before — suspending would oops on NULL.
	 */
	acard->chip = chip;

	strcpy(card->driver, "AZT2320");
	strcpy(card->shortname, "Aztech AZT2320");
	sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i",
		card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);

	error = snd_wss_pcm(chip, 0);
	if (error < 0) {
		snd_card_free(card);
		return error;
	}
	error = snd_wss_mixer(chip);
	if (error < 0) {
		snd_card_free(card);
		return error;
	}
	error = snd_wss_timer(chip, 0);
	if (error < 0) {
		snd_card_free(card);
		return error;
	}

	if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
		if (snd_mpu401_uart_new(card, 0, MPU401_HW_AZT2320,
					mpu_port[dev], 0,
					mpu_irq[dev], NULL) < 0)
			snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n",
				   mpu_port[dev]);
	}

	if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
		if (snd_opl3_create(card,
				    fm_port[dev], fm_port[dev] + 2,
				    OPL3_HW_AUTO, 0, &opl3) < 0) {
			snd_printk(KERN_ERR PFX
				   "no OPL device at 0x%lx-0x%lx\n",
				   fm_port[dev], fm_port[dev] + 2);
		} else {
			if ((error = snd_opl3_timer_new(opl3, 1, 2)) < 0) {
				snd_card_free(card);
				return error;
			}
			if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
				snd_card_free(card);
				return error;
			}
		}
	}

	if ((error = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return error;
	}
	pnp_set_card_drvdata(pcard, card);
	return 0;
}

static unsigned int azt2320_devices;

/*
 * PnP card probe entry: find the next enabled slot and bind one card.
 * 'dev' is static so repeated detections continue from the last slot.
 */
static int snd_azt2320_pnp_detect(struct pnp_card_link *card,
				  const struct pnp_card_device_id *id)
{
	static int dev;
	int res;

	for ( ; dev < SNDRV_CARDS; dev++) {
		if (!enable[dev])
			continue;
		res = snd_card_azt2320_probe(dev, card, id);
		if (res < 0)
			return res;
		dev++;
		azt2320_devices++;
		return 0;
	}
	return -ENODEV;
}

static void snd_azt2320_pnp_remove(struct pnp_card_link *pcard)
{
	snd_card_free(pnp_get_card_drvdata(pcard));
	pnp_set_card_drvdata(pcard, NULL);
}

#ifdef CONFIG_PM
/* Power down the codec; acard->chip is set in probe (see fix above). */
static int snd_azt2320_pnp_suspend(struct pnp_card_link *pcard,
				   pm_message_t state)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_card_azt2320 *acard = card->private_data;
	struct snd_wss *chip = acard->chip;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	chip->suspend(chip);
	return 0;
}

static int snd_azt2320_pnp_resume(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_card_azt2320 *acard = card->private_data;
	struct snd_wss *chip = acard->chip;

	chip->resume(chip);
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif

static struct pnp_card_driver azt2320_pnpc_driver = {
	.flags		= PNP_DRIVER_RES_DISABLE,
	.name		= "azt2320",
	.id_table	= snd_azt2320_pnpids,
	.probe		= snd_azt2320_pnp_detect,
	.remove		= snd_azt2320_pnp_remove,
#ifdef CONFIG_PM
	.suspend	= snd_azt2320_pnp_suspend,
	.resume		= snd_azt2320_pnp_resume,
#endif
};

static int __init alsa_card_azt2320_init(void)
{
	int err;

	err = pnp_register_card_driver(&azt2320_pnpc_driver);
	if (err)
		return err;

	if (!azt2320_devices) {
		pnp_unregister_card_driver(&azt2320_pnpc_driver);
#ifdef MODULE
		snd_printk(KERN_ERR "no AZT2320 based soundcards found\n");
#endif
		return -ENODEV;
	}
	return 0;
}

static void __exit alsa_card_azt2320_exit(void)
{
	pnp_unregister_card_driver(&azt2320_pnpc_driver);
}

module_init(alsa_card_azt2320_init)
module_exit(alsa_card_azt2320_exit)
gpl-2.0
cphelps76/DEMENTED_kernel_grouper
drivers/s390/char/fs3270.c
1531
12297
/*
 * IBM/3270 Driver - fullscreen driver.
 *
 * Author(s):
 *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
 *   Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
 *     Copyright IBM Corp. 2003, 2009
 *
 * Character device (/dev/3270/fs*) that gives one userspace program
 * exclusive fullscreen access to a 3270 terminal, layered on top of the
 * raw3270 view infrastructure.
 */

#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>

#include "raw3270.h"
#include "ctrlchar.h"

static struct raw3270_fn fs3270_fn;

/* Per-open fullscreen view state; embeds the generic raw3270 view. */
struct fs3270 {
	struct raw3270_view view;
	struct pid *fs_pid;		/* Pid of controlling program. */
	int read_command;		/* ccw command to use for reads. */
	int write_command;		/* ccw command to use for writes. */
	int attention;			/* Got attention. */
	int active;			/* Fullscreen view is active. */
	struct raw3270_request *init;	/* single init request. */
	wait_queue_head_t wait;		/* Init & attention wait queue. */
	struct idal_buffer *rdbuf;	/* full-screen-deactivate buffer */
	size_t rdbuf_size;		/* size of data returned by RDBUF */
};

/* Serializes open and ioctl against each other. */
static DEFINE_MUTEX(fs3270_mutex);

/* Request-completion callback: wake whoever sleeps on the wait queue. */
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int
fs3270_working(struct fs3270 *fp)
{
	/*
	 * The fullscreen view is in working order if the view
	 * has been activated AND the initial request is finished.
	 */
	return fp->active && raw3270_request_final(fp->init);
}

/*
 * Submit one request on the view and sleep until it completes.
 * Retries while raw3270_start() reports -EACCES (view not owning the
 * device yet); interruptible only while waiting for the view to become
 * working. Returns 0 or a negative errno.
 */
static int
fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
	struct fs3270 *fp;
	int rc;

	fp = (struct fs3270 *) view;
	rq->callback = fs3270_wake_up;
	rq->callback_data = &fp->wait;

	do {
		if (!fs3270_working(fp)) {
			/* Fullscreen view isn't ready yet. */
			rc = wait_event_interruptible(fp->wait,
						      fs3270_working(fp));
			if (rc != 0)
				break;
		}
		rc = raw3270_start(view, rq);
		if (rc == 0) {
			/* Started successfully. Now wait for completion. */
			wait_event(fp->wait, raw3270_request_final(rq));
		}
	} while (rc == -EACCES);
	return rc;
}

/*
 * Switch to the fullscreen view.
 */
static void
fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}

/* Completion of the screen-restore write issued by fs3270_activate(). */
static void
fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;
	if (rq->rc != 0 || rq->rescnt != 0) {
		/* Restore failed: tell the controlling program via SIGHUP. */
		if (fp->fs_pid)
			kill_pid(fp->fs_pid, SIGHUP, 1);
	}
	fp->rdbuf_size = 0;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}

/*
 * View-activation callback: either clear the screen (no saved buffer)
 * or replay the buffer saved by fs3270_deactivate().
 */
static int
fs3270_activate(struct raw3270_view *view)
{
	struct fs3270 *fp;
	char *cp;
	int rc;

	fp = (struct fs3270 *) view;

	/* If an old init command is still running just return. */
	if (!raw3270_request_final(fp->init))
		return 0;

	if (fp->rdbuf_size == 0) {
		/* No saved buffer. Just clear the screen. */
		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
		fp->init->callback = fs3270_reset_callback;
	} else {
		/* Restore fullscreen buffer saved by fs3270_deactivate. */
		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
		raw3270_request_set_idal(fp->init, fp->rdbuf);
		fp->init->ccw.count = fp->rdbuf_size;
		/* Patch the first 8 bytes into a write-control sequence
		 * (see the matching +5 offset hack in fs3270_deactivate). */
		cp = fp->rdbuf->data[0];
		cp[0] = TW_KR;
		cp[1] = TO_SBA;
		cp[2] = cp[6];
		cp[3] = cp[7];
		cp[4] = TO_IC;
		cp[5] = TO_SBA;
		cp[6] = 0x40;
		cp[7] = 0x40;
		fp->init->rescnt = 0;
		fp->init->callback = fs3270_restore_callback;
	}
	rc = fp->init->rc = raw3270_start_locked(view, fp->init);
	if (rc)
		fp->init->callback(fp->init, NULL);
	else
		fp->active = 1;
	return rc;
}

/*
 * Shutdown fullscreen view.
 */
static void
fs3270_save_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;

	/* Correct idal buffer element 0 address. */
	fp->rdbuf->data[0] -= 5;
	fp->rdbuf->size += 5;

	/*
	 * If the rdbuf command failed or the idal buffer is
	 * to small for the amount of data returned by the
	 * rdbuf command, then we have no choice but to send
	 * a SIGHUP to the application.
	 */
	if (rq->rc != 0 || rq->rescnt == 0) {
		if (fp->fs_pid)
			kill_pid(fp->fs_pid, SIGHUP, 1);
		fp->rdbuf_size = 0;
	} else
		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}

/*
 * View-deactivation callback: read the current 3270 screen buffer into
 * fp->rdbuf so fs3270_activate() can restore it later.
 */
static void
fs3270_deactivate(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	fp->active = 0;

	/* If an old init command is still running just return. */
	if (!raw3270_request_final(fp->init))
		return;

	/* Prepare read-buffer request. */
	raw3270_request_set_cmd(fp->init, TC_RDBUF);
	/*
	 * Hackish: skip first 5 bytes of the idal buffer to make
	 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
	 * in the activation command.
	 */
	fp->rdbuf->data[0] += 5;
	fp->rdbuf->size -= 5;
	raw3270_request_set_idal(fp->init, fp->rdbuf);
	fp->init->rescnt = 0;
	fp->init->callback = fs3270_save_callback;

	/* Start I/O to read in the 3270 buffer. */
	fp->init->rc = raw3270_start_locked(view, fp->init);
	if (fp->init->rc)
		fp->init->callback(fp->init, NULL);
}

/* Interrupt handler for the fullscreen view (installed via fs3270_fn). */
static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
	/* Handle ATTN. Set indication and wake waiters for attention. */
	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
		fp->attention = 1;
		wake_up(&fp->wait);
	}

	if (rq) {
		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
			rq->rc = -EIO;
		else
			/* Normal end. Copy residual count. */
			rq->rescnt = irb->scsw.cmd.count;
	}
	return RAW3270_IO_DONE;
}

/*
 * Process reads from fullscreen 3270.
 * Blocks until an ATTN interrupt arrives, then issues the read channel
 * command (TUBICMD-configurable, default 2 = Read Modified) and copies
 * the data to userspace.
 */
static ssize_t
fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
{
	struct fs3270 *fp;
	struct raw3270_request *rq;
	struct idal_buffer *ib;
	ssize_t rc;

	if (count == 0 || count > 65535)
		return -EINVAL;
	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	ib = idal_buffer_alloc(count, 0);
	if (IS_ERR(ib))
		return -ENOMEM;
	rq = raw3270_request_alloc(0);
	if (!IS_ERR(rq)) {
		if (fp->read_command == 0 && fp->write_command != 0)
			fp->read_command = 6;
		raw3270_request_set_cmd(rq,
			fp->read_command ? : 2);
		raw3270_request_set_idal(rq, ib);
		rc = wait_event_interruptible(fp->wait, fp->attention);
		fp->attention = 0;
		if (rc == 0) {
			rc = fs3270_do_io(&fp->view, rq);
			if (rc == 0) {
				count -= rq->rescnt;
				if (idal_buffer_to_user(ib, data, count) != 0)
					rc = -EFAULT;
				else
					rc = count;
			}
		}
		raw3270_request_free(rq);
	} else
		rc = PTR_ERR(rq);
	idal_buffer_free(ib);
	return rc;
}

/*
 * Process writes to fullscreen 3270.
 * Copies user data into an idal buffer and issues the write channel
 * command (TUBOCMD-configurable, default 1; 5 is remapped to 13).
 */
static ssize_t
fs3270_write(struct file *filp, const char __user *data, size_t count,
	     loff_t *off)
{
	struct fs3270 *fp;
	struct raw3270_request *rq;
	struct idal_buffer *ib;
	int write_command;
	ssize_t rc;

	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	ib = idal_buffer_alloc(count, 0);
	if (IS_ERR(ib))
		return -ENOMEM;
	rq = raw3270_request_alloc(0);
	if (!IS_ERR(rq)) {
		if (idal_buffer_from_user(ib, data, count) == 0) {
			write_command = fp->write_command ? : 1;
			if (write_command == 5)
				write_command = 13;
			raw3270_request_set_cmd(rq, write_command);
			raw3270_request_set_idal(rq, ib);
			rc = fs3270_do_io(&fp->view, rq);
			if (rc == 0)
				rc = count - rq->rescnt;
		} else
			rc = -EFAULT;
		raw3270_request_free(rq);
	} else
		rc = PTR_ERR(rq);
	idal_buffer_free(ib);
	return rc;
}

/*
 * process ioctl commands for the tube driver
 */
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	char __user *argp;
	struct fs3270 *fp;
	struct raw3270_iocb iocb;
	int rc;

	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	/* Same handler serves compat_ioctl; convert the pointer arg. */
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (char __user *)arg;
	rc = 0;
	mutex_lock(&fs3270_mutex);
	switch (cmd) {
	case TUBICMD:
		/* Set ccw command for read operations. */
		fp->read_command = arg;
		break;
	case TUBOCMD:
		/* Set ccw command for write operations. */
		fp->write_command = arg;
		break;
	case TUBGETI:
		rc = put_user(fp->read_command, argp);
		break;
	case TUBGETO:
		rc = put_user(fp->write_command, argp);
		break;
	case TUBGETMOD:
		/* Report terminal geometry to the application. */
		iocb.model = fp->view.model;
		iocb.line_cnt = fp->view.rows;
		iocb.col_cnt = fp->view.cols;
		iocb.pf_cnt = 24;
		iocb.re_cnt = 20;
		iocb.map = 0;
		if (copy_to_user(argp, &iocb,
				 sizeof(struct raw3270_iocb)))
			rc = -EFAULT;
		break;
	}
	mutex_unlock(&fs3270_mutex);
	return rc;
}

/*
 * Allocate fs3270 structure.
 */
static struct fs3270 *
fs3270_alloc_view(void)
{
	struct fs3270 *fp;

	fp = kzalloc(sizeof(struct fs3270),GFP_KERNEL);
	if (!fp)
		return ERR_PTR(-ENOMEM);
	fp->init = raw3270_request_alloc(0);
	if (IS_ERR(fp->init)) {
		kfree(fp);
		return ERR_PTR(-ENOMEM);
	}
	return fp;
}

/*
 * Free fs3270 structure.
 */
static void
fs3270_free_view(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	if (fp->rdbuf)
		idal_buffer_free(fp->rdbuf);
	raw3270_request_free(((struct fs3270 *) view)->init);
	kfree(view);
}

/*
 * Unlink fs3270 data structure from filp.
 */
static void
fs3270_release(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	/* Tell the controlling application it lost the terminal. */
	if (fp->fs_pid)
		kill_pid(fp->fs_pid, SIGHUP, 1);
}

/* View to a 3270 device. Can be console, tty or fullscreen. */
static struct raw3270_fn fs3270_fn = {
	.activate = fs3270_activate,
	.deactivate = fs3270_deactivate,
	.intv = (void *) fs3270_irq,
	.release = fs3270_release,
	.free = fs3270_free_view
};

/*
 * This routine is called whenever a 3270 fullscreen device is opened.
 */
static int
fs3270_open(struct inode *inode, struct file *filp)
{
	struct fs3270 *fp;
	struct idal_buffer *ib;
	int minor, rc = 0;

	if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
		return -ENODEV;
	minor = iminor(filp->f_path.dentry->d_inode);
	/* Check for minor 0 multiplexer. */
	if (minor == 0) {
		/* Resolve the caller's controlling 3270 tty to a minor. */
		struct tty_struct *tty = get_current_tty();
		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
			tty_kref_put(tty);
			return -ENODEV;
		}
		minor = tty->index + RAW3270_FIRSTMINOR;
		tty_kref_put(tty);
	}
	mutex_lock(&fs3270_mutex);
	/* Check if some other program is already using fullscreen mode. */
	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
	if (!IS_ERR(fp)) {
		raw3270_put_view(&fp->view);
		rc = -EBUSY;
		goto out;
	}
	/* Allocate fullscreen view structure. */
	fp = fs3270_alloc_view();
	if (IS_ERR(fp)) {
		rc = PTR_ERR(fp);
		goto out;
	}

	init_waitqueue_head(&fp->wait);
	fp->fs_pid = get_pid(task_pid(current));
	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
	if (rc) {
		fs3270_free_view(&fp->view);
		goto out;
	}

	/* Allocate idal-buffer. */
	/* +5 accounts for the control-sequence prefix added on restore. */
	ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
	if (IS_ERR(ib)) {
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
		rc = PTR_ERR(ib);
		goto out;
	}
	fp->rdbuf = ib;

	rc = raw3270_activate_view(&fp->view);
	if (rc) {
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
		goto out;
	}
	nonseekable_open(inode, filp);
	filp->private_data = fp;
out:
	mutex_unlock(&fs3270_mutex);
	return rc;
}

/*
 * This routine is called when the 3270 tty is closed. We wait
 * for the remaining request to be completed. Then we clean up.
 */
static int
fs3270_close(struct inode *inode, struct file *filp)
{
	struct fs3270 *fp;

	fp = filp->private_data;
	filp->private_data = NULL;
	if (fp) {
		put_pid(fp->fs_pid);
		fp->fs_pid = NULL;
		raw3270_reset(&fp->view);
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
	}
	return 0;
}

static const struct file_operations fs3270_fops = {
	.owner		 = THIS_MODULE,		/* owner */
	.read		 = fs3270_read,		/* read */
	.write		 = fs3270_write,	/* write */
	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
	.open		 = fs3270_open,		/* open */
	.release	 = fs3270_close,	/* release */
	.llseek		= no_llseek,
};

/*
 * 3270 fullscreen driver initialization.
 */
static int __init
fs3270_init(void)
{
	int rc;

	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
	if (rc)
		return rc;
	return 0;
}

static void __exit
fs3270_exit(void)
{
	unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
}

MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);

module_init(fs3270_init);
module_exit(fs3270_exit);
gpl-2.0
openRPi/linux
fs/ubifs/tnc.c
1787
89100
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements TNC (Tree Node Cache) which caches indexing nodes of * the UBIFS B-tree. * * At the moment the locking rules of the TNC tree are quite simple and * straightforward. We just have a mutex and lock it when we traverse the * tree. If a znode is not in memory, we read it from flash while still having * the mutex locked. */ #include <linux/crc32.h> #include <linux/slab.h> #include "ubifs.h" /* * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. * @NAME_LESS: name corresponding to the first argument is less than second * @NAME_MATCHES: names match * @NAME_GREATER: name corresponding to the second argument is greater than * first * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media * * These constants were introduce to improve readability. */ enum { NAME_LESS = 0, NAME_MATCHES = 1, NAME_GREATER = 2, NOT_ON_MEDIA = 3, }; /** * insert_old_idx - record an index node obsoleted since the last commit start. * @c: UBIFS file-system description object * @lnum: LEB number of obsoleted index node * @offs: offset of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
* * For recovery, there must always be a complete intact version of the index on * flash at all times. That is called the "old index". It is the index as at the * time of the last successful commit. Many of the index nodes in the old index * may be dirty, but they must not be erased until the next successful commit * (at which point that index becomes the old index). * * That means that the garbage collection and the in-the-gaps method of * committing must be able to determine if an index node is in the old index. * Most of the old index nodes can be found by looking up the TNC using the * 'lookup_znode()' function. However, some of the old index nodes may have * been deleted from the current index or may have been changed so much that * they cannot be easily found. In those cases, an entry is added to an RB-tree. * That is what this function does. The RB-tree is ordered by LEB number and * offset because they uniquely identify the old index node. */ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs) { struct ubifs_old_idx *old_idx, *o; struct rb_node **p, *parent = NULL; old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS); if (unlikely(!old_idx)) return -ENOMEM; old_idx->lnum = lnum; old_idx->offs = offs; p = &c->old_idx.rb_node; while (*p) { parent = *p; o = rb_entry(parent, struct ubifs_old_idx, rb); if (lnum < o->lnum) p = &(*p)->rb_left; else if (lnum > o->lnum) p = &(*p)->rb_right; else if (offs < o->offs) p = &(*p)->rb_left; else if (offs > o->offs) p = &(*p)->rb_right; else { ubifs_err("old idx added twice!"); kfree(old_idx); return 0; } } rb_link_node(&old_idx->rb, parent, p); rb_insert_color(&old_idx->rb, &c->old_idx); return 0; } /** * insert_old_idx_znode - record a znode obsoleted since last commit start. * @c: UBIFS file-system description object * @znode: znode of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
 */
int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
{
	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		/* The on-media position of @znode is kept in its parent's zbranch */
		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len)
			return insert_old_idx(c, zbr->lnum, zbr->offs);
	} else
		/* Root znode - its position is kept in c->zroot instead */
		if (c->zroot.len)
			return insert_old_idx(c, c->zroot.lnum,
					      c->zroot.offs);
	/* Zero length means there is no on-media copy to protect */
	return 0;
}

/**
 * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 */
static int ins_clr_old_idx_znode(struct ubifs_info *c,
				 struct ubifs_znode *znode)
{
	int err;

	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len) {
			err = insert_old_idx(c, zbr->lnum, zbr->offs);
			if (err)
				return err;
			/*
			 * Clear the position so the same old index node
			 * cannot be recorded a second time.
			 */
			zbr->lnum = 0;
			zbr->offs = 0;
			zbr->len = 0;
		}
	} else
		if (c->zroot.len) {
			err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
			if (err)
				return err;
			c->zroot.lnum = 0;
			c->zroot.offs = 0;
			c->zroot.len = 0;
		}
	return 0;
}

/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * During start commit, the old_idx RB-tree is used to avoid overwriting index
 * nodes that were in the index last commit but have since been deleted. This
 * is necessary for recovery i.e. the old index must be kept intact until the
 * new index is successfully written. The old-idx RB-tree is used for the
 * in-the-gaps method of writing index nodes and is destroyed every commit.
 */
void destroy_old_idx(struct ubifs_info *c)
{
	struct rb_node *this = c->old_idx.rb_node;
	struct ubifs_old_idx *old_idx;

	/*
	 * Post-order walk which frees every entry. After a leaf is freed its
	 * parent's child pointer is cleared, so the walk never revisits a
	 * freed node and needs no extra memory or rebalancing.
	 */
	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		old_idx = rb_entry(this, struct ubifs_old_idx, rb);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &old_idx->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(old_idx);
	}
	c->old_idx = RB_ROOT;
}

/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty znode being committed may not be changed, so it is copied.
 * The copy is marked dirty and not under commit; the original is marked
 * obsolete. Returns the copy, or an ERR_PTR on allocation failure.
 */
static struct ubifs_znode *copy_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	struct ubifs_znode *zn;

	zn = kmalloc(c->max_znode_sz, GFP_NOFS);
	if (unlikely(!zn))
		return ERR_PTR(-ENOMEM);

	memcpy(zn, znode, c->max_znode_sz);
	zn->cnext = NULL;
	__set_bit(DIRTY_ZNODE, &zn->flags);
	__clear_bit(COW_ZNODE, &zn->flags);

	/* A znode must not be copied (made obsolete) twice */
	ubifs_assert(!ubifs_zn_obsolete(znode));
	__set_bit(OBSOLETE_ZNODE, &znode->flags);

	if (znode->level != 0) {
		int i;
		const int n = zn->child_cnt;

		/* The children now have new parent */
		for (i = 0; i < n; i++) {
			struct ubifs_zbranch *zbr = &zn->zbranch[i];

			if (zbr->znode)
				zbr->znode->parent = zn;
		}
	}

	atomic_long_inc(&c->dirty_zn_cnt);
	return zn;
}

/**
 * add_idx_dirt - add dirt due to a dirty znode.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of index node
 * @dirt: size of index node
 *
 * This function updates lprops dirty space and the new size of the index.
 */
static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
{
	c->calc_idx_sz -= ALIGN(dirt, 8);
	return ubifs_add_dirt(c, lnum, dirt);
}

/**
 * dirty_cow_znode - ensure a znode is not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * Returns dirtied znode on success or negative error code on failure.
*/ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr) { struct ubifs_znode *znode = zbr->znode; struct ubifs_znode *zn; int err; if (!ubifs_zn_cow(znode)) { /* znode is not being committed */ if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) { atomic_long_inc(&c->dirty_zn_cnt); atomic_long_dec(&c->clean_zn_cnt); atomic_long_dec(&ubifs_clean_zn_cnt); err = add_idx_dirt(c, zbr->lnum, zbr->len); if (unlikely(err)) return ERR_PTR(err); } return znode; } zn = copy_znode(c, znode); if (IS_ERR(zn)) return zn; if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (unlikely(err)) return ERR_PTR(err); err = add_idx_dirt(c, zbr->lnum, zbr->len); } else err = 0; zbr->znode = zn; zbr->lnum = 0; zbr->offs = 0; zbr->len = 0; if (unlikely(err)) return ERR_PTR(err); return zn; } /** * lnc_add - add a leaf node to the leaf node cache. * @c: UBIFS file-system description object * @zbr: zbranch of leaf node * @node: leaf node * * Leaf nodes are non-index nodes directory entry nodes or data nodes. The * purpose of the leaf node cache is to save re-reading the same leaf node over * and over again. Most things are cached by VFS, however the file system must * cache directory entries for readdir and for resolving hash collisions. The * present implementation of the leaf node cache is extremely simple, and * allows for error returns that are not used but that may be needed if a more * complex implementation is created. * * Note, this function does not add the @node object to LNC directly, but * allocates a copy of the object and adds the copy to LNC. The reason for this * is that @node has been allocated outside of the TNC subsystem and will be * used with @c->tnc_mutex unlock upon return from the TNC subsystem. But LNC * may be changed at any time, e.g. freed by the shrinker. 
 */
static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		   const void *node)
{
	int err;
	void *lnc_node;
	const struct ubifs_dent_node *dent = node;

	/* Only "hashed" (directory/xattr entry) nodes go to LNC */
	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);
	ubifs_assert(is_hash_key(c, &zbr->key));

	err = ubifs_validate_entry(c, dent);
	if (err) {
		dump_stack();
		ubifs_dump_node(c, dent);
		return err;
	}

	lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
	if (!lnc_node)
		/* We don't have to have the cache, so no error */
		return 0;

	zbr->leaf = lnc_node;
	return 0;
}

/**
 * lnc_add_directly - add a leaf node to the leaf-node-cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * This function is similar to 'lnc_add()', but it does not create a copy of
 * @node but inserts @node to TNC directly, so the caller must not free or
 * reuse @node after a successful return - LNC owns it from then on.
 */
static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);

	err = ubifs_validate_entry(c, node);
	if (err) {
		dump_stack();
		ubifs_dump_node(c, node);
		return err;
	}

	zbr->leaf = node;
	return 0;
}

/**
 * lnc_free - remove a leaf node from the leaf node cache.
 * @zbr: zbranch of leaf node
 * @node: leaf node
 */
static void lnc_free(struct ubifs_zbranch *zbr)
{
	if (!zbr->leaf)
		return;
	kfree(zbr->leaf);
	zbr->leaf = NULL;
}

/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads a "hashed" node defined by @zbr from the leaf node cache
 * (if it is there) or from the flash media, in which case the node is also
 * added to LNC. Returns zero in case of success or a negative error
 * code in case of failure.
 */
static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(is_hash_key(c, &zbr->key));

	if (zbr->leaf) {
		/* Read from the leaf node cache */
		ubifs_assert(zbr->len != 0);
		memcpy(node, zbr->leaf, zbr->len);
		return 0;
	}

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err)
		return err;

	/* Add the node to the leaf node cache */
	err = lnc_add(c, zbr, node);
	return err;
}

/**
 * try_read_node - read a node if it is a node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. This function returns %1 if a node is present and %0 if
 * a node is not present. A negative error code is returned for I/O errors.
 * This function performs the same function as ubifs_read_node except that
 * it does not require that there is actually a node present and instead
 * the return code indicates if a node was read.
 *
 * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
 * is true (it is controlled by corresponding mount option). However, if
 * @c->mounting or @c->remounting_rw is true (we are mounting or re-mounting to
 * R/W mode), @c->no_chk_data_crc is ignored and CRC is checked. This is
 * because during mounting or re-mounting from R/O mode to R/W mode we may read
 * journal nodes (when replaying the journal or doing the recovery) and the
 * journal nodes may potentially be corrupted, so checking is required.
 */
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
			 int len, int lnum, int offs)
{
	int err, node_len;
	struct ubifs_ch *ch = buf;
	uint32_t crc, node_crc;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
	if (err) {
		ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	/* Anything that does not look exactly like the expected node is "absent" */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
		return 0;

	if (ch->node_type != type)
		return 0;

	node_len = le32_to_cpu(ch->len);
	if (node_len != len)
		return 0;

	/* See the kerneldoc above: data-node CRC may be skipped only at run-time */
	if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
	    !c->remounting_rw)
		return 1;

	/* CRC covers everything after the common header's crc/magic (8 bytes) */
	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc)
		return 0;

	return 1;
}

/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present, and a negative error code in the case of error.
 */
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
			      struct ubifs_zbranch *zbr, void *node)
{
	int ret;

	dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);

	ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
			    zbr->offs);
	if (ret == 1) {
		union ubifs_key node_key;
		struct ubifs_dent_node *dent = node;

		/* All nodes have key in the same place */
		key_read(c, &dent->key, &node_key);
		/* A structurally valid node with the wrong key is still "absent" */
		if (keys_cmp(c, key, &node_key) != 0)
			ret = 0;
	}
	if (ret == 0 && c->replaying)
		dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
			zbr->lnum, zbr->offs, zbr->len);
	return ret;
}

/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if xentry/direntry referred by zbranch @zbr matches name
 * @nm.
Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by
 * @zbr is less than @nm, and %NAME_GREATER if it is greater than @nm. In case
 * of failure, a negative error code is returned.
 */
static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = ubifs_tnc_read_node(c, zbr, dent);
		if (err)
			goto out_free;

		/* Add the node to the leaf node cache */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	/* Compare the common prefix first, then discriminate by length */
	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}

/**
 * get_znode - get a TNC znode that may not be loaded yet.
 * @c: UBIFS file-system description object
 * @znode: parent znode
 * @n: znode branch slot number
 *
 * This function returns the znode or a negative error code.
 */
static struct ubifs_znode *get_znode(struct ubifs_info *c,
				     struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;

	zbr = &znode->zbranch[n];
	if (zbr->znode)
		znode = zbr->znode;
	else
		/* Not cached - read the child from the media */
		znode = ubifs_load_znode(c, zbr, znode, n);
	return znode;
}

/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the next TNC entry is found, %-ENOENT if there is
 * no next entry, or a negative error code otherwise.
 */
static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	/* Fast path: the next branch is in the same znode */
	nn += 1;
	if (nn < znode->child_cnt) {
		*n = nn;
		return 0;
	}
	/* Otherwise climb until an ancestor has a right sibling branch */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip + 1;
		znode = zp;
		if (nn < znode->child_cnt) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend along the leftmost path back to level 0 */
			while (znode->level != 0) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = 0;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}

/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the previous TNC entry is found, %-ENOENT if
 * there is no next entry, or a negative error code otherwise.
 */
static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	/* Fast path: the previous branch is in the same znode */
	if (nn > 0) {
		*n = nn - 1;
		return 0;
	}
	/* Otherwise climb until an ancestor has a left sibling branch */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip - 1;
		znode = zp;
		if (nn >= 0) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend along the rightmost path back to level 0 */
			while (znode->level != 0) {
				nn = znode->child_cnt - 1;
				znode = get_znode(c, znode, nn);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = znode->child_cnt - 1;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}

/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * This function is called for "hashed" keys to make sure that the found key
 * really corresponds to the looked up node (directory or extended attribute
 * entry). It returns %1 and sets @zn and @n if the collision is resolved.
* %0 is returned if @nm is not found and @zn and @n are set to the previous * entry, i.e. to the entry after which @nm could follow if it were in TNC. * This means that @n may be set to %-1 if the leftmost key in @zn is the * previous one. A negative error code is returned on failures. */ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_znode **zn, int *n, const struct qstr *nm) { int err; err = matches_name(c, &(*zn)->zbranch[*n], nm); if (unlikely(err < 0)) return err; if (err == NAME_MATCHES) return 1; if (err == NAME_GREATER) { /* Look left */ while (1) { err = tnc_prev(c, zn, n); if (err == -ENOENT) { ubifs_assert(*n == 0); *n = -1; return 0; } if (err < 0) return err; if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { /* * We have found the branch after which we would * like to insert, but inserting in this znode * may still be wrong. Consider the following 3 * znodes, in the case where we are resolving a * collision with Key2. * * znode zp * ---------------------- * level 1 | Key0 | Key1 | * ----------------------- * | | * znode za | | znode zb * ------------ ------------ * level 0 | Key0 | | Key2 | * ------------ ------------ * * The lookup finds Key2 in znode zb. Lets say * there is no match and the name is greater so * we look left. When we find Key0, we end up * here. If we return now, we will insert into * znode za at slot n = 1. But that is invalid * according to the parent's keys. Key2 must * be inserted into znode zb. * * Note, this problem is not relevant for the * case when we go right, because * 'tnc_insert()' would correct the parent key. 
*/ if (*n == (*zn)->child_cnt - 1) { err = tnc_next(c, zn, n); if (err) { /* Should be impossible */ ubifs_assert(0); if (err == -ENOENT) err = -EINVAL; return err; } ubifs_assert(*n == 0); *n = -1; } return 0; } err = matches_name(c, &(*zn)->zbranch[*n], nm); if (err < 0) return err; if (err == NAME_LESS) return 0; if (err == NAME_MATCHES) return 1; ubifs_assert(err == NAME_GREATER); } } else { int nn = *n; struct ubifs_znode *znode = *zn; /* Look right */ while (1) { err = tnc_next(c, &znode, &nn); if (err == -ENOENT) return 0; if (err < 0) return err; if (keys_cmp(c, &znode->zbranch[nn].key, key)) return 0; err = matches_name(c, &znode->zbranch[nn], nm); if (err < 0) return err; if (err == NAME_GREATER) return 0; *zn = znode; *n = nn; if (err == NAME_MATCHES) return 1; ubifs_assert(err == NAME_LESS); } } } /** * fallible_matches_name - determine if a dent matches a given name. * @c: UBIFS file-system description object * @zbr: zbranch of dent * @nm: name to match * * This is a "fallible" version of 'matches_name()' function which does not * panic if the direntry/xentry referred by @zbr does not exist on the media. * * This function checks if xentry/direntry referred by zbranch @zbr matches name * @nm. Returns %NAME_MATCHES it does, %NAME_LESS if the name referred by @zbr * is less than @nm, %NAME_GREATER if it is greater than @nm, and @NOT_ON_MEDIA * if xentry/direntry referred by @zbr does not exist on the media. A negative * error code is returned in case of failure. 
*/ static int fallible_matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr, const struct qstr *nm) { struct ubifs_dent_node *dent; int nlen, err; /* If possible, match against the dent in the leaf node cache */ if (!zbr->leaf) { dent = kmalloc(zbr->len, GFP_NOFS); if (!dent) return -ENOMEM; err = fallible_read_node(c, &zbr->key, zbr, dent); if (err < 0) goto out_free; if (err == 0) { /* The node was not present */ err = NOT_ON_MEDIA; goto out_free; } ubifs_assert(err == 1); err = lnc_add_directly(c, zbr, dent); if (err) goto out_free; } else dent = zbr->leaf; nlen = le16_to_cpu(dent->nlen); err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len)); if (err == 0) { if (nlen == nm->len) return NAME_MATCHES; else if (nlen < nm->len) return NAME_LESS; else return NAME_GREATER; } else if (err < 0) return NAME_LESS; else return NAME_GREATER; out_free: kfree(dent); return err; } /** * fallible_resolve_collision - resolve a collision even if nodes are missing. * @c: UBIFS file-system description object * @key: key * @zn: znode is returned here * @n: branch number is passed and returned here * @nm: name of directory entry * @adding: indicates caller is adding a key to the TNC * * This is a "fallible" version of the 'resolve_collision()' function which * does not panic if one of the nodes referred to by TNC does not exist on the * media. This may happen when replaying the journal if a deleted node was * Garbage-collected and the commit was not done. A branch that refers to a node * that is not present is called a dangling branch. The following are the return * codes for this function: * o if @nm was found, %1 is returned and @zn and @n are set to the found * branch; * o if we are @adding and @nm was not found, %0 is returned; * o if we are not @adding and @nm was not found, but a dangling branch was * found, then %1 is returned and @zn and @n are set to the dangling branch; * o a negative error code is returned in case of failure. 
*/ static int fallible_resolve_collision(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_znode **zn, int *n, const struct qstr *nm, int adding) { struct ubifs_znode *o_znode = NULL, *znode = *zn; int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n; cmp = fallible_matches_name(c, &znode->zbranch[nn], nm); if (unlikely(cmp < 0)) return cmp; if (cmp == NAME_MATCHES) return 1; if (cmp == NOT_ON_MEDIA) { o_znode = znode; o_n = nn; /* * We are unlucky and hit a dangling branch straight away. * Now we do not really know where to go to find the needed * branch - to the left or to the right. Well, let's try left. */ unsure = 1; } else if (!adding) unsure = 1; /* Remove a dangling branch wherever it is */ if (cmp == NAME_GREATER || unsure) { /* Look left */ while (1) { err = tnc_prev(c, zn, n); if (err == -ENOENT) { ubifs_assert(*n == 0); *n = -1; break; } if (err < 0) return err; if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { /* See comments in 'resolve_collision()' */ if (*n == (*zn)->child_cnt - 1) { err = tnc_next(c, zn, n); if (err) { /* Should be impossible */ ubifs_assert(0); if (err == -ENOENT) err = -EINVAL; return err; } ubifs_assert(*n == 0); *n = -1; } break; } err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm); if (err < 0) return err; if (err == NAME_MATCHES) return 1; if (err == NOT_ON_MEDIA) { o_znode = *zn; o_n = *n; continue; } if (!adding) continue; if (err == NAME_LESS) break; else unsure = 0; } } if (cmp == NAME_LESS || unsure) { /* Look right */ *zn = znode; *n = nn; while (1) { err = tnc_next(c, &znode, &nn); if (err == -ENOENT) break; if (err < 0) return err; if (keys_cmp(c, &znode->zbranch[nn].key, key)) break; err = fallible_matches_name(c, &znode->zbranch[nn], nm); if (err < 0) return err; if (err == NAME_GREATER) break; *zn = znode; *n = nn; if (err == NAME_MATCHES) return 1; if (err == NOT_ON_MEDIA) { o_znode = znode; o_n = nn; } } } /* Never match a dangling branch when adding */ if (adding || !o_znode) return 0; 
dbg_mntk(key, "dangling match LEB %d:%d len %d key ", o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs, o_znode->zbranch[o_n].len); *zn = o_znode; *n = o_n; return 1; } /** * matches_position - determine if a zbranch matches a given position. * @zbr: zbranch of dent * @lnum: LEB number of dent to match * @offs: offset of dent to match * * This function returns %1 if @lnum:@offs matches, and %0 otherwise. */ static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs) { if (zbr->lnum == lnum && zbr->offs == offs) return 1; else return 0; } /** * resolve_collision_directly - resolve a collision directly. * @c: UBIFS file-system description object * @key: key of directory entry * @zn: znode is passed and returned here * @n: zbranch number is passed and returned here * @lnum: LEB number of dent node to match * @offs: offset of dent node to match * * This function is used for "hashed" keys to make sure the found directory or * extended attribute entry node is what was looked for. It is used when the * flash address of the right node is known (@lnum:@offs) which makes it much * easier to resolve collisions (no need to read entries and match full * names). This function returns %1 and sets @zn and @n if the collision is * resolved, %0 if @lnum:@offs is not found and @zn and @n are set to the * previous directory entry. Otherwise a negative error code is returned. 
*/ static int resolve_collision_directly(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_znode **zn, int *n, int lnum, int offs) { struct ubifs_znode *znode; int nn, err; znode = *zn; nn = *n; if (matches_position(&znode->zbranch[nn], lnum, offs)) return 1; /* Look left */ while (1) { err = tnc_prev(c, &znode, &nn); if (err == -ENOENT) break; if (err < 0) return err; if (keys_cmp(c, &znode->zbranch[nn].key, key)) break; if (matches_position(&znode->zbranch[nn], lnum, offs)) { *zn = znode; *n = nn; return 1; } } /* Look right */ znode = *zn; nn = *n; while (1) { err = tnc_next(c, &znode, &nn); if (err == -ENOENT) return 0; if (err < 0) return err; if (keys_cmp(c, &znode->zbranch[nn].key, key)) return 0; *zn = znode; *n = nn; if (matches_position(&znode->zbranch[nn], lnum, offs)) return 1; } } /** * dirty_cow_bottom_up - dirty a znode and its ancestors. * @c: UBIFS file-system description object * @znode: znode to dirty * * If we do not have a unique key that resides in a znode, then we cannot * dirty that znode from the top down (i.e. by using lookup_level0_dirty) * This function records the path back to the last dirty ancestor, and then * dirties the znodes on that path. 
*/ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c, struct ubifs_znode *znode) { struct ubifs_znode *zp; int *path = c->bottom_up_buf, p = 0; ubifs_assert(c->zroot.znode); ubifs_assert(znode); if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) { kfree(c->bottom_up_buf); c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int), GFP_NOFS); if (!c->bottom_up_buf) return ERR_PTR(-ENOMEM); path = c->bottom_up_buf; } if (c->zroot.znode->level) { /* Go up until parent is dirty */ while (1) { int n; zp = znode->parent; if (!zp) break; n = znode->iip; ubifs_assert(p < c->zroot.znode->level); path[p++] = n; if (!zp->cnext && ubifs_zn_dirty(znode)) break; znode = zp; } } /* Come back down, dirtying as we go */ while (1) { struct ubifs_zbranch *zbr; zp = znode->parent; if (zp) { ubifs_assert(path[p - 1] >= 0); ubifs_assert(path[p - 1] < zp->child_cnt); zbr = &zp->zbranch[path[--p]]; znode = dirty_cow_znode(c, zbr); } else { ubifs_assert(znode == c->zroot.znode); znode = dirty_cow_znode(c, &c->zroot); } if (IS_ERR(znode) || !p) break; ubifs_assert(path[p - 1] >= 0); ubifs_assert(path[p - 1] < znode->child_cnt); znode = znode->zbranch[path[p - 1]].znode; } return znode; } /** * ubifs_lookup_level0 - search for zero-level znode. * @c: UBIFS file-system description object * @key: key to lookup * @zn: znode is returned here * @n: znode branch slot number is returned here * * This function looks up the TNC tree and search for zero-level znode which * refers key @key. The found zero-level znode is returned in @zn. There are 3 * cases: * o exact match, i.e. 
the found zero-level znode contains key @key, then %1 * is returned and slot number of the matched branch is stored in @n; * o not exact match, which means that zero-level znode does not contain * @key, then %0 is returned and slot number of the closest branch is stored * in @n; * o @key is so small that it is even less than the lowest key of the * leftmost zero-level node, then %0 is returned and %0 is stored in @n. * * Note, when the TNC tree is traversed, some znodes may be absent, then this * function reads corresponding indexing nodes and inserts them to TNC. In * case of failure, a negative error code is returned. */ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_znode **zn, int *n) { int err, exact; struct ubifs_znode *znode; unsigned long time = get_seconds(); dbg_tnck(key, "search key "); ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY); znode = c->zroot.znode; if (unlikely(!znode)) { znode = ubifs_load_znode(c, &c->zroot, NULL, 0); if (IS_ERR(znode)) return PTR_ERR(znode); } znode->time = time; while (1) { struct ubifs_zbranch *zbr; exact = ubifs_search_zbranch(c, znode, key, n); if (znode->level == 0) break; if (*n < 0) *n = 0; zbr = &znode->zbranch[*n]; if (zbr->znode) { znode->time = time; znode = zbr->znode; continue; } /* znode is not in TNC cache, load it from the media */ znode = ubifs_load_znode(c, zbr, znode, *n); if (IS_ERR(znode)) return PTR_ERR(znode); } *zn = znode; if (exact || !is_hash_key(c, key) || *n != -1) { dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n); return exact; } /* * Here is a tricky place. We have not found the key and this is a * "hashed" key, which may collide. The rest of the code deals with * situations like this: * * | 3 | 5 | * / \ * | 3 | 5 | | 6 | 7 | (x) * * Or more a complex example: * * | 1 | 5 | * / \ * | 1 | 3 | | 5 | 8 | * \ / * | 5 | 5 | | 6 | 7 | (x) * * In the examples, if we are looking for key "5", we may reach nodes * marked with "(x)". 
In this case what we have do is to look at the * left and see if there is "5" key there. If there is, we have to * return it. * * Note, this whole situation is possible because we allow to have * elements which are equivalent to the next key in the parent in the * children of current znode. For example, this happens if we split a * znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something * like this: * | 3 | 5 | * / \ * | 3 | 5 | | 5 | 6 | 7 | * ^ * And this becomes what is at the first "picture" after key "5" marked * with "^" is removed. What could be done is we could prohibit * splitting in the middle of the colliding sequence. Also, when * removing the leftmost key, we would have to correct the key of the * parent node, which would introduce additional complications. Namely, * if we changed the leftmost key of the parent znode, the garbage * collector would be unable to find it (GC is doing this when GC'ing * indexing LEBs). Although we already have an additional RB-tree where * we save such changed znodes (see 'ins_clr_old_idx_znode()') until * after the commit. But anyway, this does not look easy to implement * so we did not try this. */ err = tnc_prev(c, &znode, n); if (err == -ENOENT) { dbg_tnc("found 0, lvl %d, n -1", znode->level); *n = -1; return 0; } if (unlikely(err < 0)) return err; if (keys_cmp(c, key, &znode->zbranch[*n].key)) { dbg_tnc("found 0, lvl %d, n -1", znode->level); *n = -1; return 0; } dbg_tnc("found 1, lvl %d, n %d", znode->level, *n); *zn = znode; return 1; } /** * lookup_level0_dirty - search for zero-level znode dirtying. * @c: UBIFS file-system description object * @key: key to lookup * @zn: znode is returned here * @n: znode branch slot number is returned here * * This function looks up the TNC tree and search for zero-level znode which * refers key @key. The found zero-level znode is returned in @zn. There are 3 * cases: * o exact match, i.e. 
the found zero-level znode contains key @key, then %1
 *   is returned and slot number of the matched branch is stored in @n;
 * o not exact match, which means that zero-level znode does not contain @key
 *   then %0 is returned and slot number of the closest branch is stored in
 *   @n;
 * o @key is so small that it is even less than the lowest key of the
 *   leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Additionally all znodes in the path from the root to the located zero-level
 * znode are marked as dirty.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnck(key, "search and dirty key ");

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	/* Dirty (copy-on-write if needed) every znode on the path, root first */
	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);
		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See huge comment in 'ubifs_lookup_level0()' for what the rest of the
	 * code does.
*/ err = tnc_prev(c, &znode, n); if (err == -ENOENT) { *n = -1; dbg_tnc("found 0, lvl %d, n -1", znode->level); return 0; } if (unlikely(err < 0)) return err; if (keys_cmp(c, key, &znode->zbranch[*n].key)) { *n = -1; dbg_tnc("found 0, lvl %d, n -1", znode->level); return 0; } if (znode->cnext || !ubifs_zn_dirty(znode)) { znode = dirty_cow_bottom_up(c, znode); if (IS_ERR(znode)) return PTR_ERR(znode); } dbg_tnc("found 1, lvl %d, n %d", znode->level, *n); *zn = znode; return 1; } /** * maybe_leb_gced - determine if a LEB may have been garbage collected. * @c: UBIFS file-system description object * @lnum: LEB number * @gc_seq1: garbage collection sequence number * * This function determines if @lnum may have been garbage collected since * sequence number @gc_seq1. If it may have been then %1 is returned, otherwise * %0 is returned. */ static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1) { int gc_seq2, gced_lnum; gced_lnum = c->gced_lnum; smp_rmb(); gc_seq2 = c->gc_seq; /* Same seq means no GC */ if (gc_seq1 == gc_seq2) return 0; /* Different by more than 1 means we don't know */ if (gc_seq1 + 1 != gc_seq2) return 1; /* * We have seen the sequence number has increased by 1. Now we need to * be sure we read the right LEB number, so read it again. */ smp_rmb(); if (gced_lnum != c->gced_lnum) return 1; /* Finally we can check lnum */ if (gced_lnum == lnum) return 1; return 0; } /** * ubifs_tnc_locate - look up a file-system node and return it and its location. * @c: UBIFS file-system description object * @key: node key to lookup * @node: the node is returned here * @lnum: LEB number is returned here * @offs: offset is returned here * * This function looks up and reads node with key @key. The caller has to make * sure the @node buffer is large enough to fit the node. Returns zero in case * of success, %-ENOENT if the node was not found, and a negative error code in * case of failure. The node location can be returned in @lnum and @offs. 
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	/* Return the on-flash location only if the caller asked for it */
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * In this case the leaf node cache gets used, so we pass the
		 * address of the zbranch and keep the mutex locked
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Second pass after a GC race: read under the TNC mutex */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}
	/* Drop the TNC mutex prematurely and race with garbage collection */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* We do not GC journal heads */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node may have been GC'ed out from under us so try again
		 * while keeping the TNC mutex locked.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. This function returns zero in case of success
 * and a negative error code in case of failure.
 *
 * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
 * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
 * maximum possible amount of nodes for bulk-read.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Key found */
		len = znode->zbranch[n].len;
		/* The buffer must be big enough for at least 1 node */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}
		/* Add this key */
		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		/* Nodes are 8-byte aligned on the media */
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find next key */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;
		/* See if there is another data key for this file */
		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First key found */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The data nodes must be in consecutive positions in
			 * the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Must not exceed buffer length */
			if (len > bu->buf_len)
				goto out;
		}
		/* Allow for holes */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;
		/* Add this key */
		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* See if we have room for more */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		/* Running off the end of this inode's data is not an error */
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;
	/*
	 * An enormous hole could cause bulk-read to encompass too many
	 * page cache pages, so limit the number here.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;
	/*
	 * Ensure that bulk-read covers a whole number of page cache
	 * pages.
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page cache page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}

/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This functions returns %0 on success or a negative error code on failure.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
		     int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int rlen, overlap;

	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(offs + len <= c->leb_size);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_leb_read(c, lnum, buf, offs, len, 0);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/*
	 * Copy the rest from the write-buffer while it is still locked, so
	 * the buffered data cannot change under us.
	 */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0)
		/* Read everything that goes before write-buffer */
		return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);

	return 0;
}

/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This functions returns %0 on success or a negative error code on failure.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	/* Checks the common header, CRC etc. of the node in @buf */
	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	/* The on-media length must match what the index recorded */
	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* Make sure the key of the read node is correct */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnck(&zbr->key, "looked for key ");
		dbg_tnck(&key1, "found node's key ");
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return err;
}

/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This functions reads and validates the data nodes that were identified by the
 * 'ubifs_tnc_get_bu_keys()' function. This functions returns %0 on success,
 * -EAGAIN to indicate a race with GC, or another negative error code on
 * failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	/* Total span from the first node's start to the last node's end */
	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dump_stack();
		dbg_tnck(&bu->key, "key ");
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}

/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function look up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	/* Walk colliding entries until one with matching name @nm is found */
	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function look up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * We assume that in most of the cases there are no name collisions and
	 * 'ubifs_tnc_lookup()' returns us the right direntry.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * Unluckily, there are hash collisions and we have to iterate over
	 * them look at each direntry with colliding name hash sequentially.
	 */
	return do_lookup_nm(c, key, node, nm);
}

/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, keys of parent znodes have to be corrected. This helper
 * function is called in such situations and corrects the keys if needed.
 */
static void correct_parent_keys(const struct ubifs_info *c,
				struct ubifs_znode *znode)
{
	union ubifs_key *key, *key1;

	ubifs_assert(znode->parent);
	ubifs_assert(znode->iip == 0);

	key = &znode->zbranch[0].key;
	key1 = &znode->parent->zbranch[0].key;

	/* Propagate the new leftmost key upwards while it is smaller */
	while (keys_cmp(c, key, key1) < 0) {
		key_copy(c, key, key1);
		znode = znode->parent;
		znode->alt = 1;
		if (!znode->parent || znode->iip)
			break;
		key1 = &znode->parent->zbranch[0].key;
	}
}

/**
 * insert_zbranch - insert a zbranch into a znode.
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps" in
 * znode's array of zbranches and keeps zbranches consolidated, so when a new
 * zbranch has to be inserted to the @znode->zbranches[]' array at the @n-th
 * slot, zbranches starting from @n have to be moved right.
 */
static void insert_zbranch(struct ubifs_znode *znode,
			   const struct ubifs_zbranch *zbr, int n)
{
	int i;

	ubifs_assert(ubifs_zn_dirty(znode));

	if (znode->level) {
		/* Non-leaf level: children's @iip back-pointers must follow */
		for (i = znode->child_cnt; i > n; i--) {
			znode->zbranch[i] = znode->zbranch[i - 1];
			if (znode->zbranch[i].znode)
				znode->zbranch[i].znode->iip = i;
		}
		if (zbr->znode)
			zbr->znode->iip = n;
	} else
		for (i = znode->child_cnt; i > n; i--)
			znode->zbranch[i] = znode->zbranch[i - 1];

	znode->zbranch[n] = *zbr;
	znode->child_cnt += 1;

	/*
	 * After inserting at slot zero, the lower bound of the key range of
	 * this znode may have changed. If this znode is subsequently split
	 * then the upper bound of the key range may change, and furthermore
	 * it could change to be lower than the original lower bound. If that
	 * happens, then it will no longer be possible to find this znode in the
	 * TNC using the key from the index node on flash. That is bad because
	 * if it is not found, we will assume it is obsolete and may overwrite
	 * it. Then if there is an unclean unmount, we will start using the
	 * old index which will be broken.
	 *
	 * So we first mark znodes that have insertions at slot zero, and then
	 * if they are split we add their lnum/offs to the old_idx tree.
	 */
	if (n == 0)
		znode->alt = 1;
}

/**
 * tnc_insert - insert a node into TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert new zbranch to
 *
 * This function inserts a new node described by @zbr into znode @znode. If
 * znode does not have a free slot for new zbranch, it is split. Parent znodes
 * are split as well if needed. Returns zero in case of success or a negative
 * error code in case of failure.
 */
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
		      struct ubifs_zbranch *zbr, int n)
{
	struct ubifs_znode *zn, *zi, *zp;
	int i, keep, move, appending = 0;
	union ubifs_key *key = &zbr->key, *key1;

	ubifs_assert(n >= 0 && n <= c->fanout);

	/* Implement naive insert for now */
again:
	zp = znode->parent;
	if (znode->child_cnt < c->fanout) {
		ubifs_assert(n != c->fanout);
		dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);

		insert_zbranch(znode, zbr, n);

		/* Ensure parent's key is correct */
		if (n == 0 && zp && znode->iip == 0)
			correct_parent_keys(c, znode);

		return 0;
	}

	/*
	 * Unfortunately, @znode does not have more empty slots and we have to
	 * split it.
	 */
	dbg_tnck(key, "splitting level %d, key ", znode->level);

	if (znode->alt)
		/*
		 * We can no longer be sure of finding this znode by key, so we
		 * record it in the old_idx tree.
		 */
		ins_clr_old_idx_znode(c, znode);

	zn = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zn)
		return -ENOMEM;
	zn->parent = zp;
	zn->level = znode->level;

	/* Decide where to split */
	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
		/* Try not to split consecutive data keys */
		if (n == c->fanout) {
			key1 = &znode->zbranch[n - 1].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY)
				appending = 1;
		} else
			goto check_split;
	} else if (appending && n != c->fanout) {
		/* Try not to split consecutive data keys */
		appending = 0;
check_split:
		if (n >= (c->fanout + 1) / 2) {
			key1 = &znode->zbranch[0].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY) {
				key1 = &znode->zbranch[n].key;
				if (key_inum(c, key1) != key_inum(c, key) ||
				    key_type(c, key1) != UBIFS_DATA_KEY) {
					keep = n;
					move = c->fanout - keep;
					zi = znode;
					goto do_split;
				}
			}
		}
	}

	if (appending) {
		/* Sequential append: keep old znode full, new one empty */
		keep = c->fanout;
		move = 0;
	} else {
		/* Even split */
		keep = (c->fanout + 1) / 2;
		move = c->fanout - keep;
	}

	/*
	 * Although we don't at present, we could look at the neighbors and see
	 * if we can move some zbranches there.
	 */

	if (n < keep) {
		/* Insert into existing znode */
		zi = znode;
		move += 1;
		keep -= 1;
	} else {
		/* Insert into new znode */
		zi = zn;
		n -= keep;
		/* Re-parent */
		if (zn->level != 0)
			zbr->znode->parent = zn;
	}

do_split:

	__set_bit(DIRTY_ZNODE, &zn->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zn->child_cnt = move;
	znode->child_cnt = keep;

	dbg_tnc("moving %d, keeping %d", move, keep);

	/* Move zbranch */
	for (i = 0; i < move; i++) {
		zn->zbranch[i] = znode->zbranch[keep + i];
		/* Re-parent */
		if (zn->level != 0)
			if (zn->zbranch[i].znode) {
				zn->zbranch[i].znode->parent = zn;
				zn->zbranch[i].znode->iip = i;
			}
	}

	/* Insert new key and branch */
	dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);

	insert_zbranch(zi, zbr, n);

	/* Insert new znode (produced by splitting) into the parent */
	if (zp) {
		if (n == 0 && zi == znode && znode->iip == 0)
			correct_parent_keys(c, znode);

		/* Locate insertion point */
		n = znode->iip + 1;

		/* Tail recursion - re-use @zbr to insert @zn into the parent */
		zbr->key = zn->zbranch[0].key;
		zbr->znode = zn;
		zbr->lnum = 0;
		zbr->offs = 0;
		zbr->len = 0;
		znode = zp;

		goto again;
	}

	/* We have to split root znode */
	dbg_tnc("creating new zroot at level %d", znode->level + 1);

	zi = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zi)
		return -ENOMEM;

	zi->child_cnt = 2;
	zi->level = znode->level + 1;

	__set_bit(DIRTY_ZNODE, &zi->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zi->zbranch[0].key = znode->zbranch[0].key;
	zi->zbranch[0].znode = znode;
	zi->zbranch[0].lnum = c->zroot.lnum;
	zi->zbranch[0].offs = c->zroot.offs;
	zi->zbranch[0].len = c->zroot.len;
	zi->zbranch[1].key = zn->zbranch[0].key;
	zi->zbranch[1].znode = zn;

	/* The new root is dirty and has no on-media location yet */
	c->zroot.lnum = 0;
	c->zroot.offs = 0;
	c->zroot.len = 0;
	c->zroot.znode = zi;

	zn->parent = zi;
	zn->iip = 1;
	znode->parent = zi;
	znode->iip = 0;

	return 0;
}

/**
 * ubifs_tnc_add - add a node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function adds a node with key @key to TNC.
   The node may be new or it may
 * obsolete some existing one. Returns %0 on success or negative error code on
 * failure.
 */
int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
		  int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (!found) {
		/* Not found: insert a brand new zbranch at slot @n + 1 */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
	} else if (found == 1) {
		/* Found: the old location becomes dirt, update in place */
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		lnc_free(zbr);
		err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else
		err = found;
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);

	return err;
}

/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * is found. This function is called by garbage collection when node are moved.
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
		 old_offs, lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			/* The index still points at the old copy - replace */
			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/*
			 * Hashed keys may collide - search the colliding
			 * entries for the one at exactly @old_lnum:@old_offs.
			 */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	if (!found)
		/* Old node not referenced: the new copy is immediately dirt */
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may have collisions, like directory entry keys.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
		 lnum, offs, nm->len, nm->name);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		/* Key exists - disambiguate hash collisions by name */
		if (c->replaying)
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Same name found - update its location in place */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * We did not find it in the index so there may be a
			 * dangling branch still in the index. So we remove it
			 * by passing 'ubifs_tnc_remove_nm()' the same key but
			 * an unmatchable name.
			 */
			struct qstr noname = { .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * tnc_delete - delete a znode form TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from @n-th slot of @znode. Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnck(&znode->zbranch[n].key, "deleting key ");

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		ubifs_dump_znode(c, znode);
		return err;
	}

	/* We do not "gap" zbranch slots */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * This was the last zbranch, we have to delete this znode from the
	 * parent.
	 */

	do {
		ubifs_assert(!ubifs_zn_obsolete(znode));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/*
			 * The znode is being committed - it cannot be freed
			 * yet, mark it obsolete for the committer instead.
			 */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1); /* while removing last child */

	/* Remove from znode, entry n - 1 */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If this is the root and it has only 1 child then
	 * collapse the tree.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				/* Remember the old root's on-media position */
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!ubifs_zn_obsolete(zp));
			ubifs_assert(ubifs_zn_dirty(zp));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}

/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "key ");
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}
	if (found == 1)
		err = tnc_delete(c, znode, n);
	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* Key exists - disambiguate hash collisions by name @nm */
		if (c->replaying)
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range (inclusive bounds) and %0
 * otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	if (keys_cmp(c, key, from_key) < 0)
		return 0;
	if (keys_cmp(c, key, to_key) > 0)
		return 0;
	return 1;
}

/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find first level 0 znode that contains keys to remove */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* @from_key itself absent - check the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Remove all keys in range except the first */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				ubifs_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnck(key, "removing key ");
		}
		if (k) {
			/* Close the gap left by the @k removed zbranches */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Now delete the first */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function remove inode @inum and all the extended attributes associated
 * with the inode from TNC and returns zero in case of success or a negative
 * error code in case of failure.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/*
	 * Walk all extended attribute entries and remove them together with
	 * corresponding extended attribute inodes.
	 */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			if (err == -ENOENT)
				break;
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove the xattr inode's own index entries as well */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/*
		 * Keep @xent alive one more iteration - its name/key seed the
		 * next 'ubifs_tnc_next_ent()' call; free the previous one.
		 */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}

/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or %NULL
 *
 * This function finds and reads the next directory or extended attribute entry
 * after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If the name of the current entry is not known and only the key is known,
 * @nm->name has to be %NULL. In this case the semantics of this function is a
 * little bit different and it returns the entry corresponding to this key, not
 * the next one. If the key was not found, the closest "right" entry is
 * returned.
 *
 * If the first entry has to be found, @key has to contain the lowest possible
 * key value for this inode and @name has to be %NULL.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, %-ENOENT is returned if no entry was found, and a
 * negative error code is returned in case of failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Handle collisions */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* Now find next entry */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * The full name of the entry was not given, in which case the
		 * behavior of this function is a little different and it
		 * returns current entry, not the next one.
		 */
		if (!err) {
			/*
			 * However, the given key does not exist in the TNC
			 * tree and @znode/@n variables contain the closest
			 * "preceding" element. Switch to the next one.
			 */
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The above 'tnc_next()' call could lead us to the next inode, check
	 * this.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}

/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
* @c: UBIFS file-system description object * * Destroy left-over obsolete znodes from a failed commit. */ static void tnc_destroy_cnext(struct ubifs_info *c) { struct ubifs_znode *cnext; if (!c->cnext) return; ubifs_assert(c->cmt_state == COMMIT_BROKEN); cnext = c->cnext; do { struct ubifs_znode *znode = cnext; cnext = cnext->cnext; if (ubifs_zn_obsolete(znode)) kfree(znode); } while (cnext && cnext != c->cnext); } /** * ubifs_tnc_close - close TNC subsystem and free all related resources. * @c: UBIFS file-system description object */ void ubifs_tnc_close(struct ubifs_info *c) { tnc_destroy_cnext(c); if (c->zroot.znode) { long n; ubifs_destroy_tnc_subtree(c->zroot.znode); n = atomic_long_read(&c->clean_zn_cnt); atomic_long_sub(n, &ubifs_clean_zn_cnt); } kfree(c->gap_lebs); kfree(c->ilebs); destroy_old_idx(c); } /** * left_znode - get the znode to the left. * @c: UBIFS file-system description object * @znode: znode * * This function returns a pointer to the znode to the left of @znode or NULL if * there is not one. A negative error code is returned on failure. */ static struct ubifs_znode *left_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int level = znode->level; while (1) { int n = znode->iip - 1; /* Go up until we can go left */ znode = znode->parent; if (!znode) return NULL; if (n >= 0) { /* Now go down the rightmost branch to 'level' */ znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; while (znode->level != level) { n = znode->child_cnt - 1; znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; } break; } } return znode; } /** * right_znode - get the znode to the right. * @c: UBIFS file-system description object * @znode: znode * * This function returns a pointer to the znode to the right of @znode or NULL * if there is not one. A negative error code is returned on failure. 
*/ static struct ubifs_znode *right_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int level = znode->level; while (1) { int n = znode->iip + 1; /* Go up until we can go right */ znode = znode->parent; if (!znode) return NULL; if (n < znode->child_cnt) { /* Now go down the leftmost branch to 'level' */ znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; while (znode->level != level) { znode = get_znode(c, znode, 0); if (IS_ERR(znode)) return znode; } break; } } return znode; } /** * lookup_znode - find a particular indexing node from TNC. * @c: UBIFS file-system description object * @key: index node key to lookup * @level: index node level * @lnum: index node LEB number * @offs: index node offset * * This function searches an indexing node by its first key @key and its * address @lnum:@offs. It looks up the indexing tree by pulling all indexing * nodes it traverses to TNC. This function is called for indexing nodes which * were found on the media by scanning, for example when garbage-collecting or * when doing in-the-gaps commit. This means that the indexing node which is * looked for does not have to have exactly the same leftmost key @key, because * the leftmost key may have been changed, in which case TNC will contain a * dirty znode which still refers the same @lnum:@offs. This function is clever * enough to recognize such indexing nodes. * * Note, if a znode was deleted or changed too much, then this function will * not find it. For situations like this UBIFS has the old index RB-tree * (indexed by @lnum:@offs). * * This function returns a pointer to the znode found or %NULL if it is not * found. A negative error code is returned on failure. 
*/ static struct ubifs_znode *lookup_znode(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode, *zn; int n, nn; ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY); /* * The arguments have probably been read off flash, so don't assume * they are valid. */ if (level < 0) return ERR_PTR(-EINVAL); /* Get the root znode */ znode = c->zroot.znode; if (!znode) { znode = ubifs_load_znode(c, &c->zroot, NULL, 0); if (IS_ERR(znode)) return znode; } /* Check if it is the one we are looking for */ if (c->zroot.lnum == lnum && c->zroot.offs == offs) return znode; /* Descend to the parent level i.e. (level + 1) */ if (level >= znode->level) return NULL; while (1) { ubifs_search_zbranch(c, znode, key, &n); if (n < 0) { /* * We reached a znode where the leftmost key is greater * than the key we are searching for. This is the same * situation as the one described in a huge comment at * the end of the 'ubifs_lookup_level0()' function. And * for exactly the same reasons we have to try to look * left before giving up. */ znode = left_znode(c, znode); if (!znode) return NULL; if (IS_ERR(znode)) return znode; ubifs_search_zbranch(c, znode, key, &n); ubifs_assert(n >= 0); } if (znode->level == level + 1) break; znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; } /* Check if the child is the one we are looking for */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* If the key is unique, there is nowhere else to look */ if (!is_hash_key(c, key)) return NULL; /* * The key is not unique and so may be also in the znodes to either * side. 
*/ zn = znode; nn = n; /* Look left */ while (1) { /* Move one branch to the left */ if (n) n -= 1; else { znode = left_znode(c, znode); if (!znode) break; if (IS_ERR(znode)) return znode; n = znode->child_cnt - 1; } /* Check it */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* Stop if the key is less than the one we are looking for */ if (keys_cmp(c, &znode->zbranch[n].key, key) < 0) break; } /* Back to the middle */ znode = zn; n = nn; /* Look right */ while (1) { /* Move one branch to the right */ if (++n >= znode->child_cnt) { znode = right_znode(c, znode); if (!znode) break; if (IS_ERR(znode)) return znode; n = 0; } /* Check it */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* Stop if the key is greater than the one we are looking for */ if (keys_cmp(c, &znode->zbranch[n].key, key) > 0) break; } return NULL; } /** * is_idx_node_in_tnc - determine if an index node is in the TNC. * @c: UBIFS file-system description object * @key: key of index node * @level: index node level * @lnum: LEB number of index node * @offs: offset of index node * * This function returns %0 if the index node is not referred to in the TNC, %1 * if the index node is referred to in the TNC and the corresponding znode is * dirty, %2 if an index node is referred to in the TNC and the corresponding * znode is clean, and a negative error code in case of failure. * * Note, the @key argument has to be the key of the first child. Also note, * this function relies on the fact that 0:0 is never a valid LEB number and * offset for a main-area node. */ int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode; znode = lookup_znode(c, key, level, lnum, offs); if (!znode) return 0; if (IS_ERR(znode)) return PTR_ERR(znode); return ubifs_zn_dirty(znode) ? 
1 : 2; } /** * is_leaf_node_in_tnc - determine if a non-indexing not is in the TNC. * @c: UBIFS file-system description object * @key: node key * @lnum: node LEB number * @offs: node offset * * This function returns %1 if the node is referred to in the TNC, %0 if it is * not, and a negative error code in case of failure. * * Note, this function relies on the fact that 0:0 is never a valid LEB number * and offset for a main-area node. */ static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int lnum, int offs) { struct ubifs_zbranch *zbr; struct ubifs_znode *znode, *zn; int n, found, err, nn; const int unique = !is_hash_key(c, key); found = ubifs_lookup_level0(c, key, &znode, &n); if (found < 0) return found; /* Error code */ if (!found) return 0; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ if (unique) return 0; /* * Because the key is not unique, we have to look left * and right as well */ zn = znode; nn = n; /* Look left */ while (1) { err = tnc_prev(c, &znode, &n); if (err == -ENOENT) break; if (err) return err; if (keys_cmp(c, key, &znode->zbranch[n].key)) break; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ } /* Look right */ znode = zn; n = nn; while (1) { err = tnc_next(c, &znode, &n); if (err) { if (err == -ENOENT) return 0; return err; } if (keys_cmp(c, key, &znode->zbranch[n].key)) break; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ } return 0; } /** * ubifs_tnc_has_node - determine whether a node is in the TNC. * @c: UBIFS file-system description object * @key: node key * @level: index node level (if it is an index node) * @lnum: node LEB number * @offs: node offset * @is_idx: non-zero if the node is an index node * * This function returns %1 if the node is in the TNC, %0 if it is not, and a * negative error code in case of failure. 
For index nodes, @key has to be the * key of the first child. An index node is considered to be in the TNC only if * the corresponding znode is clean or has not been loaded. */ int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs, int is_idx) { int err; mutex_lock(&c->tnc_mutex); if (is_idx) { err = is_idx_node_in_tnc(c, key, level, lnum, offs); if (err < 0) goto out_unlock; if (err == 1) /* The index node was found but it was dirty */ err = 0; else if (err == 2) /* The index node was found and it was clean */ err = 1; else BUG_ON(err != 0); } else err = is_leaf_node_in_tnc(c, key, lnum, offs); out_unlock: mutex_unlock(&c->tnc_mutex); return err; } /** * ubifs_dirty_idx_node - dirty an index node. * @c: UBIFS file-system description object * @key: index node key * @level: index node level * @lnum: index node LEB number * @offs: index node offset * * This function loads and dirties an index node so that it can be garbage * collected. The @key argument has to be the key of the first child. This * function relies on the fact that 0:0 is never a valid LEB number and offset * for a main-area node. Returns %0 on success and a negative error code on * failure. */ int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode; int err = 0; mutex_lock(&c->tnc_mutex); znode = lookup_znode(c, key, level, lnum, offs); if (!znode) goto out_unlock; if (IS_ERR(znode)) { err = PTR_ERR(znode); goto out_unlock; } znode = dirty_cow_bottom_up(c, znode); if (IS_ERR(znode)) { err = PTR_ERR(znode); goto out_unlock; } out_unlock: mutex_unlock(&c->tnc_mutex); return err; } /** * dbg_check_inode_size - check if inode size is correct. * @c: UBIFS file-system description object * @inum: inode number * @size: inode size * * This function makes sure that the inode size (@size) is correct and it does * not have any pages beyond @size. 
Returns zero if the inode is OK, %-EINVAL * if it has a data page beyond @size, and other negative error code in case of * other errors. */ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, loff_t size) { int err, n; union ubifs_key from_key, to_key, *key; struct ubifs_znode *znode; unsigned int block; if (!S_ISREG(inode->i_mode)) return 0; if (!dbg_is_chk_gen(c)) return 0; block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; data_key_init(c, &from_key, inode->i_ino, block); highest_data_key(c, &to_key, inode->i_ino); mutex_lock(&c->tnc_mutex); err = ubifs_lookup_level0(c, &from_key, &znode, &n); if (err < 0) goto out_unlock; if (err) { err = -EINVAL; key = &from_key; goto out_dump; } err = tnc_next(c, &znode, &n); if (err == -ENOENT) { err = 0; goto out_unlock; } if (err < 0) goto out_unlock; ubifs_assert(err == 0); key = &znode->zbranch[n].key; if (!key_in_range(c, key, &from_key, &to_key)) goto out_unlock; out_dump: block = key_block(c, key); ubifs_err("inode %lu has size %lld, but there are data at offset %lld", (unsigned long)inode->i_ino, size, ((loff_t)block) << UBIFS_BLOCK_SHIFT); mutex_unlock(&c->tnc_mutex); ubifs_dump_inode(c, inode); dump_stack(); return -EINVAL; out_unlock: mutex_unlock(&c->tnc_mutex); return err; }
gpl-2.0
kinhoom/linux
arch/mips/dec/kn02xa-berr.c
2043
3957
/* * Bus error event handling code for 5000-series systems equipped * with parity error detection logic, i.e. DECstation/DECsystem * 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal * DECstation/DECsystem 5000/20, /25, /33 (KN02-CA), 5000/50 * (KN04-CA) systems. * * Copyright (c) 2005 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <asm/addrspace.h> #include <asm/cpu-type.h> #include <asm/irq_regs.h> #include <asm/ptrace.h> #include <asm/traps.h> #include <asm/dec/kn02ca.h> #include <asm/dec/kn02xa.h> #include <asm/dec/kn05.h> static inline void dec_kn02xa_be_ack(void) { volatile u32 *mer = (void *)CKSEG1ADDR(KN02XA_MER); volatile u32 *mem_intr = (void *)CKSEG1ADDR(KN02XA_MEM_INTR); *mer = KN02CA_MER_INTR; /* Clear errors; keep the ARC IRQ. */ *mem_intr = 0; /* Any write clears the bus IRQ. */ iob(); } static int dec_kn02xa_be_backend(struct pt_regs *regs, int is_fixup, int invoker) { volatile u32 *kn02xa_mer = (void *)CKSEG1ADDR(KN02XA_MER); volatile u32 *kn02xa_ear = (void *)CKSEG1ADDR(KN02XA_EAR); static const char excstr[] = "exception"; static const char intstr[] = "interrupt"; static const char cpustr[] = "CPU"; static const char mreadstr[] = "memory read"; static const char readstr[] = "read"; static const char writestr[] = "write"; static const char timestr[] = "timeout"; static const char paritystr[] = "parity error"; static const char lanestat[][4] = { " OK", "BAD" }; const char *kind, *agent, *cycle, *event; unsigned long address; u32 mer = *kn02xa_mer; u32 ear = *kn02xa_ear; int action = MIPS_BE_FATAL; /* Ack ASAP, so that any subsequent errors get caught. */ dec_kn02xa_be_ack(); kind = invoker ? 
intstr : excstr; /* No DMA errors? */ agent = cpustr; address = ear & KN02XA_EAR_ADDRESS; /* Low 256MB is decoded as memory, high -- as TC. */ if (address < 0x10000000) { cycle = mreadstr; event = paritystr; } else { cycle = invoker ? writestr : readstr; event = timestr; } if (is_fixup) action = MIPS_BE_FIXUP; if (action != MIPS_BE_FIXUP) printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n", kind, agent, cycle, event, address); if (action != MIPS_BE_FIXUP && address < 0x10000000) printk(KERN_ALERT " Byte lane status %#3x -- " "#3: %s, #2: %s, #1: %s, #0: %s\n", (mer & KN02XA_MER_BYTERR) >> 8, lanestat[(mer & KN02XA_MER_BYTERR_3) != 0], lanestat[(mer & KN02XA_MER_BYTERR_2) != 0], lanestat[(mer & KN02XA_MER_BYTERR_1) != 0], lanestat[(mer & KN02XA_MER_BYTERR_0) != 0]); return action; } int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup) { return dec_kn02xa_be_backend(regs, is_fixup, 0); } irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id) { struct pt_regs *regs = get_irq_regs(); int action = dec_kn02xa_be_backend(regs, 0, 1); if (action == MIPS_BE_DISCARD) return IRQ_HANDLED; /* * FIXME: Find the affected processes and kill them, otherwise * we must die. * * The interrupt is asynchronously delivered thus EPC and RA * may be irrelevant, but are printed for a reference. */ printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n", regs->cp0_epc, regs->regs[31]); die("Unrecoverable bus error", regs); } void __init dec_kn02xa_be_init(void) { volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR); /* For KN04 we need to make sure EE (?) is enabled in the MB. */ if (current_cpu_type() == CPU_R4000SC) *mbcs |= KN4K_MB_CSR_EE; fast_iob(); /* Clear any leftover errors from the firmware. */ dec_kn02xa_be_ack(); }
gpl-2.0
DevSwift/Kernel-3.4-NovaThor
net/compat-wireless/drivers/staging/ath6kl/bmi/src/bmi.c
2811
32630
//------------------------------------------------------------------------------ // <copyright file="bmi.c" company="Atheros"> // Copyright (c) 2004-2010 Atheros Corporation. All rights reserved. // // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // //------------------------------------------------------------------------------ //============================================================================== // // Author(s): ="Atheros" //============================================================================== #ifdef THREAD_X #include <string.h> #endif #include "hif.h" #include "bmi.h" #include "htc_api.h" #include "bmi_internal.h" #ifdef ATH_DEBUG_MODULE static struct ath_debug_mask_description bmi_debug_desc[] = { { ATH_DEBUG_BMI , "BMI Tracing"}, }; ATH_DEBUG_INSTANTIATE_MODULE_VAR(bmi, "bmi", "Boot Manager Interface", ATH_DEBUG_MASK_DEFAULTS, ATH_DEBUG_DESCRIPTION_COUNT(bmi_debug_desc), bmi_debug_desc); #endif /* Although we had envisioned BMI to run on top of HTC, this is not how the final implementation ended up. On the Target side, BMI is a part of the BSP and does not use the HTC protocol nor even DMA -- it is intentionally kept very simple. 
*/ static bool pendingEventsFuncCheck = false; static u32 *pBMICmdCredits; static u8 *pBMICmdBuf; #define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \ sizeof(u32) /* cmd */ + \ sizeof(u32) /* addr */ + \ sizeof(u32))/* length */ #define BMI_COMMAND_FITS(sz) ((sz) <= MAX_BMI_CMDBUF_SZ) /* APIs visible to the driver */ void BMIInit(void) { bmiDone = false; pendingEventsFuncCheck = false; /* * On some platforms, it's not possible to DMA to a static variable * in a device driver (e.g. Linux loadable driver module). * So we need to A_MALLOC space for "command credits" and for commands. * * Note: implicitly relies on A_MALLOC to provide a buffer that is * suitable for DMA (or PIO). This buffer will be passed down the * bus stack. */ if (!pBMICmdCredits) { pBMICmdCredits = (u32 *)A_MALLOC_NOWAIT(4); A_ASSERT(pBMICmdCredits); } if (!pBMICmdBuf) { pBMICmdBuf = (u8 *)A_MALLOC_NOWAIT(MAX_BMI_CMDBUF_SZ); A_ASSERT(pBMICmdBuf); } A_REGISTER_MODULE_DEBUG_INFO(bmi); } void BMICleanup(void) { if (pBMICmdCredits) { kfree(pBMICmdCredits); pBMICmdCredits = NULL; } if (pBMICmdBuf) { kfree(pBMICmdBuf); pBMICmdBuf = NULL; } } int BMIDone(struct hif_device *device) { int status; u32 cid; if (bmiDone) { AR_DEBUG_PRINTF (ATH_DEBUG_BMI, ("BMIDone skipped\n")); return 0; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Enter (device: 0x%p)\n", device)); bmiDone = true; cid = BMI_DONE; status = bmiBufferSend(device, (u8 *)&cid, sizeof(cid)); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } if (pBMICmdCredits) { kfree(pBMICmdCredits); pBMICmdCredits = NULL; } if (pBMICmdBuf) { kfree(pBMICmdBuf); pBMICmdBuf = NULL; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Done: Exit\n")); return 0; } int BMIGetTargetInfo(struct hif_device *device, struct bmi_target_info *targ_info) { int status; u32 cid; if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Enter (device: 0x%p)\n", 
device)); cid = BMI_GET_TARGET_INFO; status = bmiBufferSend(device, (u8 *)&cid, sizeof(cid)); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } status = bmiBufferReceive(device, (u8 *)&targ_info->target_ver, sizeof(targ_info->target_ver), true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Version from the device\n")); return A_ERROR; } if (targ_info->target_ver == TARGET_VERSION_SENTINAL) { /* Determine how many bytes are in the Target's targ_info */ status = bmiBufferReceive(device, (u8 *)&targ_info->target_info_byte_count, sizeof(targ_info->target_info_byte_count), true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info Byte Count from the device\n")); return A_ERROR; } /* * The Target's targ_info doesn't match the Host's targ_info. * We need to do some backwards compatibility work to make this OK. */ A_ASSERT(targ_info->target_info_byte_count == sizeof(*targ_info)); /* Read the remainder of the targ_info */ status = bmiBufferReceive(device, ((u8 *)targ_info)+sizeof(targ_info->target_info_byte_count), sizeof(*targ_info)-sizeof(targ_info->target_info_byte_count), true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read Target Info (%d bytes) from the device\n", targ_info->target_info_byte_count)); return A_ERROR; } } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n", targ_info->target_ver, targ_info->target_type)); return 0; } int BMIReadMemory(struct hif_device *device, u32 address, u8 *buffer, u32 length) { u32 cid; int status; u32 offset; u32 remaining, rxlen; A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length))); memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(address) + sizeof(length)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read Memory: Enter (device: 0x%p, address: 0x%x, 
length: %d)\n", device, address, length)); cid = BMI_READ_MEMORY; remaining = length; while (remaining) { rxlen = (remaining < BMI_DATASZ_MAX) ? remaining : BMI_DATASZ_MAX; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); memcpy(&(pBMICmdBuf[offset]), &rxlen, sizeof(rxlen)); offset += sizeof(length); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } status = bmiBufferReceive(device, pBMICmdBuf, rxlen, true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n")); return A_ERROR; } memcpy(&buffer[length - remaining], pBMICmdBuf, rxlen); remaining -= rxlen; address += rxlen; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read Memory: Exit\n")); return 0; } int BMIWriteMemory(struct hif_device *device, u32 address, u8 *buffer, u32 length) { u32 cid; int status; u32 offset; u32 remaining, txlen; const u32 header = sizeof(cid) + sizeof(address) + sizeof(length); u8 alignedBuffer[BMI_DATASZ_MAX]; u8 *src; A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header)); memset (pBMICmdBuf, 0, BMI_DATASZ_MAX + header); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Write Memory: Enter (device: 0x%p, address: 0x%x, length: %d)\n", device, address, length)); cid = BMI_WRITE_MEMORY; remaining = length; while (remaining) { src = &buffer[length - remaining]; if (remaining < (BMI_DATASZ_MAX - header)) { if (remaining & 3) { /* align it with 4 bytes */ remaining = remaining + (4 - (remaining & 3)); memcpy(alignedBuffer, src, remaining); src = alignedBuffer; } txlen = remaining; } else { txlen = (BMI_DATASZ_MAX - header); } offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += 
sizeof(address); memcpy(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen)); offset += sizeof(txlen); memcpy(&(pBMICmdBuf[offset]), src, txlen); offset += txlen; status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } remaining -= txlen; address += txlen; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Write Memory: Exit\n")); return 0; } int BMIExecute(struct hif_device *device, u32 address, u32 *param) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Execute: Enter (device: 0x%p, address: 0x%x, param: %d)\n", device, address, *param)); cid = BMI_EXECUTE; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); memcpy(&(pBMICmdBuf[offset]), param, sizeof(*param)); offset += sizeof(*param); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), false); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n")); return A_ERROR; } memcpy(param, pBMICmdBuf, sizeof(*param)); AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Execute: Exit (param: %d)\n", *param)); return 0; } int BMISetAppStart(struct hif_device *device, u32 address) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Set App Start: Enter (device: 0x%p, address: 
0x%x)\n", device, address)); cid = BMI_SET_APP_START; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Set App Start: Exit\n")); return 0; } int BMIReadSOCRegister(struct hif_device *device, u32 address, u32 *param) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Enter (device: 0x%p, address: 0x%x)\n", device, address)); cid = BMI_READ_SOC_REGISTER; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*param), true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n")); return A_ERROR; } memcpy(param, pBMICmdBuf, sizeof(*param)); AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit (value: %d)\n", *param)); return 0; } int BMIWriteSOCRegister(struct hif_device *device, u32 address, u32 param) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address) + sizeof(param))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address) + sizeof(param)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Write SOC Register: Enter (device: 0x%p, address: 0x%x, param: 
%d)\n", device, address, param)); cid = BMI_WRITE_SOC_REGISTER; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); memcpy(&(pBMICmdBuf[offset]), &param, sizeof(param)); offset += sizeof(param); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Read SOC Register: Exit\n")); return 0; } int BMIrompatchInstall(struct hif_device *device, u32 ROM_addr, u32 RAM_addr, u32 nbytes, u32 do_activate, u32 *rompatch_id) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) + sizeof(nbytes) + sizeof(do_activate))); memset(pBMICmdBuf, 0, sizeof(cid) + sizeof(ROM_addr) + sizeof(RAM_addr) + sizeof(nbytes) + sizeof(do_activate)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch Install: Enter (device: 0x%p, ROMaddr: 0x%x, RAMaddr: 0x%x length: %d activate: %d)\n", device, ROM_addr, RAM_addr, nbytes, do_activate)); cid = BMI_ROMPATCH_INSTALL; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &ROM_addr, sizeof(ROM_addr)); offset += sizeof(ROM_addr); memcpy(&(pBMICmdBuf[offset]), &RAM_addr, sizeof(RAM_addr)); offset += sizeof(RAM_addr); memcpy(&(pBMICmdBuf[offset]), &nbytes, sizeof(nbytes)); offset += sizeof(nbytes); memcpy(&(pBMICmdBuf[offset]), &do_activate, sizeof(do_activate)); offset += sizeof(do_activate); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } status = bmiBufferReceive(device, pBMICmdBuf, sizeof(*rompatch_id), true); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read from the device\n")); return 
A_ERROR; } memcpy(rompatch_id, pBMICmdBuf, sizeof(*rompatch_id)); AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch Install: (rompatch_id=%d)\n", *rompatch_id)); return 0; } int BMIrompatchUninstall(struct hif_device *device, u32 rompatch_id) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(rompatch_id))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(rompatch_id)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch Uninstall: Enter (device: 0x%p, rompatch_id: %d)\n", device, rompatch_id)); cid = BMI_ROMPATCH_UNINSTALL; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &rompatch_id, sizeof(rompatch_id)); offset += sizeof(rompatch_id); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI rompatch UNinstall: (rompatch_id=0x%x)\n", rompatch_id)); return 0; } static int _BMIrompatchChangeActivation(struct hif_device *device, u32 rompatch_count, u32 *rompatch_list, u32 do_activate) { u32 cid; int status; u32 offset; u32 length; A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count))); memset(pBMICmdBuf, 0, BMI_DATASZ_MAX + sizeof(cid) + sizeof(rompatch_count)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Change rompatch Activation: Enter (device: 0x%p, count: %d)\n", device, rompatch_count)); cid = do_activate ? 
BMI_ROMPATCH_ACTIVATE : BMI_ROMPATCH_DEACTIVATE; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &rompatch_count, sizeof(rompatch_count)); offset += sizeof(rompatch_count); length = rompatch_count * sizeof(*rompatch_list); memcpy(&(pBMICmdBuf[offset]), rompatch_list, length); offset += length; status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Change rompatch Activation: Exit\n")); return 0; } int BMIrompatchActivate(struct hif_device *device, u32 rompatch_count, u32 *rompatch_list) { return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 1); } int BMIrompatchDeactivate(struct hif_device *device, u32 rompatch_count, u32 *rompatch_list) { return _BMIrompatchChangeActivation(device, rompatch_count, rompatch_list, 0); } int BMILZData(struct hif_device *device, u8 *buffer, u32 length) { u32 cid; int status; u32 offset; u32 remaining, txlen; const u32 header = sizeof(cid) + sizeof(length); A_ASSERT(BMI_COMMAND_FITS(BMI_DATASZ_MAX+header)); memset (pBMICmdBuf, 0, BMI_DATASZ_MAX+header); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI Send LZ Data: Enter (device: 0x%p, length: %d)\n", device, length)); cid = BMI_LZ_DATA; remaining = length; while (remaining) { txlen = (remaining < (BMI_DATASZ_MAX - header)) ? 
remaining : (BMI_DATASZ_MAX - header); offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &txlen, sizeof(txlen)); offset += sizeof(txlen); memcpy(&(pBMICmdBuf[offset]), &buffer[length - remaining], txlen); offset += txlen; status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to write to the device\n")); return A_ERROR; } remaining -= txlen; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Data: Exit\n")); return 0; } int BMILZStreamStart(struct hif_device *device, u32 address) { u32 cid; int status; u32 offset; A_ASSERT(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address))); memset (pBMICmdBuf, 0, sizeof(cid) + sizeof(address)); if (bmiDone) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Command disallowed\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Stream Start: Enter (device: 0x%p, address: 0x%x)\n", device, address)); cid = BMI_LZ_STREAM_START; offset = 0; memcpy(&(pBMICmdBuf[offset]), &cid, sizeof(cid)); offset += sizeof(cid); memcpy(&(pBMICmdBuf[offset]), &address, sizeof(address)); offset += sizeof(address); status = bmiBufferSend(device, pBMICmdBuf, offset); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to Start LZ Stream to the device\n")); return A_ERROR; } AR_DEBUG_PRINTF(ATH_DEBUG_BMI, ("BMI LZ Stream Start: Exit\n")); return 0; } /* BMI Access routines */ int bmiBufferSend(struct hif_device *device, u8 *buffer, u32 length) { int status; u32 timeout; u32 address; u32 mboxAddress[HTC_MAILBOX_NUM_MAX]; HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR, &mboxAddress[0], sizeof(mboxAddress)); *pBMICmdCredits = 0; timeout = BMI_COMMUNICATION_TIMEOUT; while(timeout-- && !(*pBMICmdCredits)) { /* Read the counter register to get the command credits */ address = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; /* hit the credit counter with a 4-byte access, the first byte read will hit the counter and cause * a decrement, while the 
remaining 3 bytes has no effect. The rationale behind this is to * make all HIF accesses 4-byte aligned */ status = HIFReadWrite(device, address, (u8 *)pBMICmdCredits, 4, HIF_RD_SYNC_BYTE_INC, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to decrement the command credit count register\n")); return A_ERROR; } /* the counter is only 8=bits, ignore anything in the upper 3 bytes */ (*pBMICmdCredits) &= 0xFF; } if (*pBMICmdCredits) { address = mboxAddress[ENDPOINT1]; status = HIFReadWrite(device, address, buffer, length, HIF_WR_SYNC_BYTE_INC, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to send the BMI data to the device\n")); return A_ERROR; } } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferSend\n")); return A_ERROR; } return status; } int bmiBufferReceive(struct hif_device *device, u8 *buffer, u32 length, bool want_timeout) { int status; u32 address; u32 mboxAddress[HTC_MAILBOX_NUM_MAX]; struct hif_pending_events_info hifPendingEvents; static HIF_PENDING_EVENTS_FUNC getPendingEventsFunc = NULL; if (!pendingEventsFuncCheck) { /* see if the HIF layer implements an alternative function to get pending events * do this only once! */ HIFConfigureDevice(device, HIF_DEVICE_GET_PENDING_EVENTS_FUNC, &getPendingEventsFunc, sizeof(getPendingEventsFunc)); pendingEventsFuncCheck = true; } HIFConfigureDevice(device, HIF_DEVICE_GET_MBOX_ADDR, &mboxAddress[0], sizeof(mboxAddress)); /* * During normal bootup, small reads may be required. * Rather than issue an HIF Read and then wait as the Target * adds successive bytes to the FIFO, we wait here until * we know that response data is available. * * This allows us to cleanly timeout on an unexpected * Target failure rather than risk problems at the HIF level. In * particular, this avoids SDIO timeouts and possibly garbage * data on some host controllers. 
And on an interconnect * such as Compact Flash (as well as some SDIO masters) which * does not provide any indication on data timeout, it avoids * a potential hang or garbage response. * * Synchronization is more difficult for reads larger than the * size of the MBOX FIFO (128B), because the Target is unable * to push the 129th byte of data until AFTER the Host posts an * HIF Read and removes some FIFO data. So for large reads the * Host proceeds to post an HIF Read BEFORE all the data is * actually available to read. Fortunately, large BMI reads do * not occur in practice -- they're supported for debug/development. * * So Host/Target BMI synchronization is divided into these cases: * CASE 1: length < 4 * Should not happen * * CASE 2: 4 <= length <= 128 * Wait for first 4 bytes to be in FIFO * If CONSERVATIVE_BMI_READ is enabled, also wait for * a BMI command credit, which indicates that the ENTIRE * response is available in the the FIFO * * CASE 3: length > 128 * Wait for the first 4 bytes to be in FIFO * * For most uses, a small timeout should be sufficient and we will * usually see a response quickly; but there may be some unusual * (debug) cases of BMI_EXECUTE where we want an larger timeout. * For now, we use an unbounded busy loop while waiting for * BMI_EXECUTE. * * If BMI_EXECUTE ever needs to support longer-latency execution, * especially in production, this code needs to be enhanced to sleep * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently * a function of Host processor speed. */ if (length >= 4) { /* NB: Currently, always true */ /* * NB: word_available is declared static for esoteric reasons * having to do with protection on some OSes. 
*/ static u32 word_available; u32 timeout; word_available = 0; timeout = BMI_COMMUNICATION_TIMEOUT; while((!want_timeout || timeout--) && !word_available) { if (getPendingEventsFunc != NULL) { status = getPendingEventsFunc(device, &hifPendingEvents, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("BMI: Failed to get pending events \n")); break; } if (hifPendingEvents.AvailableRecvBytes >= sizeof(u32)) { word_available = 1; } continue; } status = HIFReadWrite(device, RX_LOOKAHEAD_VALID_ADDRESS, (u8 *)&word_available, sizeof(word_available), HIF_RD_SYNC_BYTE_INC, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read RX_LOOKAHEAD_VALID register\n")); return A_ERROR; } /* We did a 4-byte read to the same register; all we really want is one bit */ word_available &= (1 << ENDPOINT1); } if (!word_available) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout - bmiBufferReceive FIFO empty\n")); return A_ERROR; } } #define CONSERVATIVE_BMI_READ 0 #if CONSERVATIVE_BMI_READ /* * This is an extra-conservative CREDIT check. It guarantees * that ALL data is available in the FIFO before we start to * read from the interconnect. * * This credit check is useless when firmware chooses to * allow multiple outstanding BMI Command Credits, since the next * credit will already be present. To restrict the Target to one * BMI Command Credit, see HI_OPTION_BMI_CRED_LIMIT. * * And for large reads (when HI_OPTION_BMI_CRED_LIMIT is set) * we cannot wait for the next credit because the Target's FIFO * will not hold the entire response. So we need the Host to * start to empty the FIFO sooner. (And again, large reads are * not used in practice; they are for debug/development only.) 
* * For a more conservative Host implementation (which would be * safer for a Compact Flash interconnect): * Set CONSERVATIVE_BMI_READ (above) to 1 * Set HI_OPTION_BMI_CRED_LIMIT and * reduce BMI_DATASZ_MAX to 32 or 64 */ if ((length > 4) && (length < 128)) { /* check against MBOX FIFO size */ u32 timeout; *pBMICmdCredits = 0; timeout = BMI_COMMUNICATION_TIMEOUT; while((!want_timeout || timeout--) && !(*pBMICmdCredits) { /* Read the counter register to get the command credits */ address = COUNT_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 1; /* read the counter using a 4-byte read. Since the counter is NOT auto-decrementing, * we can read this counter multiple times using a non-incrementing address mode. * The rationale here is to make all HIF accesses a multiple of 4 bytes */ status = HIFReadWrite(device, address, (u8 *)pBMICmdCredits, sizeof(*pBMICmdCredits), HIF_RD_SYNC_BYTE_FIX, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the command credit count register\n")); return A_ERROR; } /* we did a 4-byte read to the same count register so mask off upper bytes */ (*pBMICmdCredits) &= 0xFF; } if (!(*pBMICmdCredits)) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI Communication timeout- bmiBufferReceive no credit\n")); return A_ERROR; } } #endif address = mboxAddress[ENDPOINT1]; status = HIFReadWrite(device, address, buffer, length, HIF_RD_SYNC_BYTE_INC, NULL); if (status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unable to read the BMI data from the device\n")); return A_ERROR; } return 0; } int BMIFastDownload(struct hif_device *device, u32 address, u8 *buffer, u32 length) { int status = A_ERROR; u32 lastWord = 0; u32 lastWordOffset = length & ~0x3; u32 unalignedBytes = length & 0x3; status = BMILZStreamStart (device, address); if (status) { return A_ERROR; } if (unalignedBytes) { /* copy the last word into a zero padded buffer */ memcpy(&lastWord, &buffer[lastWordOffset], unalignedBytes); } status = BMILZData(device, buffer, lastWordOffset); if (status) { 
return A_ERROR; } if (unalignedBytes) { status = BMILZData(device, (u8 *)&lastWord, 4); } if (!status) { // // Close compressed stream and open a new (fake) one. This serves mainly to flush Target caches. // status = BMILZStreamStart (device, 0x00); if (status) { return A_ERROR; } } return status; } int BMIRawWrite(struct hif_device *device, u8 *buffer, u32 length) { return bmiBufferSend(device, buffer, length); } int BMIRawRead(struct hif_device *device, u8 *buffer, u32 length, bool want_timeout) { return bmiBufferReceive(device, buffer, length, want_timeout); }
gpl-2.0
andyhui/linux-kernel-3.12.17
arch/arm/mach-omap2/emu.c
3067
1300
/* * emu.c * * ETM and ETB CoreSight components' resources as found in OMAP3xxx. * * Copyright (C) 2009 Nokia Corporation. * Alexander Shishkin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include "soc.h" #include "iomap.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shishkin"); /* Cortex CoreSight components within omap3xxx EMU */ #define ETM_BASE (L4_EMU_34XX_PHYS + 0x10000) #define DBG_BASE (L4_EMU_34XX_PHYS + 0x11000) #define ETB_BASE (L4_EMU_34XX_PHYS + 0x1b000) #define DAPCTL (L4_EMU_34XX_PHYS + 0x1d000) static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL); static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL); static int __init emu_init(void) { if (!cpu_is_omap34xx()) return -ENODEV; amba_device_register(&omap3_etb_device, &iomem_resource); amba_device_register(&omap3_etm_device, &iomem_resource); return 0; } omap_subsys_initcall(emu_init);
gpl-2.0
honor6-dev/android_kernel_huawei_h60
drivers/video/omap2/omapfb/omapfb-sysfs.c
4347
12283
/* * linux/drivers/video/omap2/omapfb-sysfs.c * * Copyright (C) 2008 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fb.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/omapfb.h> #include <video/omapdss.h> #include <video/omapvrfb.h> #include "omapfb.h" static ssize_t show_rotate_type(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); } static ssize_t store_rotate_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_mem_region *rg; int rot_type; int r; r = kstrtoint(buf, 0, &rot_type); if (r) return r; if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) return -EINVAL; if (!lock_fb_info(fbi)) return -ENODEV; r = 0; if (rot_type == ofbi->rotation_type) goto out; rg = omapfb_get_mem_region(ofbi->region); if (rg->size) { r = -EBUSY; goto put_region; } ofbi->rotation_type = rot_type; /* * Since the VRAM for this FB is not allocated at the moment we don't * need 
to do any further parameter checking at this point. */ put_region: omapfb_put_mem_region(rg); out: unlock_fb_info(fbi); return r ? r : count; } static ssize_t show_mirror(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); } static ssize_t store_mirror(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); bool mirror; int r; struct fb_var_screeninfo new_var; r = strtobool(buf, &mirror); if (r) return r; if (!lock_fb_info(fbi)) return -ENODEV; ofbi->mirror = mirror; omapfb_get_mem_region(ofbi->region); memcpy(&new_var, &fbi->var, sizeof(new_var)); r = check_fb_var(fbi, &new_var); if (r) goto out; memcpy(&fbi->var, &new_var, sizeof(fbi->var)); set_fb_fix(fbi); r = omapfb_apply_changes(fbi, 0); if (r) goto out; r = count; out: omapfb_put_mem_region(ofbi->region); unlock_fb_info(fbi); return r; } static ssize_t show_overlays(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); for (t = 0; t < ofbi->num_overlays; t++) { struct omap_overlay *ovl = ofbi->overlays[t]; int ovlnum; for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum) if (ovl == fbdev->overlays[ovlnum]) break; l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? 
"" : ",", ovlnum); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); omapfb_unlock(fbdev); unlock_fb_info(fbi); return l; } static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev, struct omap_overlay *ovl) { int i, t; for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); for (t = 0; t < ofbi->num_overlays; t++) { if (ofbi->overlays[t] == ovl) return ofbi; } } return NULL; } static ssize_t store_overlays(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB]; struct omap_overlay *ovl; int num_ovls, r, i; int len; bool added = false; num_ovls = 0; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; omapfb_lock(fbdev); if (len > 0) { char *p = (char *)buf; int ovlnum; while (p < buf + len) { int found; if (num_ovls == OMAPFB_MAX_OVL_PER_FB) { r = -EINVAL; goto out; } ovlnum = simple_strtoul(p, &p, 0); if (ovlnum > fbdev->num_overlays) { r = -EINVAL; goto out; } found = 0; for (i = 0; i < num_ovls; ++i) { if (ovls[i] == fbdev->overlays[ovlnum]) { found = 1; break; } } if (!found) ovls[num_ovls++] = fbdev->overlays[ovlnum]; p++; } } for (i = 0; i < num_ovls; ++i) { struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]); if (ofbi2 && ofbi2 != ofbi) { dev_err(fbdev->dev, "overlay already in use\n"); r = -EINVAL; goto out; } } /* detach unused overlays */ for (i = 0; i < ofbi->num_overlays; ++i) { int t, found; ovl = ofbi->overlays[i]; found = 0; for (t = 0; t < num_ovls; ++t) { if (ovl == ovls[t]) { found = 1; break; } } if (found) continue; DBG("detaching %d\n", ofbi->overlays[i]->id); omapfb_get_mem_region(ofbi->region); omapfb_overlay_enable(ovl, 0); if (ovl->manager) ovl->manager->apply(ovl->manager); omapfb_put_mem_region(ofbi->region); for (t = i + 1; t < 
ofbi->num_overlays; t++) { ofbi->rotation[t-1] = ofbi->rotation[t]; ofbi->overlays[t-1] = ofbi->overlays[t]; } ofbi->num_overlays--; i--; } for (i = 0; i < num_ovls; ++i) { int t, found; ovl = ovls[i]; found = 0; for (t = 0; t < ofbi->num_overlays; ++t) { if (ovl == ofbi->overlays[t]) { found = 1; break; } } if (found) continue; ofbi->rotation[ofbi->num_overlays] = 0; ofbi->overlays[ofbi->num_overlays++] = ovl; added = true; } if (added) { omapfb_get_mem_region(ofbi->region); r = omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; } r = count; out: omapfb_unlock(fbdev); unlock_fb_info(fbi); return r; } static ssize_t show_overlays_rotate(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); ssize_t l = 0; int t; if (!lock_fb_info(fbi)) return -ENODEV; for (t = 0; t < ofbi->num_overlays; t++) { l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", t == 0 ? "" : ",", ofbi->rotation[t]); } l += snprintf(buf + l, PAGE_SIZE - l, "\n"); unlock_fb_info(fbi); return l; } static ssize_t store_overlays_rotate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); int num_ovls = 0, r, i; int len; bool changed = false; u8 rotation[OMAPFB_MAX_OVL_PER_FB]; len = strlen(buf); if (buf[len - 1] == '\n') len = len - 1; if (!lock_fb_info(fbi)) return -ENODEV; if (len > 0) { char *p = (char *)buf; while (p < buf + len) { int rot; if (num_ovls == ofbi->num_overlays) { r = -EINVAL; goto out; } rot = simple_strtoul(p, &p, 0); if (rot < 0 || rot > 3) { r = -EINVAL; goto out; } if (ofbi->rotation[num_ovls] != rot) changed = true; rotation[num_ovls++] = rot; p++; } } if (num_ovls != ofbi->num_overlays) { r = -EINVAL; goto out; } if (changed) { for (i = 0; i < num_ovls; ++i) ofbi->rotation[i] = rotation[i]; omapfb_get_mem_region(ofbi->region); r = 
omapfb_apply_changes(fbi, 0); omapfb_put_mem_region(ofbi->region); if (r) goto out; /* FIXME error handling? */ } r = count; out: unlock_fb_info(fbi); return r; } static ssize_t show_size(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size); } static ssize_t store_size(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_dss_device *display = fb2display(fbi); struct omapfb2_mem_region *rg; unsigned long size; int r; int i; r = kstrtoul(buf, 0, &size); if (r) return r; size = PAGE_ALIGN(size); if (!lock_fb_info(fbi)) return -ENODEV; if (display && display->driver->sync) display->driver->sync(display); rg = ofbi->region; down_write_nested(&rg->lock, rg->id); atomic_inc(&rg->lock_count); if (atomic_read(&rg->map_count)) { r = -EBUSY; goto out; } for (i = 0; i < fbdev->num_fbs; i++) { struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]); int j; if (ofbi2->region != rg) continue; for (j = 0; j < ofbi2->num_overlays; j++) { struct omap_overlay *ovl; ovl = ofbi2->overlays[j]; if (ovl->is_enabled(ovl)) { r = -EBUSY; goto out; } } } if (size != ofbi->region->size) { r = omapfb_realloc_fbmem(fbi, size, ofbi->region->type); if (r) { dev_err(dev, "realloc fbmem failed\n"); goto out; } } r = count; out: atomic_dec(&rg->lock_count); up_write(&rg->lock); unlock_fb_info(fbi); return r; } static ssize_t show_phys(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr); } static ssize_t show_virt(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct 
omapfb_info *ofbi = FB2OFB(fbi); return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); } static ssize_t show_upd_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); enum omapfb_update_mode mode; int r; r = omapfb_get_update_mode(fbi, &mode); if (r) return r; return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); } static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fb_info *fbi = dev_get_drvdata(dev); unsigned mode; int r; r = kstrtouint(buf, 0, &mode); if (r) return r; r = omapfb_set_update_mode(fbi, mode); if (r) return r; return count; } static struct device_attribute omapfb_attrs[] = { __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, store_rotate_type), __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror), __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size), __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays), __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate, store_overlays_rotate), __ATTR(phys_addr, S_IRUGO, show_phys, NULL), __ATTR(virt_addr, S_IRUGO, show_virt, NULL), __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode), }; int omapfb_create_sysfs(struct omapfb2_device *fbdev) { int i; int r; DBG("create sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { int t; for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) { r = device_create_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); if (r) { dev_err(fbdev->dev, "failed to create sysfs " "file\n"); return r; } } } return 0; } void omapfb_remove_sysfs(struct omapfb2_device *fbdev) { int i, t; DBG("remove sysfs for fbs\n"); for (i = 0; i < fbdev->num_fbs; i++) { for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) device_remove_file(fbdev->fbs[i]->dev, &omapfb_attrs[t]); } }
gpl-2.0
CyanideL/android_kernel_oneplus_msm8974
arch/mips/bcm47xx/wgt634u.c
4859
4212
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/leds.h>
#include <linux/mtd/physmap.h>
#include <linux/ssb/ssb.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/gpio.h>
#include <asm/mach-bcm47xx/bcm47xx.h>

/* GPIO definitions for the WGT634U */
#define WGT634U_GPIO_LED	3
#define WGT634U_GPIO_RESET	2
#define WGT634U_GPIO_TP1	7
#define WGT634U_GPIO_TP2	6
#define WGT634U_GPIO_TP3	5
#define WGT634U_GPIO_TP4	4
#define WGT634U_GPIO_TP5	1

static struct gpio_led wgt634u_leds[] = {
	{
		.name = "power",
		.gpio = WGT634U_GPIO_LED,
		.active_low = 1,
		.default_trigger = "heartbeat",
	},
};

static struct gpio_led_platform_data wgt634u_led_data = {
	.num_leds =     ARRAY_SIZE(wgt634u_leds),
	.leds =         wgt634u_leds,
};

static struct platform_device wgt634u_gpio_leds = {
	.name =         "leds-gpio",
	.id =           -1,
	.dev = {
		.platform_data = &wgt634u_led_data,
	}
};

/*
 * 8MiB flash.  The struct mtd_partition matches original Netgear WGT634U
 * firmware.
 */
static struct mtd_partition wgt634u_partitions[] = {
	{
		.name       = "cfe",
		.offset     = 0,
		.size       = 0x60000,		/* 384k */
		.mask_flags = MTD_WRITEABLE	/* force read-only */
	},
	{
		.name   = "config",
		.offset = 0x60000,
		.size   = 0x20000		/* 128k */
	},
	{
		.name   = "linux",
		.offset = 0x80000,
		.size   = 0x140000		/* 1280k */
	},
	{
		.name   = "jffs",
		.offset = 0x1c0000,
		.size   = 0x620000		/* 6272k */
	},
	{
		.name   = "nvram",
		.offset = 0x7e0000,
		.size   = 0x20000		/* 128k */
	},
};

static struct physmap_flash_data wgt634u_flash_data = {
	.parts    = wgt634u_partitions,
	.nr_parts = ARRAY_SIZE(wgt634u_partitions)
};

/* start/end are filled in at probe time from the SSB flash window. */
static struct resource wgt634u_flash_resource = {
	.flags = IORESOURCE_MEM,
};

static struct platform_device wgt634u_flash = {
	.name          = "physmap-flash",
	.id            = 0,
	.dev           = { .platform_data = &wgt634u_flash_data, },
	.resource      = &wgt634u_flash_resource,
	.num_resources = 1,
};

/* Platform devices */
static struct platform_device *wgt634u_devices[] __initdata = {
	&wgt634u_flash,
	&wgt634u_gpio_leds,
};

/*
 * Shared-IRQ handler for the front-panel reset button: acknowledge the
 * level-triggered GPIO interrupt by flipping its polarity, and trigger a
 * reboot when the button is pressed (line low).
 */
static irqreturn_t gpio_interrupt(int irq, void *ignored)
{
	int state;

	/* Interrupts are shared, check if the current one is
	   a GPIO interrupt. */
	if (!ssb_chipco_irq_status(&bcm47xx_bus.ssb.chipco,
				   SSB_CHIPCO_IRQ_GPIO))
		return IRQ_NONE;

	state = gpio_get_value(WGT634U_GPIO_RESET);

	/* Interrupt are level triggered, revert the interrupt polarity
	   to clear the interrupt. */
	gpio_polarity(WGT634U_GPIO_RESET, state);

	if (!state) {
		/* FIX: message was missing its "\n" terminator, so it
		 * merged with the next kernel log line. */
		printk(KERN_INFO "Reset button pressed\n");
		ctrl_alt_del();
	}

	return IRQ_HANDLED;
}

static int __init wgt634u_init(void)
{
	/*
	 * There is no easy way to detect that we are running on a WGT634U
	 * machine.  Use the MAC address as an heuristic.  Netgear Inc. has
	 * been allocated ranges 00:09:5b:xx:xx:xx and 00:0f:b5:xx:xx:xx.
	 */
	u8 *et0mac;

	if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB)
		return -ENODEV;

	et0mac = bcm47xx_bus.ssb.sprom.et0mac;

	if (et0mac[0] == 0x00 &&
	    ((et0mac[1] == 0x09 && et0mac[2] == 0x5b) ||
	     (et0mac[1] == 0x0f && et0mac[2] == 0xb5))) {
		struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;

		printk(KERN_INFO "WGT634U machine detected.\n");

		if (!request_irq(gpio_to_irq(WGT634U_GPIO_RESET),
				 gpio_interrupt, IRQF_SHARED,
				 "WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) {
			gpio_direction_input(WGT634U_GPIO_RESET);
			gpio_intmask(WGT634U_GPIO_RESET, 1);
			ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco,
					    SSB_CHIPCO_IRQ_GPIO,
					    SSB_CHIPCO_IRQ_GPIO);
		}

		wgt634u_flash_data.width = mcore->flash_buswidth;
		wgt634u_flash_resource.start = mcore->flash_window;
		wgt634u_flash_resource.end = mcore->flash_window
					   + mcore->flash_window_size - 1;
		return platform_add_devices(wgt634u_devices,
					    ARRAY_SIZE(wgt634u_devices));
	} else
		return -ENODEV;
}

module_init(wgt634u_init);
gpl-2.0
tiny4579/tinykernel-flo
drivers/ata/pata_sc1200.c
5115
7501
/* * New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk> * * TODO: Mode selection filtering * TODO: Needs custom DMA cleanup code * * Based very heavily on * * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003 * * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com> * May be copied or modified under the terms of the GNU General Public License * * Development of this chipset driver was funded * by the nice folks at National Semiconductor. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_sc1200" #define DRV_VERSION "0.2.6" #define SC1200_REV_A 0x00 #define SC1200_REV_B1 0x01 #define SC1200_REV_B3 0x02 #define SC1200_REV_C1 0x03 #define SC1200_REV_D1 0x04 /** * sc1200_clock - PCI clock * * Return the PCI bus clocking for the SC1200 chipset configuration * in use. 
We return 0 for 33MHz 1 for 48MHz and 2 for 66Mhz */ static int sc1200_clock(void) { /* Magic registers that give us the chipset data */ u8 chip_id = inb(0x903C); u8 silicon_rev = inb(0x903D); u16 pci_clock; if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1) return 0; /* 33 MHz mode */ /* Clock generator configuration 0x901E its 8/9 are the PCI clocking 0/3 is 33Mhz 1 is 48 2 is 66 */ pci_clock = inw(0x901E); pci_clock >>= 8; pci_clock &= 0x03; if (pci_clock == 3) pci_clock = 0; return pci_clock; } /** * sc1200_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Set our PIO requirements. This is fairly simple on the SC1200 */ static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const u32 pio_timings[4][5] = { /* format0, 33Mhz */ { 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 }, /* format1, 33Mhz */ { 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 }, /* format1, 48Mhz */ { 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 }, /* format1, 66Mhz */ { 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 } }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 format; unsigned int reg = 0x40 + 0x10 * ap->port_no; int mode = adev->pio_mode - XFER_PIO_0; pci_read_config_dword(pdev, reg + 4, &format); format >>= 31; format += sc1200_clock(); pci_write_config_dword(pdev, reg + 8 * adev->devno, pio_timings[format][mode]); } /** * sc1200_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * We cannot mix MWDMA and UDMA without reloading timings each switch * master to slave. 
*/ static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev) { static const u32 udma_timing[3][3] = { { 0x00921250, 0x00911140, 0x00911030 }, { 0x00932470, 0x00922260, 0x00922140 }, { 0x009436A1, 0x00933481, 0x00923261 } }; static const u32 mwdma_timing[3][3] = { { 0x00077771, 0x00012121, 0x00002020 }, { 0x000BBBB2, 0x00024241, 0x00013131 }, { 0x000FFFF3, 0x00035352, 0x00015151 } }; int clock = sc1200_clock(); struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned int reg = 0x40 + 0x10 * ap->port_no; int mode = adev->dma_mode; u32 format; if (mode >= XFER_UDMA_0) format = udma_timing[clock][mode - XFER_UDMA_0]; else format = mwdma_timing[clock][mode - XFER_MW_DMA_0]; if (adev->devno == 0) { u32 timings; pci_read_config_dword(pdev, reg + 4, &timings); timings &= 0x80000000UL; timings |= format; pci_write_config_dword(pdev, reg + 4, timings); } else pci_write_config_dword(pdev, reg + 12, format); } /** * sc1200_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. Specifically we have a problem that there is only * one MWDMA/UDMA bit. */ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct ata_device *prev = ap->private_data; /* See if the DMA settings could be wrong */ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) { /* Maybe, but do the channels match MWDMA/UDMA ? */ if ((ata_using_udma(adev) && !ata_using_udma(prev)) || (ata_using_udma(prev) && !ata_using_udma(adev))) /* Switch the mode bits */ sc1200_set_dmamode(ap, adev); } return ata_bmdma_qc_issue(qc); } /** * sc1200_qc_defer - implement serialization * @qc: command * * Serialize command issue on this controller. 
*/ static int sc1200_qc_defer(struct ata_queued_cmd *qc) { struct ata_host *host = qc->ap->host; struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; int rc; /* First apply the usual rules */ rc = ata_std_qc_defer(qc); if (rc != 0) return rc; /* Now apply serialization rules. Only allow a command if the other channel state machine is idle */ if (alt && alt->qc_active) return ATA_DEFER_PORT; return 0; } static struct scsi_host_template sc1200_sht = { ATA_BMDMA_SHT(DRV_NAME), .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations sc1200_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = sc1200_qc_issue, .qc_defer = sc1200_qc_defer, .cable_detect = ata_cable_40wire, .set_piomode = sc1200_set_piomode, .set_dmamode = sc1200_set_dmamode, }; /** * sc1200_init_one - Initialise an SC1200 * @dev: PCI device * @id: Entry in match table * * Just throw the needed data at the libata helper and it does all * our work. */ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &sc1200_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); } static const struct pci_device_id sc1200[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), }, { }, }; static struct pci_driver sc1200_pci_driver = { .name = DRV_NAME, .id_table = sc1200, .probe = sc1200_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init sc1200_init(void) { return pci_register_driver(&sc1200_pci_driver); } static void __exit sc1200_exit(void) { pci_unregister_driver(&sc1200_pci_driver); } MODULE_AUTHOR("Alan Cox, Mark Lord"); MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200"); MODULE_LICENSE("GPL"); 
/* Module boilerplate: hotplug device table, version, entry/exit points */
MODULE_DEVICE_TABLE(pci, sc1200);
MODULE_VERSION(DRV_VERSION);

module_init(sc1200_init);
module_exit(sc1200_exit);
gpl-2.0
Jazz-823/kernel_lge_hammerhead
drivers/ata/pata_sl82c105.c
5115
9694
/*
 * pata_sl82c105.c 	- SL82C105 PATA for new ATA layer
 *			  (C) 2005 Red Hat Inc
 *			  (C) 2011 Bartlomiej Zolnierkiewicz
 *
 * Based in part on linux/drivers/ide/pci/sl82c105.c
 * 		SL82C105/Winbond 553 IDE driver
 *
 * and in part on the documentation and errata sheet
 *
 *
 * Note: The controller like many controllers has shared timings for
 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
 * in the dma_stop function. Thus we actually don't need a set_dmamode
 * method as the PIO method is always called and will set the right PIO
 * timing parameters.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_sl82c105"
#define DRV_VERSION "0.3.3"

enum {
	/*
	 * SL82C105 PCI config register 0x40 bits.
	 */
	CTRL_IDE_IRQB	=	(1 << 30),
	CTRL_IDE_IRQA	=	(1 << 28),
	CTRL_LEGIRQ	=	(1 << 11),
	CTRL_P1F16	=	(1 << 5),
	CTRL_P1EN	=	(1 << 4),
	CTRL_P0F16	=	(1 << 1),
	CTRL_P0EN	=	(1 << 0)
};

/**
 *	sl82c105_pre_reset		-	probe begin
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Set up cable type and use generic probe init
 */

static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits sl82c105_enable_bits[] = {
		/* enable bits live in config register 0x40 (cf. CTRL_P0EN/P1EN) */
		{ 0x40, 1, 0x01, 0x01 },
		{ 0x40, 1, 0x10, 0x10 }
	};
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	/* Secondary port: refuse to probe if it is not enabled in hardware */
	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}


/**
 *	sl82c105_configure_piomode	-	set chip PIO timing
 *	@ap: ATA interface
 *	@adev: ATA device
 *	@pio: PIO mode
 *
 *	Called to do the PIO mode setup. Our timing registers are shared
 *	so a configure_dmamode call will undo any work we do here and vice
 *	versa
 */

static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Raw timing register values for PIO modes 0-4 */
	static u16 pio_timing[5] = {
		0x50D, 0x407, 0x304, 0x242, 0x240
	};
	u16 dummy;
	/* One 16-bit timing register per (port, device) pair */
	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);

	pci_write_config_word(pdev, timing, pio_timing[pio]);
	/* Can we lose this oddity of the old driver */
	pci_read_config_word(pdev, timing, &dummy);
}

/**
 *	sl82c105_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Called to do the PIO mode setup. Our timing registers are shared
 *	but we want to set the PIO timing by default.
 */

static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 *	sl82c105_configure_dmamode	-	set DMA mode in chip
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Load DMA cycle times into the chip ready for a DMA transfer
 *	to occur.
 */

static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Raw timing register values for MWDMA modes 0-2 */
	static u16 dma_timing[3] = {
		0x707, 0x201, 0x200
	};
	u16 dummy;
	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
	int dma = adev->dma_mode - XFER_MW_DMA_0;

	pci_write_config_word(pdev, timing, dma_timing[dma]);
	/* Can we lose this oddity of the old driver */
	pci_read_config_word(pdev, timing, &dummy);
}

/**
 *	sl82c105_reset_engine	-	Reset the DMA engine
 *	@ap: ATA interface
 *
 *	The sl82c105 has some serious problems with the DMA engine
 *	when transfers don't run as expected or ATAPI is used. The
 *	recommended fix is to reset the engine each use using a chip
 *	test register.
 */

static void sl82c105_reset_engine(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u16 val;

	/* Pulse bit 2 of chip register 0x7E to reset the DMA engine */
	pci_read_config_word(pdev, 0x7E, &val);
	pci_write_config_word(pdev, 0x7E, val | 4);
	pci_write_config_word(pdev, 0x7E, val & ~4);
}

/**
 *	sl82c105_bmdma_start		-	DMA engine begin
 *	@qc: ATA command
 *
 *	Reset the DMA engine each use as recommended by the errata
 *	document.
 *
 *	FIXME: if we switch clock at BMDMA start/end we might get better
 *	PIO performance on DMA capable devices.
 */

static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	udelay(100);
	sl82c105_reset_engine(ap);
	udelay(100);

	/* Set the clocks for DMA */
	sl82c105_configure_dmamode(ap, qc->dev);
	/* Activate DMA */
	ata_bmdma_start(qc);
}

/**
 *	sl82c105_bmdma_end		-	DMA engine stop
 *	@qc: ATA command
 *
 *	Reset the DMA engine each use as recommended by the errata
 *	document.
 *
 *	This function is also called to turn off DMA when a timeout occurs
 *	during DMA operation. In both cases we need to reset the engine,
 *	so no actual eng_timeout handler is required.
 *
 *	We assume bmdma_stop is always called if bmdma_start as called. If
 *	not then we may need to wrap qc_issue.
 */

static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_bmdma_stop(qc);
	sl82c105_reset_engine(ap);
	udelay(100);

	/* This will redo the initial setup of the DMA device to matching
	   PIO timings */
	sl82c105_set_piomode(ap, qc->dev);
}

/**
 *	sl82c105_qc_defer	-	implement serialization
 *	@qc: command
 *
 *	We must issue one command per host not per channel because
 *	of the reset bug.
 *
 *	Q: is the scsi host lock sufficient ?
 */

static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_host *host = qc->ap->host;
	/* 1 ^ port_no selects the sibling channel */
	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
	int rc;

	/* First apply the usual rules */
	rc = ata_std_qc_defer(qc);
	if (rc != 0)
		return rc;

	/* Now apply serialization rules. Only allow a command if the
	   other channel state machine is idle */
	if (alt && alt->qc_active)
		return ATA_DEFER_PORT;
	return 0;
}

static bool sl82c105_sff_irq_check(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Per-port IRQ status bits live in config register 0x40 */
	u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;

	pci_read_config_dword(pdev, 0x40, &val);

	return val & mask;
}

static struct scsi_host_template sl82c105_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations sl82c105_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_defer	= sl82c105_qc_defer,
	.bmdma_start	= sl82c105_bmdma_start,
	.bmdma_stop	= sl82c105_bmdma_stop,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= sl82c105_set_piomode,
	.prereset	= sl82c105_pre_reset,
	.sff_irq_check	= sl82c105_sff_irq_check,
};

/**
 *	sl82c105_bridge_revision	-	find bridge version
 *	@pdev: PCI device for the ATA function
 *
 *	Locates the PCI bridge associated with the ATA function and
 *	providing it is a Winbond 553 reports the revision. If it cannot
 *	find a revision or the right device it returns -1
 */

static int sl82c105_bridge_revision(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	/*
	 * The bridge should be part of the same device, but function 0.
	 */
	bridge = pci_get_slot(pdev->bus,
			       PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!bridge)
		return -1;

	/*
	 * Make sure it is a Winbond 553 and is an ISA bridge.
*/ if (bridge->vendor != PCI_VENDOR_ID_WINBOND || bridge->device != PCI_DEVICE_ID_WINBOND_83C553 || bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) { pci_dev_put(bridge); return -1; } /* * We need to find function 0's revision, not function 1 */ pci_dev_put(bridge); return bridge->revision; } static void sl82c105_fixup(struct pci_dev *pdev) { u32 val; pci_read_config_dword(pdev, 0x40, &val); val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; pci_write_config_dword(pdev, 0x40, val); } static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info_dma = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &sl82c105_port_ops }; static const struct ata_port_info info_early = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &sl82c105_port_ops }; /* for now use only the first port */ const struct ata_port_info *ppi[] = { &info_early, NULL }; int rev; int rc; rc = pcim_enable_device(dev); if (rc) return rc; rev = sl82c105_bridge_revision(dev); if (rev == -1) dev_warn(&dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA\n"); else if (rev <= 5) dev_warn(&dev->dev, "pata_sl82c105: Early bridge revision, no DMA available\n"); else ppi[0] = &info_dma; sl82c105_fixup(dev); return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); } #ifdef CONFIG_PM static int sl82c105_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; sl82c105_fixup(pdev); ata_host_resume(host); return 0; } #endif static const struct pci_device_id sl82c105[] = { { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), }, { }, }; static struct pci_driver sl82c105_pci_driver = { .name = DRV_NAME, .id_table = sl82c105, .probe = sl82c105_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = sl82c105_reinit_one, #endif }; static int __init 
sl82c105_init(void) { return pci_register_driver(&sl82c105_pci_driver); } static void __exit sl82c105_exit(void) { pci_unregister_driver(&sl82c105_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Sl82c105"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sl82c105); MODULE_VERSION(DRV_VERSION); module_init(sl82c105_init); module_exit(sl82c105_exit);
gpl-2.0
dewadg/mako-kernel
drivers/mtd/maps/rpxlite.c
8187
1371
/* * Handle mapping of the flash on the RPX Lite and CLLF boards */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #define WINDOW_ADDR 0xfe000000 #define WINDOW_SIZE 0x800000 static struct mtd_info *mymtd; static struct map_info rpxlite_map = { .name = "RPX", .size = WINDOW_SIZE, .bankwidth = 4, .phys = WINDOW_ADDR, }; static int __init init_rpxlite(void) { printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); if (!rpxlite_map.virt) { printk("Failed to ioremap\n"); return -EIO; } simple_map_init(&rpxlite_map); mymtd = do_map_probe("cfi_probe", &rpxlite_map); if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_register(mymtd, NULL, 0); return 0; } iounmap((void *)rpxlite_map.virt); return -ENXIO; } static void __exit cleanup_rpxlite(void) { if (mymtd) { mtd_device_unregister(mymtd); map_destroy(mymtd); } if (rpxlite_map.virt) { iounmap((void *)rpxlite_map.virt); rpxlite_map.virt = 0; } } module_init(init_rpxlite); module_exit(cleanup_rpxlite); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>"); MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
gpl-2.0
intervigilium/android_kernel_htc_msm7x30
fs/proc/proc_tty.c
10747
4805
/*
 * proc_tty.c -- handles /proc/tty
 *
 * Copyright 1997, Theodore Ts'o
 */

#include <asm/uaccess.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>

/*
 * The /proc/tty directory inodes...
 */
static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver;

/*
 * This is the handler for /proc/tty/drivers
 */

/*
 * Emit one line for a contiguous minor range of a tty driver:
 * driver name, device node, major and minor range, and a textual
 * type[:subtype] tag.
 */
static void show_tty_range(struct seq_file *m, struct tty_driver *p,
	dev_t from, int num)
{
	seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
	seq_printf(m, "/dev/%-8s ", p->name);
	if (p->num > 1) {
		/* multi-device driver: print the minor span */
		seq_printf(m, "%3d %d-%d ", MAJOR(from),
			   MINOR(from), MINOR(from) + num - 1);
	} else {
		seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
	}
	switch (p->type) {
	case TTY_DRIVER_TYPE_SYSTEM:
		seq_puts(m, "system");
		if (p->subtype == SYSTEM_TYPE_TTY)
			seq_puts(m, ":/dev/tty");
		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
			seq_puts(m, ":console");
		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
			seq_puts(m, ":vtmaster");
		break;
	case TTY_DRIVER_TYPE_CONSOLE:
		seq_puts(m, "console");
		break;
	case TTY_DRIVER_TYPE_SERIAL:
		seq_puts(m, "serial");
		break;
	case TTY_DRIVER_TYPE_PTY:
		if (p->subtype == PTY_TYPE_MASTER)
			seq_puts(m, "pty:master");
		else if (p->subtype == PTY_TYPE_SLAVE)
			seq_puts(m, "pty:slave");
		else
			seq_puts(m, "pty");
		break;
	default:
		seq_printf(m, "type:%d.%d", p->type, p->subtype);
	}
	seq_putc(m, '\n');
}

/*
 * seq_file .show handler: print the fixed pseudo-driver lines once
 * (when v is the first list entry), then one line per major spanned
 * by this driver's minor range.
 */
static int show_tty_driver(struct seq_file *m, void *v)
{
	struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers);
	dev_t from = MKDEV(p->major, p->minor_start);
	dev_t to = from + p->num;

	if (&p->tty_drivers == tty_drivers.next) {
		/* pseudo-drivers first */
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
		seq_puts(m, "system:/dev/tty\n");
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
		seq_puts(m, "system:console\n");
#ifdef CONFIG_UNIX98_PTYS
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
		seq_puts(m, "system\n");
#endif
#ifdef CONFIG_VT
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
		seq_puts(m, "system:vtmaster\n");
#endif
	}

	/* A driver's range may cross major boundaries: one line per major */
	while (MAJOR(from) < MAJOR(to)) {
		dev_t next = MKDEV(MAJOR(from)+1, 0);
		show_tty_range(m, p, from, next - from);
		from = next;
	}
	if (from != to)
		show_tty_range(m, p, from, to - from);
	return 0;
}

/* iterator */
/* Walks the global tty_drivers list under tty_mutex */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&tty_mutex);
	return seq_list_start(&tty_drivers, *pos);
}

static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &tty_drivers, pos);
}

static void t_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tty_mutex);
}

static const struct seq_operations tty_drivers_op = {
	.start	= t_start,
	.next	= t_next,
	.stop	= t_stop,
	.show	= show_tty_driver
};

static int tty_drivers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tty_drivers_op);
}

static const struct file_operations proc_tty_drivers_operations = {
	.open		= tty_drivers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * This function is called by tty_register_driver() to handle
 * registering the driver's /proc handler into /proc/tty/driver/<foo>
 */
void proc_tty_register_driver(struct tty_driver *driver)
{
	struct proc_dir_entry *ent;

	/* Nothing to do without a name, or if already registered,
	   or if the driver provides no proc fops */
	if (!driver->driver_name || driver->proc_entry ||
	    !driver->ops->proc_fops)
		return;

	ent = proc_create_data(driver->driver_name, 0, proc_tty_driver,
			       driver->ops->proc_fops, driver);
	driver->proc_entry = ent;
}

/*
 * This function is called by tty_unregister_driver()
 */
void proc_tty_unregister_driver(struct tty_driver *driver)
{
	struct proc_dir_entry *ent;

	ent = driver->proc_entry;
	if (!ent)
		return;

	remove_proc_entry(driver->driver_name, proc_tty_driver);

	driver->proc_entry = NULL;
}

/*
 * Called by proc_root_init() to initialize the /proc/tty subtree
 */
void __init proc_tty_init(void)
{
	if (!proc_mkdir("tty", NULL))
		return;
	proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
	/*
	 * /proc/tty/driver/serial reveals the exact character counts for
	 * serial links which is just too easy to abuse for inferring
	 * password lengths and inter-keystroke timings during password
	 * entry.
	 */
	proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
	proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
	proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
}
gpl-2.0
mixtile/garage-linux
drivers/video/nvidia/nv_backlight.c
11003
3627
/*
 * Backlight code for nVidia based graphic cards
 *
 * Copyright 2004 Antonino Daplas <adaplas@pol.net>
 * Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/pci.h>

#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif

#include "nv_local.h"
#include "nv_type.h"
#include "nv_proto.h"

/* We do not have any information about which values are allowed, thus
 * we used safe values.
 */
#define MIN_LEVEL 0x158
#define MAX_LEVEL 0x534
#define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)

/*
 * Map a 0..FB_BACKLIGHT_LEVELS-1 brightness index through the fb
 * backlight curve into the hardware range [MIN_LEVEL, MAX_LEVEL].
 */
static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
		int level)
{
	struct fb_info *info = pci_get_drvdata(par->pci_dev);
	int nlevel;

	/* Get and convert the value */
	/* No locking of bl_curve since we read a single value */
	nlevel = MIN_LEVEL + info->bl_curve[level] * LEVEL_STEP;

	/* Clamp into the hardware's safe range */
	if (nlevel < 0)
		nlevel = 0;
	else if (nlevel < MIN_LEVEL)
		nlevel = MIN_LEVEL;
	else if (nlevel > MAX_LEVEL)
		nlevel = MAX_LEVEL;

	return nlevel;
}

/*
 * Program the backlight registers from the backlight_device state.
 * A blanked framebuffer is driven at level 0 regardless of the
 * requested brightness.
 */
static int nvidia_bl_update_status(struct backlight_device *bd)
{
	struct nvidia_par *par = bl_get_data(bd);
	u32 tmp_pcrt, tmp_pmc, fpcontrol;
	int level;

	if (!par->FlatPanel)
		return 0;

	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK)
		level = 0;
	else
		level = bd->props.brightness;

	tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
	tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
	fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;

	if (level > 0) {
		tmp_pcrt |= 0x1;
		/*
		 * Fix: use an unsigned constant here; "1 << 31" left-shifts
		 * into the sign bit of an int, which is undefined behaviour.
		 */
		tmp_pmc |= (1U << 31); /* backlight bit */
		tmp_pmc |= nvidia_bl_get_level_brightness(par, level) << 16;
		fpcontrol |= par->fpSyncs;
	} else
		fpcontrol |= 0x20000022;

	NV_WR32(par->PCRTC0, 0x081C, tmp_pcrt);
	NV_WR32(par->PMC, 0x10F0, tmp_pmc);
	NV_WR32(par->PRAMDAC, 0x848, fpcontrol);

	return 0;
}

static int nvidia_bl_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops nvidia_bl_ops = {
	.get_brightness = nvidia_bl_get_brightness,
	.update_status	= nvidia_bl_update_status,
};

/*
 * Register a backlight device for a flat-panel nVidia adapter and
 * switch it on at maximum brightness.  On registration failure
 * info->bl_dev is left NULL.
 */
void nvidia_bl_init(struct nvidia_par *par)
{
	struct backlight_properties props;
	struct fb_info *info = pci_get_drvdata(par->pci_dev);
	struct backlight_device *bd;
	char name[12];

	if (!par->FlatPanel)
		return;

#ifdef CONFIG_PMAC_BACKLIGHT
	if (!machine_is(powermac) ||
	    !pmac_has_backlight_type("mnca"))
		return;
#endif

	snprintf(name, sizeof(name), "nvidiabl%d", info->node);

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
	bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
				       &props);
	if (IS_ERR(bd)) {
		info->bl_dev = NULL;
		printk(KERN_WARNING "nvidia: Backlight registration failed\n");
		/* fix: was "goto error" jumping to a bare return */
		return;
	}

	info->bl_dev = bd;
	fb_bl_default_curve(info, 0,
			    0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL,
			    0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);

	bd->props.brightness = bd->props.max_brightness;
	bd->props.power = FB_BLANK_UNBLANK;
	backlight_update_status(bd);

	/* fix: missing log level on this informational message */
	printk(KERN_INFO "nvidia: Backlight initialized (%s)\n", name);
}

void nvidia_bl_exit(struct nvidia_par *par)
{
	struct fb_info *info = pci_get_drvdata(par->pci_dev);
	struct backlight_device *bd = info->bl_dev;

	/* NOTE(review): bd is NULL if registration failed; presumably
	   backlight_device_unregister() tolerates NULL — confirm */
	backlight_device_unregister(bd);
	/* fix: missing log level on this informational message */
	printk(KERN_INFO "nvidia: Backlight unloaded\n");
}
gpl-2.0
JohnTsaiAndroid/Telegram
TMessagesProj/jni/opus/silk/fixed/noise_shape_analysis_FIX.c
252
24086
/*********************************************************************** Copyright (c) 2006-2011, Skype Limited. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Internet Society, IETF or IETF Trust, nor the names of specific contributors, may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "main_FIX.h" #include "stack_alloc.h" #include "tuning_parameters.h" /* Compute gain to make warped filter coefficients have a zero mean log frequency response on a */ /* non-warped frequency scale. (So that it can be implemented with a minimum-phase monic filter.) 
*/ /* Note: A monic filter is one with the first coefficient equal to 1.0. In Silk we omit the first */ /* coefficient in an array of coefficients, for monic filters. */ static OPUS_INLINE opus_int32 warped_gain( /* gain in Q16*/ const opus_int32 *coefs_Q24, opus_int lambda_Q16, opus_int order ) { opus_int i; opus_int32 gain_Q24; lambda_Q16 = -lambda_Q16; gain_Q24 = coefs_Q24[ order - 1 ]; for( i = order - 2; i >= 0; i-- ) { gain_Q24 = silk_SMLAWB( coefs_Q24[ i ], gain_Q24, lambda_Q16 ); } gain_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), gain_Q24, -lambda_Q16 ); return silk_INVERSE32_varQ( gain_Q24, 40 ); } /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum */ /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */ static OPUS_INLINE void limit_warped_coefs( opus_int32 *coefs_syn_Q24, opus_int32 *coefs_ana_Q24, opus_int lambda_Q16, opus_int32 limit_Q24, opus_int order ) { opus_int i, iter, ind = 0; opus_int32 tmp, maxabs_Q24, chirp_Q16, gain_syn_Q16, gain_ana_Q16; opus_int32 nom_Q16, den_Q24; /* Convert to monic coefficients */ lambda_Q16 = -lambda_Q16; for( i = order - 1; i > 0; i-- ) { coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); } lambda_Q16 = -lambda_Q16; nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -(opus_int32)lambda_Q16, lambda_Q16 ); den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 ); gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 ); gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); for( i = 0; i < order; i++ ) { coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] ); coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); } for( iter = 0; iter < 10; iter++ ) { /* Find 
maximum absolute value */ maxabs_Q24 = -1; for( i = 0; i < order; i++ ) { tmp = silk_max( silk_abs_int32( coefs_syn_Q24[ i ] ), silk_abs_int32( coefs_ana_Q24[ i ] ) ); if( tmp > maxabs_Q24 ) { maxabs_Q24 = tmp; ind = i; } } if( maxabs_Q24 <= limit_Q24 ) { /* Coefficients are within range - done */ return; } /* Convert back to true warped coefficients */ for( i = 1; i < order; i++ ) { coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); } gain_syn_Q16 = silk_INVERSE32_varQ( gain_syn_Q16, 32 ); gain_ana_Q16 = silk_INVERSE32_varQ( gain_ana_Q16, 32 ); for( i = 0; i < order; i++ ) { coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] ); coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); } /* Apply bandwidth expansion */ chirp_Q16 = SILK_FIX_CONST( 0.99, 16 ) - silk_DIV32_varQ( silk_SMULWB( maxabs_Q24 - limit_Q24, silk_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ), silk_MUL( maxabs_Q24, ind + 1 ), 22 ); silk_bwexpander_32( coefs_syn_Q24, order, chirp_Q16 ); silk_bwexpander_32( coefs_ana_Q24, order, chirp_Q16 ); /* Convert to monic warped coefficients */ lambda_Q16 = -lambda_Q16; for( i = order - 1; i > 0; i-- ) { coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 ); coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 ); } lambda_Q16 = -lambda_Q16; nom_Q16 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -(opus_int32)lambda_Q16, lambda_Q16 ); den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 ); gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); den_Q24 = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 ); gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 ); for( i = 0; i < order; i++ ) { coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, 
coefs_syn_Q24[ i ] ); coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] ); } } silk_assert( 0 ); } /**************************************************************/ /* Compute noise shaping coefficients and initial gain values */ /**************************************************************/ void silk_noise_shape_analysis_FIX( silk_encoder_state_FIX *psEnc, /* I/O Encoder state FIX */ silk_encoder_control_FIX *psEncCtrl, /* I/O Encoder control FIX */ const opus_int16 *pitch_res, /* I LPC residual from pitch analysis */ const opus_int16 *x, /* I Input signal [ frame_length + la_shape ] */ int arch /* I Run-time architecture */ ) { silk_shape_state_FIX *psShapeSt = &psEnc->sShape; opus_int k, i, nSamples, Qnrg, b_Q14, warping_Q16, scale = 0; opus_int32 SNR_adj_dB_Q7, HarmBoost_Q16, HarmShapeGain_Q16, Tilt_Q16, tmp32; opus_int32 nrg, pre_nrg_Q30, log_energy_Q7, log_energy_prev_Q7, energy_variation_Q7; opus_int32 delta_Q16, BWExp1_Q16, BWExp2_Q16, gain_mult_Q16, gain_add_Q16, strength_Q16, b_Q8; opus_int32 auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ]; opus_int32 refl_coef_Q16[ MAX_SHAPE_LPC_ORDER ]; opus_int32 AR1_Q24[ MAX_SHAPE_LPC_ORDER ]; opus_int32 AR2_Q24[ MAX_SHAPE_LPC_ORDER ]; VARDECL( opus_int16, x_windowed ); const opus_int16 *x_ptr, *pitch_res_ptr; SAVE_STACK; /* Point to start of first LPC analysis block */ x_ptr = x - psEnc->sCmn.la_shape; /****************/ /* GAIN CONTROL */ /****************/ SNR_adj_dB_Q7 = psEnc->sCmn.SNR_dB_Q7; /* Input quality is the average of the quality in the lowest two VAD bands */ psEncCtrl->input_quality_Q14 = ( opus_int )silk_RSHIFT( (opus_int32)psEnc->sCmn.input_quality_bands_Q15[ 0 ] + psEnc->sCmn.input_quality_bands_Q15[ 1 ], 2 ); /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */ psEncCtrl->coding_quality_Q14 = silk_RSHIFT( silk_sigm_Q15( silk_RSHIFT_ROUND( SNR_adj_dB_Q7 - SILK_FIX_CONST( 20.0, 7 ), 4 ) ), 1 ); /* Reduce coding SNR during low speech activity */ if( psEnc->sCmn.useCBR == 0 ) { b_Q8 
= SILK_FIX_CONST( 1.0, 8 ) - psEnc->sCmn.speech_activity_Q8; b_Q8 = silk_SMULWB( silk_LSHIFT( b_Q8, 8 ), b_Q8 ); SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, silk_SMULBB( SILK_FIX_CONST( -BG_SNR_DECR_dB, 7 ) >> ( 4 + 1 ), b_Q8 ), /* Q11*/ silk_SMULWB( SILK_FIX_CONST( 1.0, 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) ); /* Q12*/ } if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Reduce gains for periodic signals */ SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( HARM_SNR_INCR_dB, 8 ), psEnc->LTPCorr_Q15 ); } else { /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */ SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, silk_SMLAWB( SILK_FIX_CONST( 6.0, 9 ), -SILK_FIX_CONST( 0.4, 18 ), psEnc->sCmn.SNR_dB_Q7 ), SILK_FIX_CONST( 1.0, 14 ) - psEncCtrl->input_quality_Q14 ); } /*************************/ /* SPARSENESS PROCESSING */ /*************************/ /* Set quantizer offset */ if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Initially set to 0; may be overruled in process_gains(..) 
*/ psEnc->sCmn.indices.quantOffsetType = 0; psEncCtrl->sparseness_Q8 = 0; } else { /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */ nSamples = silk_LSHIFT( psEnc->sCmn.fs_kHz, 1 ); energy_variation_Q7 = 0; log_energy_prev_Q7 = 0; pitch_res_ptr = pitch_res; for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) { silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples ); nrg += silk_RSHIFT( nSamples, scale ); /* Q(-scale)*/ log_energy_Q7 = silk_lin2log( nrg ); if( k > 0 ) { energy_variation_Q7 += silk_abs( log_energy_Q7 - log_energy_prev_Q7 ); } log_energy_prev_Q7 = log_energy_Q7; pitch_res_ptr += nSamples; } psEncCtrl->sparseness_Q8 = silk_RSHIFT( silk_sigm_Q15( silk_SMULWB( energy_variation_Q7 - SILK_FIX_CONST( 5.0, 7 ), SILK_FIX_CONST( 0.1, 16 ) ) ), 7 ); /* Set quantization offset depending on sparseness measure */ if( psEncCtrl->sparseness_Q8 > SILK_FIX_CONST( SPARSENESS_THRESHOLD_QNT_OFFSET, 8 ) ) { psEnc->sCmn.indices.quantOffsetType = 0; } else { psEnc->sCmn.indices.quantOffsetType = 1; } /* Increase coding SNR for sparse signals */ SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( SPARSE_SNR_INCR_dB, 15 ), psEncCtrl->sparseness_Q8 - SILK_FIX_CONST( 0.5, 8 ) ); } /*******************************/ /* Control bandwidth expansion */ /*******************************/ /* More BWE for signals with high prediction gain */ strength_Q16 = silk_SMULWB( psEncCtrl->predGain_Q16, SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) ); BWExp1_Q16 = BWExp2_Q16 = silk_DIV32_varQ( SILK_FIX_CONST( BANDWIDTH_EXPANSION, 16 ), silk_SMLAWW( SILK_FIX_CONST( 1.0, 16 ), strength_Q16, strength_Q16 ), 16 ); delta_Q16 = silk_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - silk_SMULBB( 3, psEncCtrl->coding_quality_Q14 ), SILK_FIX_CONST( LOW_RATE_BANDWIDTH_EXPANSION_DELTA, 16 ) ); BWExp1_Q16 = silk_SUB32( BWExp1_Q16, delta_Q16 ); BWExp2_Q16 = silk_ADD32( BWExp2_Q16, delta_Q16 ); /* BWExp1 will be applied after BWExp2, so 
make it relative */ BWExp1_Q16 = silk_DIV32_16( silk_LSHIFT( BWExp1_Q16, 14 ), silk_RSHIFT( BWExp2_Q16, 2 ) ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */ warping_Q16 = silk_SMLAWB( psEnc->sCmn.warping_Q16, (opus_int32)psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( 0.01, 18 ) ); } else { warping_Q16 = 0; } /********************************************/ /* Compute noise shaping AR coefs and gains */ /********************************************/ ALLOC( x_windowed, psEnc->sCmn.shapeWinLength, opus_int16 ); for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { /* Apply window: sine slope followed by flat part followed by cosine slope */ opus_int shift, slope_part, flat_part; flat_part = psEnc->sCmn.fs_kHz * 3; slope_part = silk_RSHIFT( psEnc->sCmn.shapeWinLength - flat_part, 1 ); silk_apply_sine_window( x_windowed, x_ptr, 1, slope_part ); shift = slope_part; silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(opus_int16) ); shift += flat_part; silk_apply_sine_window( x_windowed + shift, x_ptr + shift, 2, slope_part ); /* Update pointer: next LPC analysis block */ x_ptr += psEnc->sCmn.subfr_length; if( psEnc->sCmn.warping_Q16 > 0 ) { /* Calculate warped auto correlation */ silk_warped_autocorrelation_FIX( auto_corr, &scale, x_windowed, warping_Q16, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder ); } else { /* Calculate regular auto correlation */ silk_autocorr( auto_corr, &scale, x_windowed, psEnc->sCmn.shapeWinLength, psEnc->sCmn.shapingLPCOrder + 1, arch ); } /* Add white noise, as a fraction of energy */ auto_corr[0] = silk_ADD32( auto_corr[0], silk_max_32( silk_SMULWB( silk_RSHIFT( auto_corr[ 0 ], 4 ), SILK_FIX_CONST( SHAPE_WHITE_NOISE_FRACTION, 20 ) ), 1 ) ); /* Calculate the reflection coefficients using schur */ nrg = silk_schur64( refl_coef_Q16, auto_corr, psEnc->sCmn.shapingLPCOrder ); silk_assert( nrg >= 0 ); /* Convert reflection 
coefficients to prediction coefficients */ silk_k2a_Q16( AR2_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder ); Qnrg = -scale; /* range: -12...30*/ silk_assert( Qnrg >= -12 ); silk_assert( Qnrg <= 30 ); /* Make sure that Qnrg is an even number */ if( Qnrg & 1 ) { Qnrg -= 1; nrg >>= 1; } tmp32 = silk_SQRT_APPROX( nrg ); Qnrg >>= 1; /* range: -6...15*/ psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT_SAT32( tmp32, 16 - Qnrg ); if( psEnc->sCmn.warping_Q16 > 0 ) { /* Adjust gain for warping */ gain_mult_Q16 = warped_gain( AR2_Q24, warping_Q16, psEnc->sCmn.shapingLPCOrder ); silk_assert( psEncCtrl->Gains_Q16[ k ] >= 0 ); if ( silk_SMULWW( silk_RSHIFT_ROUND( psEncCtrl->Gains_Q16[ k ], 1 ), gain_mult_Q16 ) >= ( silk_int32_MAX >> 1 ) ) { psEncCtrl->Gains_Q16[ k ] = silk_int32_MAX; } else { psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 ); } } /* Bandwidth expansion for synthesis filter shaping */ silk_bwexpander_32( AR2_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 ); /* Compute noise shaping filter coefficients */ silk_memcpy( AR1_Q24, AR2_Q24, psEnc->sCmn.shapingLPCOrder * sizeof( opus_int32 ) ); /* Bandwidth expansion for analysis filter shaping */ silk_assert( BWExp1_Q16 <= SILK_FIX_CONST( 1.0, 16 ) ); silk_bwexpander_32( AR1_Q24, psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 ); /* Ratio of prediction gains, in energy domain */ pre_nrg_Q30 = silk_LPC_inverse_pred_gain_Q24( AR2_Q24, psEnc->sCmn.shapingLPCOrder ); nrg = silk_LPC_inverse_pred_gain_Q24( AR1_Q24, psEnc->sCmn.shapingLPCOrder ); /*psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;*/ pre_nrg_Q30 = silk_LSHIFT32( silk_SMULWB( pre_nrg_Q30, SILK_FIX_CONST( 0.7, 15 ) ), 1 ); psEncCtrl->GainsPre_Q14[ k ] = ( opus_int ) SILK_FIX_CONST( 0.3, 14 ) + silk_DIV32_varQ( pre_nrg_Q30, nrg, 14 ); /* Convert to monic warped prediction coefficients and limit absolute values */ limit_warped_coefs( AR2_Q24, AR1_Q24, warping_Q16, SILK_FIX_CONST( 3.999, 24 ), 
psEnc->sCmn.shapingLPCOrder ); /* Convert from Q24 to Q13 and store in int16 */ for( i = 0; i < psEnc->sCmn.shapingLPCOrder; i++ ) { psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR1_Q24[ i ], 11 ) ); psEncCtrl->AR2_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR2_Q24[ i ], 11 ) ); } } /*****************/ /* Gain tweaking */ /*****************/ /* Increase gains during low speech activity and put lower limit on gains */ gain_mult_Q16 = silk_log2lin( -silk_SMLAWB( -SILK_FIX_CONST( 16.0, 7 ), SNR_adj_dB_Q7, SILK_FIX_CONST( 0.16, 16 ) ) ); gain_add_Q16 = silk_log2lin( silk_SMLAWB( SILK_FIX_CONST( 16.0, 7 ), SILK_FIX_CONST( MIN_QGAIN_DB, 7 ), SILK_FIX_CONST( 0.16, 16 ) ) ); silk_assert( gain_mult_Q16 > 0 ); for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 ); silk_assert( psEncCtrl->Gains_Q16[ k ] >= 0 ); psEncCtrl->Gains_Q16[ k ] = silk_ADD_POS_SAT32( psEncCtrl->Gains_Q16[ k ], gain_add_Q16 ); } gain_mult_Q16 = SILK_FIX_CONST( 1.0, 16 ) + silk_RSHIFT_ROUND( silk_MLA( SILK_FIX_CONST( INPUT_TILT, 26 ), psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) ), 10 ); for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { psEncCtrl->GainsPre_Q14[ k ] = silk_SMULWB( gain_mult_Q16, psEncCtrl->GainsPre_Q14[ k ] ); } /************************************************/ /* Control low-frequency shaping and noise tilt */ /************************************************/ /* Less low frequency shaping for noisy inputs */ strength_Q16 = silk_MUL( SILK_FIX_CONST( LOW_FREQ_SHAPING, 4 ), silk_SMLAWB( SILK_FIX_CONST( 1.0, 12 ), SILK_FIX_CONST( LOW_QUALITY_LOW_FREQ_SHAPING_DECR, 13 ), psEnc->sCmn.input_quality_bands_Q15[ 0 ] - SILK_FIX_CONST( 1.0, 15 ) ) ); strength_Q16 = silk_RSHIFT( silk_MUL( strength_Q16, psEnc->sCmn.speech_activity_Q8 ), 8 ); if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* Reduce low 
frequencies quantization noise for periodic signals, depending on pitch lag */ /*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/ opus_int fs_kHz_inv = silk_DIV32_16( SILK_FIX_CONST( 0.2, 14 ), psEnc->sCmn.fs_kHz ); for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) { b_Q14 = fs_kHz_inv + silk_DIV32_16( SILK_FIX_CONST( 3.0, 14 ), psEncCtrl->pitchL[ k ] ); /* Pack two coefficients in one int32 */ psEncCtrl->LF_shp_Q14[ k ] = silk_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 - silk_SMULWB( strength_Q16, b_Q14 ), 16 ); psEncCtrl->LF_shp_Q14[ k ] |= (opus_uint16)( b_Q14 - SILK_FIX_CONST( 1.0, 14 ) ); } silk_assert( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ) < SILK_FIX_CONST( 0.5, 24 ) ); /* Guarantees that second argument to SMULWB() is within range of an opus_int16*/ Tilt_Q16 = - SILK_FIX_CONST( HP_NOISE_COEF, 16 ) - silk_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - SILK_FIX_CONST( HP_NOISE_COEF, 16 ), silk_SMULWB( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ), psEnc->sCmn.speech_activity_Q8 ) ); } else { b_Q14 = silk_DIV32_16( 21299, psEnc->sCmn.fs_kHz ); /* 1.3_Q0 = 21299_Q14*/ /* Pack two coefficients in one int32 */ psEncCtrl->LF_shp_Q14[ 0 ] = silk_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 - silk_SMULWB( strength_Q16, silk_SMULWB( SILK_FIX_CONST( 0.6, 16 ), b_Q14 ) ), 16 ); psEncCtrl->LF_shp_Q14[ 0 ] |= (opus_uint16)( b_Q14 - SILK_FIX_CONST( 1.0, 14 ) ); for( k = 1; k < psEnc->sCmn.nb_subfr; k++ ) { psEncCtrl->LF_shp_Q14[ k ] = psEncCtrl->LF_shp_Q14[ 0 ]; } Tilt_Q16 = -SILK_FIX_CONST( HP_NOISE_COEF, 16 ); } /****************************/ /* HARMONIC SHAPING CONTROL */ /****************************/ /* Control boosting of harmonic frequencies */ HarmBoost_Q16 = silk_SMULWB( silk_SMULWB( SILK_FIX_CONST( 1.0, 17 ) - silk_LSHIFT( psEncCtrl->coding_quality_Q14, 3 ), psEnc->LTPCorr_Q15 ), SILK_FIX_CONST( LOW_RATE_HARMONIC_BOOST, 16 ) ); /* More harmonic boost for noisy input signals */ HarmBoost_Q16 = silk_SMLAWB( HarmBoost_Q16, SILK_FIX_CONST( 
1.0, 16 ) - silk_LSHIFT( psEncCtrl->input_quality_Q14, 2 ), SILK_FIX_CONST( LOW_INPUT_QUALITY_HARMONIC_BOOST, 16 ) ); if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) { /* More harmonic noise shaping for high bitrates or noisy input */ HarmShapeGain_Q16 = silk_SMLAWB( SILK_FIX_CONST( HARMONIC_SHAPING, 16 ), SILK_FIX_CONST( 1.0, 16 ) - silk_SMULWB( SILK_FIX_CONST( 1.0, 18 ) - silk_LSHIFT( psEncCtrl->coding_quality_Q14, 4 ), psEncCtrl->input_quality_Q14 ), SILK_FIX_CONST( HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING, 16 ) ); /* Less harmonic noise shaping for less periodic signals */ HarmShapeGain_Q16 = silk_SMULWB( silk_LSHIFT( HarmShapeGain_Q16, 1 ), silk_SQRT_APPROX( silk_LSHIFT( psEnc->LTPCorr_Q15, 15 ) ) ); } else { HarmShapeGain_Q16 = 0; } /*************************/ /* Smooth over subframes */ /*************************/ for( k = 0; k < MAX_NB_SUBFR; k++ ) { psShapeSt->HarmBoost_smth_Q16 = silk_SMLAWB( psShapeSt->HarmBoost_smth_Q16, HarmBoost_Q16 - psShapeSt->HarmBoost_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); psShapeSt->HarmShapeGain_smth_Q16 = silk_SMLAWB( psShapeSt->HarmShapeGain_smth_Q16, HarmShapeGain_Q16 - psShapeSt->HarmShapeGain_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); psShapeSt->Tilt_smth_Q16 = silk_SMLAWB( psShapeSt->Tilt_smth_Q16, Tilt_Q16 - psShapeSt->Tilt_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) ); psEncCtrl->HarmBoost_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmBoost_smth_Q16, 2 ); psEncCtrl->HarmShapeGain_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmShapeGain_smth_Q16, 2 ); psEncCtrl->Tilt_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->Tilt_smth_Q16, 2 ); } RESTORE_STACK; }
gpl-2.0
IMCG/fastsocket
kernel/drivers/acpi/pci_slot.c
508
10298
/* * pci_slot.c - ACPI PCI Slot Driver * * The code here is heavily leveraged from the acpiphp module. * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance. * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code * review and fixes. * * Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P. * Alex Chiang <achiang@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/dmi.h> static int debug; static int check_sta_before_sun; #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>" #define DRIVER_DESC "ACPI PCI Slot Detection Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(debug, bool, 0644); #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_slot"); #define MY_NAME "pci_slot" #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define dbg(format, arg...) 
\ do { \ if (debug) \ printk(KERN_DEBUG "%s: " format, \ MY_NAME , ## arg); \ } while (0) #define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */ struct acpi_pci_slot { acpi_handle root_handle; /* handle of the root bridge */ struct pci_slot *pci_slot; /* corresponding pci_slot */ struct list_head list; /* node in the list of slots */ }; static int acpi_pci_slot_add(acpi_handle handle); static void acpi_pci_slot_remove(acpi_handle handle); static LIST_HEAD(slot_list); static DEFINE_MUTEX(slot_list_lock); static struct acpi_pci_driver acpi_pci_slot_driver = { .add = acpi_pci_slot_add, .remove = acpi_pci_slot_remove, }; static int check_slot(acpi_handle handle, unsigned long long *sun) { int device = -1; unsigned long long adr, sta; acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); dbg("Checking slot on path: %s\n", (char *)buffer.pointer); if (check_sta_before_sun) { /* If SxFy doesn't have _STA, we just assume it's there */ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) goto out; } status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); goto out; } /* No _SUN == not a slot == bail */ status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); if (ACPI_FAILURE(status)) { dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); goto out; } device = (adr >> 16) & 0xffff; out: kfree(buffer.pointer); return device; } struct callback_args { acpi_walk_callback user_function; /* only for walk_p2p_bridge */ struct pci_bus *pci_bus; acpi_handle root_handle; }; /* * register_slot * * Called once for each SxFy object in the namespace. Don't worry about * calling pci_create_slot multiple times for the same pci_bus:device, * since each subsequent call simply bumps the refcount on the pci_slot. 
* * The number of calls to pci_destroy_slot from unregister_slot is * symmetrical. */ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) { int device; unsigned long long sun; char name[SLOT_NAME_SIZE]; struct acpi_pci_slot *slot; struct pci_slot *pci_slot; struct callback_args *parent_context = context; struct pci_bus *pci_bus = parent_context->pci_bus; device = check_slot(handle, &sun); if (device < 0) return AE_OK; slot = kmalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { err("%s: cannot allocate memory\n", __func__); return AE_OK; } snprintf(name, sizeof(name), "%llu", sun); pci_slot = pci_create_slot(pci_bus, device, name, NULL); if (IS_ERR(pci_slot)) { err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); kfree(slot); return AE_OK; } slot->root_handle = parent_context->root_handle; slot->pci_slot = pci_slot; INIT_LIST_HEAD(&slot->list); mutex_lock(&slot_list_lock); list_add(&slot->list, &slot_list); mutex_unlock(&slot_list_lock); get_device(&pci_bus->dev); dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n", pci_slot, pci_bus->number, device, name); return AE_OK; } /* * walk_p2p_bridge - discover and walk p2p bridges * @handle: points to an acpi_pci_root * @context: p2p_bridge_context pointer * * Note that when we call ourselves recursively, we pass a different * value of pci_bus in the child_context. 
*/ static acpi_status walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) { int device, function; unsigned long long adr; acpi_status status; acpi_handle dummy_handle; acpi_walk_callback user_function; struct pci_dev *dev; struct pci_bus *pci_bus; struct callback_args child_context; struct callback_args *parent_context = context; pci_bus = parent_context->pci_bus; user_function = parent_context->user_function; status = acpi_get_handle(handle, "_ADR", &dummy_handle); if (ACPI_FAILURE(status)) return AE_OK; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) return AE_OK; device = (adr >> 16) & 0xffff; function = adr & 0xffff; dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); if (!dev || !dev->subordinate) goto out; child_context.pci_bus = dev->subordinate; child_context.user_function = user_function; child_context.root_handle = parent_context->root_handle; dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number); status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, user_function, &child_context, NULL); if (ACPI_FAILURE(status)) goto out; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, walk_p2p_bridge, &child_context, NULL); out: pci_dev_put(dev); return AE_OK; } /* * walk_root_bridge - generic root bridge walker * @handle: points to an acpi_pci_root * @user_function: user callback for slot objects * * Call user_function for all objects underneath this root bridge. * Walk p2p bridges underneath us and call user_function on those too. 
*/ static int walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function) { int seg, bus; unsigned long long tmp; acpi_status status; acpi_handle dummy_handle; struct pci_bus *pci_bus; struct callback_args context; /* If the bridge doesn't have _STA, we assume it is always there */ status = acpi_get_handle(handle, "_STA", &dummy_handle); if (ACPI_SUCCESS(status)) { status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp); if (ACPI_FAILURE(status)) { info("%s: _STA evaluation failure\n", __func__); return 0; } if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0) /* don't register this object */ return 0; } status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); seg = ACPI_SUCCESS(status) ? tmp : 0; status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); bus = ACPI_SUCCESS(status) ? tmp : 0; pci_bus = pci_find_bus(seg, bus); if (!pci_bus) return 0; context.pci_bus = pci_bus; context.user_function = user_function; context.root_handle = handle; dbg("root bridge walk, pci_bus = %x\n", pci_bus->number); status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, user_function, &context, NULL); if (ACPI_FAILURE(status)) return status; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, walk_p2p_bridge, &context, NULL); if (ACPI_FAILURE(status)) err("%s: walk_p2p_bridge failure - %d\n", __func__, status); return status; } /* * acpi_pci_slot_add * @handle: points to an acpi_pci_root */ static int acpi_pci_slot_add(acpi_handle handle) { acpi_status status; status = walk_root_bridge(handle, register_slot); if (ACPI_FAILURE(status)) err("%s: register_slot failure - %d\n", __func__, status); return status; } /* * acpi_pci_slot_remove * @handle: points to an acpi_pci_root */ static void acpi_pci_slot_remove(acpi_handle handle) { struct acpi_pci_slot *slot, *tmp; struct pci_bus *pbus; mutex_lock(&slot_list_lock); list_for_each_entry_safe(slot, tmp, &slot_list, list) { if (slot->root_handle == handle) { list_del(&slot->list); pbus = slot->pci_slot->bus; 
pci_destroy_slot(slot->pci_slot); put_device(&pbus->dev); kfree(slot); } } mutex_unlock(&slot_list_lock); } static int do_sta_before_sun(const struct dmi_system_id *d) { info("%s detected: will evaluate _STA before calling _SUN\n", d->ident); check_sta_before_sun = 1; return 0; } static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that * are not present (as indicated by _STA), so for those machines, * we want to check _STA before evaluating _SUN. */ { .callback = do_sta_before_sun, .ident = "Fujitsu PRIMEQUEST", .matches = { DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"), DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"), }, }, {} }; static int __init acpi_pci_slot_init(void) { dmi_check_system(acpi_pci_slot_dmi_table); acpi_pci_register_driver(&acpi_pci_slot_driver); return 0; } static void __exit acpi_pci_slot_exit(void) { acpi_pci_unregister_driver(&acpi_pci_slot_driver); } module_init(acpi_pci_slot_init); module_exit(acpi_pci_slot_exit);
gpl-2.0
KylinUI/android_kernel_lge_mako
drivers/input/touchscreen/cyttsp-i2c-qc.c
508
83245
/* Source for: * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver. * drivers/input/touchscreen/cyttsp-i2c.c * * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Cypress reserves the right to make changes without further notice * to the materials described herein. Cypress does not assume any * liability arising out of the application described herein. 
* * Contact Cypress Semiconductor at www.cypress.com * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/byteorder/generic.h> #include <linux/bitops.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <linux/regulator/consumer.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif /* CONFIG_HAS_EARLYSUSPEND */ #define CY_DECLARE_GLOBALS #include <linux/cyttsp-qc.h> uint32_t cyttsp_tsdebug1 = 0xff; module_param_named(tsdebug1, cyttsp_tsdebug1, uint, 0664); #define FW_FNAME_LEN 40 #define TTSP_BUFF_SIZE 50 /* CY TTSP I2C Driver private data */ struct cyttsp { struct i2c_client *client; struct input_dev *input; struct timer_list timer; struct mutex mutex; char phys[32]; struct cyttsp_platform_data *platform_data; u8 num_prv_st_tch; u16 fw_start_addr; u16 act_trk[CY_NUM_TRK_ID]; u16 prv_st_tch[CY_NUM_ST_TCH_ID]; u16 prv_mt_tch[CY_NUM_MT_TCH_ID]; u16 prv_mt_pos[CY_NUM_TRK_ID][2]; atomic_t irq_enabled; bool cyttsp_update_fw; bool cyttsp_fwloader_mode; bool is_suspended; struct regulator **vdd; char fw_fname[FW_FNAME_LEN]; #ifdef CONFIG_HAS_EARLYSUSPEND struct early_suspend early_suspend; #endif /* CONFIG_HAS_EARLYSUSPEND */ }; static u8 irq_cnt; /* comparison counter with register valuw */ static u32 irq_cnt_total; /* total interrupts */ static u32 irq_err_cnt; /* count number of touch interrupts with err */ #define CY_IRQ_CNT_MASK 0x000000FF /* mapped for sizeof count in reg */ #define CY_IRQ_CNT_REG 0x00 /* tt_undef[0]=reg 0x1B - Gen3 only */ #ifdef CONFIG_HAS_EARLYSUSPEND static void cyttsp_early_suspend(struct early_suspend *handler); static void cyttsp_late_resume(struct early_suspend *handler); #endif /* CONFIG_HAS_EARLYSUSPEND */ /* 
**************************************************************************** * Prototypes for static functions * ************************************************************************** */ static irqreturn_t cyttsp_irq(int irq, void *handle); static int cyttsp_inlist(u16 prev_track[], u8 cur_trk_id, u8 *prev_loc, u8 num_touches); static int cyttsp_next_avail_inlist(u16 cur_trk[], u8 *new_loc, u8 num_touches); static int cyttsp_putbl(struct cyttsp *ts, int show, int show_status, int show_version, int show_cid); static int __devinit cyttsp_probe(struct i2c_client *client, const struct i2c_device_id *id); static int __devexit cyttsp_remove(struct i2c_client *client); static int cyttsp_resume(struct device *dev); static int cyttsp_suspend(struct device *dev); /* Static variables */ static struct cyttsp_gen3_xydata_t g_xy_data; static struct cyttsp_bootloader_data_t g_bl_data; static struct cyttsp_sysinfo_data_t g_sysinfo_data; static const struct i2c_device_id cyttsp_id[] = { { CY_I2C_NAME, 0 }, { } }; static u8 bl_cmd[] = { CY_BL_FILE0, CY_BL_CMD, CY_BL_EXIT, CY_BL_KEY0, CY_BL_KEY1, CY_BL_KEY2, CY_BL_KEY3, CY_BL_KEY4, CY_BL_KEY5, CY_BL_KEY6, CY_BL_KEY7}; MODULE_DEVICE_TABLE(i2c, cyttsp_id); #ifdef CONFIG_PM static const struct dev_pm_ops cyttsp_pm_ops = { #ifndef CONFIG_HAS_EARLYSUSPEND .suspend = cyttsp_suspend, .resume = cyttsp_resume, #endif }; #endif static struct i2c_driver cyttsp_driver = { .driver = { .name = CY_I2C_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &cyttsp_pm_ops, #endif }, .probe = cyttsp_probe, .remove = __devexit_p(cyttsp_remove), .id_table = cyttsp_id, }; MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver"); MODULE_AUTHOR("Cypress"); static ssize_t cyttsp_irq_status(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); struct cyttsp *ts = i2c_get_clientdata(client); return snprintf(buf, TTSP_BUFF_SIZE, "%u\n", 
atomic_read(&ts->irq_enabled)); } static ssize_t cyttsp_irq_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); struct cyttsp *ts = i2c_get_clientdata(client); int err = 0; unsigned long value; if (size > 2) return -EINVAL; err = strict_strtoul(buf, 10, &value); if (err != 0) return err; switch (value) { case 0: if (atomic_cmpxchg(&ts->irq_enabled, 1, 0)) { pr_info("touch irq disabled!\n"); disable_irq_nosync(ts->client->irq); } err = size; break; case 1: if (!atomic_cmpxchg(&ts->irq_enabled, 0, 1)) { pr_info("touch irq enabled!\n"); enable_irq(ts->client->irq); } err = size; break; default: pr_info("cyttsp_irq_enable failed -> irq_enabled = %d\n", atomic_read(&ts->irq_enabled)); err = -EINVAL; break; } return err; } static DEVICE_ATTR(irq_enable, 0664, cyttsp_irq_status, cyttsp_irq_enable); static ssize_t cyttsp_fw_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, TTSP_BUFF_SIZE, "%d.%d.%d\n", g_bl_data.appid_lo, g_bl_data.appver_hi, g_bl_data.appver_lo); } static DEVICE_ATTR(cyttsp_fw_ver, 0664, cyttsp_fw_show, NULL); /* firmware flashing block */ #define BLK_SIZE 16 #define DATA_REC_LEN 64 #define BLK_SEED 0xff #define RECAL_REG 0x1b enum bl_commands { BL_CMD_WRBLK = 0x39, BL_CMD_INIT = 0x38, BL_CMD_TERMINATE = 0x3b, }; /* TODO: Add key as part of platform data */ #define KEY_CS (0 + 1 + 2 + 3 + 4 + 5 + 6 + 7) #define KEY {0, 1, 2, 3, 4, 5, 6, 7} static const char _key[] = KEY; #define KEY_LEN sizeof(_key) static int rec_cnt; struct fw_record { u8 seed; u8 cmd; u8 key[KEY_LEN]; u8 blk_hi; u8 blk_lo; u8 data[DATA_REC_LEN]; u8 data_cs; u8 rec_cs; }; #define fw_rec_size (sizeof(struct fw_record)) struct cmd_record { u8 reg; u8 seed; u8 cmd; u8 key[KEY_LEN]; }; #define cmd_rec_size (sizeof(struct cmd_record)) static struct fw_record data_record = { .seed = BLK_SEED, .cmd = BL_CMD_WRBLK, .key = KEY, }; static const 
/*
 * cyttsp_soft_reset() - put the TTSP device back into bootloader idle mode.
 *
 * Writes CY_SOFT_RESET_MODE to the host-mode register, retrying the i2c
 * write up to 10 times with 20 ms back-off, then polls the bootloader
 * status byte (via cyttsp_putbl() into the global g_bl_data) until it
 * reads 0x10 or 0x11 or ~100 polls elapse.
 * NOTE(review): 0x10/0x11 appear to be "bootloader idle/ready" codes —
 * confirm against the TTSP bootloader status register spec.
 *
 * Returns 0 on success, the negative i2c error if the write never
 * succeeded, or -EINVAL if the device never reached bootloader idle.
 */
static int cyttsp_soft_reset(struct cyttsp *ts)
{
	int retval = 0, tries = 0;
	u8 host_reg = CY_SOFT_RESET_MODE;

	/* retry the reset command; the part may NAK while it is busy */
	do {
		retval = i2c_smbus_write_i2c_block_data(ts->client,
			CY_REG_BASE, sizeof(host_reg), &host_reg);
		if (retval < 0)
			msleep(20);
	} while (tries++ < 10 && (retval < 0));

	if (retval < 0) {
		pr_err("%s: failed\n", __func__);
		return retval;
	}

	/* wait for the bootloader to report idle (0x10) or ready (0x11) */
	tries = 0;
	do {
		msleep(20);
		cyttsp_putbl(ts, 1, true, true, false);
	} while (g_bl_data.bl_status != 0x10 &&
		g_bl_data.bl_status != 0x11 &&
		tries++ < 100);

	if (g_bl_data.bl_status != 0x11 &&
		g_bl_data.bl_status != 0x10)
		return -EINVAL;

	return 0;
}
/*
 * str2uc() - convert two ASCII hex characters into one byte.
 * @str: points at (at least) two hex characters; need not be
 *       NUL-terminated beyond them.
 * @val: receives the decoded byte.
 *
 * Used while walking Intel-HEX style firmware records, where every
 * byte is encoded as exactly NUM_CHAR_IN_HEX (2) hex digits.
 *
 * Previously this built a 2-char substring on the stack and ran it
 * through the deprecated strict_strtoul(); decoding the two nibbles
 * directly with the kernel's hex_to_bin() helper is simpler and also
 * rejects non-hex characters explicitly.
 *
 * Returns 0 on success, -EINVAL on NULL/short/non-hex input.
 */
static int str2uc(char *str, u8 *val)
{
	int hi, lo;

	if (!str)
		return -EINVAL;

	if (strnlen(str, NUM_CHAR_IN_HEX) < NUM_CHAR_IN_HEX)
		return -EINVAL;

	/* hex_to_bin() returns the nibble value, or -1 for non-hex */
	hi = hex_to_bin(str[0]);
	lo = hex_to_bin(str[1]);
	if (hi < 0 || lo < 0)
		return -EINVAL;

	*val = (u8)((hi << 4) | lo);
	return 0;
}
/*
 * prepare_record() - decode one ASCII firmware record into the shared
 * data_record and compute its checksums.
 * @rec: one text record line (":llaaaatt<data>..." layout, offsets via
 *       the REC_*_OFFSET constants); assumed already validated by
 *       check_record().
 *
 * Fills the file-scope 'data_record' (seed/cmd/key are static
 * initialized) and returns a pointer to it, or an ERR_PTR on hex
 * decode failure.
 *
 * NOTE(review): not reentrant — every call rewrites the single static
 * data_record; safe only because flashing is strictly sequential.
 * NOTE(review): each data byte is added to rec_cs inside the loop AND
 * again via the final 'rec_cs += data_cs', i.e. data bytes are folded
 * into rec_cs twice — looks deliberate but confirm against the
 * bootloader record-checksum specification.
 */
static struct fw_record *prepare_record(u8 *rec)
{
	int i, rc;
	u16 addr;
	u8 hi_off, lo_off;
	u8 *p;

	/* record address: two hex bytes, big-endian */
	rc = str2uc(rec + REC_ADDR_HI_OFFSET, &hi_off);
	if (rc < 0)
		return ERR_PTR((long) rc);
	rc = str2uc(rec + REC_ADDR_LO_OFFSET, &lo_off);
	if (rc < 0)
		return ERR_PTR((long) rc);
	addr = (hi_off << 8) | lo_off;

	/* seeds blk_hi/blk_lo and the checksum bases from addr */
	init_data_record(&data_record, addr);

	/* decode DATA_REC_LEN payload bytes, 2 hex chars each */
	p = rec + REC_DATA_OFFSET;
	for (i = 0; i < DATA_REC_LEN; i++) {
		rc = str2uc(p, &data_record.data[i]);
		if (rc < 0)
			return ERR_PTR((long) rc);
		data_record.data_cs += data_record.data[i];
		data_record.rec_cs += data_record.data[i];
		p += 2;
	}
	data_record.rec_cs += data_record.data_cs;

	return &data_record;
}
BLK_SIZE : len; memcpy(data + 1, rec, blk_len); rec += blk_len; rc = flash_block(ts, data, blk_len + 1); if (rc < 0) return rc; blk_offset += blk_len; } return 0; } static int flash_data_rec(struct cyttsp *ts, u8 *buf) { struct fw_record *rec; int rc, tries; if (!buf) return -EINVAL; rc = check_record(ts, buf); if (rc < 0) { pr_debug("%s: record ignored %s", __func__, buf); return 0; } rec = prepare_record(buf); if (IS_ERR_OR_NULL(rec)) return PTR_ERR(rec); rc = flash_record(ts, rec); if (rc < 0) return rc; tries = 0; do { if (rec_cnt%2) msleep(20); cyttsp_putbl(ts, 4, true, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); rec_cnt++; return rc; } static int cyttspfw_flash_firmware(struct cyttsp *ts, const u8 *data, int data_len) { u8 *buf; int i, j; int rc, tries = 0; /* initiate bootload: this will erase all the existing data */ rc = flash_command(ts, &initiate_rec); if (rc < 0) return rc; do { msleep(100); cyttsp_putbl(ts, 4, true, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); buf = kzalloc(REC_LINE_SIZE + 1, GFP_KERNEL); if (!buf) { pr_err("%s: no memory\n", __func__); return -ENOMEM; } rec_cnt = 0; /* flash data records */ for (i = 0, j = 0; i < data_len; i++, j++) { if ((data[i] == REC_START_CHR) && j) { buf[j] = 0; rc = flash_data_rec(ts, buf); if (rc < 0) return rc; j = 0; } buf[j] = data[i]; } /* flash last data record */ if (j) { buf[j] = 0; rc = flash_data_rec(ts, buf); if (rc < 0) return rc; } kfree(buf); /* termiate bootload */ tries = 0; rc = flash_command(ts, &terminate_rec); do { msleep(100); cyttsp_putbl(ts, 4, true, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); return rc; } static int get_hex_fw_ver(u8 *p, u8 *ttspver_hi, u8 *ttspver_lo, u8 *appid_hi, u8 *appid_lo, u8 *appver_hi, u8 *appver_lo, u8 *cid_0, u8 *cid_1, u8 *cid_2) { int rc; p = p + ID_INFO_OFFSET_IN_REC; rc = str2uc(p, 
ttspver_hi); if (rc < 0) return rc; p += 2; rc = str2uc(p, ttspver_lo); if (rc < 0) return rc; p += 2; rc = str2uc(p, appid_hi); if (rc < 0) return rc; p += 2; rc = str2uc(p, appid_lo); if (rc < 0) return rc; p += 2; rc = str2uc(p, appver_hi); if (rc < 0) return rc; p += 2; rc = str2uc(p, appver_lo); if (rc < 0) return rc; p += 2; rc = str2uc(p, cid_0); if (rc < 0) return rc; p += 2; rc = str2uc(p, cid_1); if (rc < 0) return rc; p += 2; rc = str2uc(p, cid_2); if (rc < 0) return rc; return 0; } static void cyttspfw_flash_start(struct cyttsp *ts, const u8 *data, int data_len, u8 *buf, bool force) { int rc; u8 ttspver_hi = 0, ttspver_lo = 0, fw_upgrade = 0; u8 appid_hi = 0, appid_lo = 0; u8 appver_hi = 0, appver_lo = 0; u8 cid_0 = 0, cid_1 = 0, cid_2 = 0; char *p = buf; /* get hex firmware version */ rc = get_hex_fw_ver(p, &ttspver_hi, &ttspver_lo, &appid_hi, &appid_lo, &appver_hi, &appver_lo, &cid_0, &cid_1, &cid_2); if (rc < 0) { pr_err("%s: unable to get hex firmware version\n", __func__); return; } /* disable interrupts before flashing */ if (ts->client->irq == 0) del_timer(&ts->timer); else disable_irq(ts->client->irq); /* enter bootloader idle mode */ rc = cyttsp_soft_reset(ts); if (rc < 0) { pr_err("%s: try entering into idle mode" " second time\n", __func__); msleep(1000); rc = cyttsp_soft_reset(ts); } if (rc < 0) { pr_err("%s: try again later\n", __func__); return; } pr_info("Current firmware: %d.%d.%d", g_bl_data.appid_lo, g_bl_data.appver_hi, g_bl_data.appver_lo); pr_info("New firmware: %d.%d.%d", appid_lo, appver_hi, appver_lo); if (force) fw_upgrade = 1; else if (!(g_bl_data.bl_status & BL_CHECKSUM_MASK) && (appid_lo == ts->platform_data->correct_fw_ver)) fw_upgrade = 1; else if ((appid_hi == g_bl_data.appid_hi) && (appid_lo == g_bl_data.appid_lo)) if (appver_hi > g_bl_data.appver_hi) fw_upgrade = 1; else if ((appver_hi == g_bl_data.appver_hi) && (appver_lo > g_bl_data.appver_lo)) fw_upgrade = 1; else { fw_upgrade = 0; pr_info("%s: Firmware version " 
"lesser/equal to existing firmware, " "upgrade not needed\n", __func__); } else if (appid_lo == ts->platform_data->correct_fw_ver) fw_upgrade = 1; else { fw_upgrade = 0; pr_info("%s: Firmware versions do not match, " "cannot upgrade\n", __func__); } if (fw_upgrade) { pr_info("%s: Starting firmware upgrade\n", __func__); rc = cyttspfw_flash_firmware(ts, data, data_len); if (rc < 0) pr_err("%s: firmware upgrade failed\n", __func__); else pr_info("%s: firmware upgrade success\n", __func__); } /* enter bootloader idle mode */ cyttsp_soft_reset(ts); /* exit bootloader mode */ cyttsp_exit_bl_mode(ts); msleep(100); /* set sysinfo details */ cyttsp_set_sysinfo_mode(ts); /* enter application mode */ cyttsp_set_opmode(ts); /* enable interrupts */ if (ts->client->irq == 0) mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT); else enable_irq(ts->client->irq); } static void cyttspfw_upgrade_start(struct cyttsp *ts, const u8 *data, int data_len, bool force) { int i, j; u8 *buf; buf = kzalloc(REC_LINE_SIZE + 1, GFP_KERNEL); if (!buf) { pr_err("%s: no memory\n", __func__); return; } for (i = 0, j = 0; i < data_len; i++, j++) { if ((data[i] == REC_START_CHR) && j) { buf[j] = 0; j = 0; if (!strncmp(buf, ID_INFO_REC, strnlen(ID_INFO_REC, ID_INFO_REC_LEN))) { cyttspfw_flash_start(ts, data, data_len, buf, force); break; } } buf[j] = data[i]; } /* check in the last record of firmware */ if (j) { buf[j] = 0; if (!strncmp(buf, ID_INFO_REC, strnlen(ID_INFO_REC, ID_INFO_REC_LEN))) { cyttspfw_flash_start(ts, data, data_len, buf, force); } } kfree(buf); } static void cyttspfw_upgrade(struct device *dev, bool force) { struct cyttsp *ts = dev_get_drvdata(dev); const struct firmware *cyttsp_fw; int retval = 0; if (ts->is_suspended == true) { pr_err("%s: in suspend state, resume it\n", __func__); retval = cyttsp_resume(dev); if (retval < 0) { pr_err("%s: unable to resume\n", __func__); return; } } retval = request_firmware(&cyttsp_fw, ts->fw_fname, dev); if (retval < 0) { pr_err("%s: %s request 
failed(%d)\n", __func__, ts->fw_fname, retval); } else { /* check and start upgrade */ cyttspfw_upgrade_start(ts, cyttsp_fw->data, cyttsp_fw->size, force); release_firmware(cyttsp_fw); } } static ssize_t cyttsp_update_fw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cyttsp *ts = dev_get_drvdata(dev); return snprintf(buf, 2, "%d\n", ts->cyttsp_fwloader_mode); } static ssize_t cyttsp_force_update_fw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct cyttsp *ts = dev_get_drvdata(dev); unsigned long val; int rc; if (size > 2) return -EINVAL; rc = strict_strtoul(buf, 10, &val); if (rc != 0) return rc; mutex_lock(&ts->mutex); if (!ts->cyttsp_fwloader_mode && val) { ts->cyttsp_fwloader_mode = 1; cyttspfw_upgrade(dev, true); ts->cyttsp_fwloader_mode = 0; } mutex_unlock(&ts->mutex); return size; } static DEVICE_ATTR(cyttsp_force_update_fw, 0664, cyttsp_update_fw_show, cyttsp_force_update_fw_store); static ssize_t cyttsp_update_fw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct cyttsp *ts = dev_get_drvdata(dev); unsigned long val; int rc; if (size > 2) return -EINVAL; rc = strict_strtoul(buf, 10, &val); if (rc != 0) return rc; mutex_lock(&ts->mutex); if (!ts->cyttsp_fwloader_mode && val) { ts->cyttsp_fwloader_mode = 1; cyttspfw_upgrade(dev, false); ts->cyttsp_fwloader_mode = 0; } mutex_unlock(&ts->mutex); return size; } static DEVICE_ATTR(cyttsp_update_fw, 0664, cyttsp_update_fw_show, cyttsp_update_fw_store); static ssize_t cyttsp_fw_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cyttsp *ts = dev_get_drvdata(dev); return snprintf(buf, FW_FNAME_LEN - 1, "%s\n", ts->fw_fname); } static ssize_t cyttsp_fw_name_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct cyttsp *ts = dev_get_drvdata(dev); if (size > FW_FNAME_LEN - 1) return -EINVAL; strlcpy(ts->fw_fname, buf, size); if 
(ts->fw_fname[size-1] == '\n') ts->fw_fname[size-1] = 0; return size; } static DEVICE_ATTR(cyttsp_fw_name, 0664, cyttsp_fw_name_show, cyttsp_fw_name_store); static void cyttsp_xy_handler(struct cyttsp *ts) { u8 id, tilt, rev_x, rev_y; u8 i, loc; u8 prv_tch; /* number of previous touches */ u8 cur_tch; /* number of current touches */ u16 tmp_trk[CY_NUM_MT_TCH_ID]; u16 snd_trk[CY_NUM_MT_TCH_ID]; u16 cur_trk[CY_NUM_TRK_ID]; u16 cur_st_tch[CY_NUM_ST_TCH_ID]; u16 cur_mt_tch[CY_NUM_MT_TCH_ID]; /* if NOT CY_USE_TRACKING_ID then * only uses CY_NUM_MT_TCH_ID positions */ u16 cur_mt_pos[CY_NUM_TRK_ID][2]; /* if NOT CY_USE_TRACKING_ID then * only uses CY_NUM_MT_TCH_ID positions */ u8 cur_mt_z[CY_NUM_TRK_ID]; u8 curr_tool_width; u16 st_x1, st_y1; u8 st_z1; u16 st_x2, st_y2; u8 st_z2; s32 retval; int val; cyttsp_xdebug("TTSP handler start 1:\n"); /* get event data from CYTTSP device */ i = CY_NUM_RETRY; do { retval = i2c_smbus_read_i2c_block_data(ts->client, CY_REG_BASE, sizeof(struct cyttsp_gen3_xydata_t), (u8 *)&g_xy_data); } while ((retval < CY_OK) && --i); if (retval < CY_OK) { /* return immediately on * failure to read device on the i2c bus */ goto exit_xy_handler; } cyttsp_xdebug("TTSP handler start 2:\n"); /* compare own irq counter with the device irq counter */ if (ts->client->irq) { u8 host_reg; u8 cur_cnt; if (ts->platform_data->use_hndshk) { host_reg = g_xy_data.hst_mode & CY_HNDSHK_BIT ? 
g_xy_data.hst_mode & ~CY_HNDSHK_BIT : g_xy_data.hst_mode | CY_HNDSHK_BIT; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); } cur_cnt = g_xy_data.tt_undef[CY_IRQ_CNT_REG]; irq_cnt_total++; irq_cnt++; if (irq_cnt != cur_cnt) { irq_err_cnt++; cyttsp_debug("i_c_ER: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \ irq_cnt, \ cur_cnt, g_xy_data.hst_mode, \ (unsigned long)irq_cnt_total, \ (unsigned long)irq_err_cnt); } else { cyttsp_debug("i_c_ok: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \ irq_cnt, \ cur_cnt, g_xy_data.hst_mode, \ (unsigned long)irq_cnt_total, \ (unsigned long)irq_err_cnt); } irq_cnt = cur_cnt; } /* Get the current num touches and return if there are no touches */ if ((GET_BOOTLOADERMODE(g_xy_data.tt_mode) == 1) || (GET_HSTMODE(g_xy_data.hst_mode) != CY_OK)) { u8 host_reg, tries; /* the TTSP device has suffered spurious reset or mode switch */ cyttsp_debug( \ "Spurious err opmode (tt_mode=%02X hst_mode=%02X)\n", \ g_xy_data.tt_mode, g_xy_data.hst_mode); cyttsp_debug("Reset TTSP Device; Terminating active tracks\n"); /* terminate all active tracks */ cur_tch = CY_NTCH; /* reset TTSP part and take it back out of Bootloader mode */ /* reset TTSP Device back to bootloader mode */ host_reg = CY_SOFT_RESET_MODE; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete reset back to bootloader */ tries = 0; do { usleep_range(1000, 1000); cyttsp_putbl(ts, 1, false, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); retval = cyttsp_putbl(ts, 1, true, true, true); /* switch back to operational mode */ /* take TTSP device out of bootloader mode; * switch back to TrueTouch operational mode */ if (!(retval < CY_OK)) { int tries; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); /* wait for TTSP Device to complete * switch to Operational mode */ tries = 0; do { msleep(100); 
cyttsp_putbl(ts, 2, false, false, false); } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && tries++ < 100); cyttsp_putbl(ts, 2, true, false, false); } goto exit_xy_handler; } else { cur_tch = GET_NUM_TOUCHES(g_xy_data.tt_stat); if (IS_LARGE_AREA(g_xy_data.tt_stat)) { /* terminate all active tracks */ cur_tch = CY_NTCH; cyttsp_debug("Large obj detect (tt_stat=0x%02X). Terminate act trks\n", \ g_xy_data.tt_stat); } else if (cur_tch > CY_NUM_MT_TCH_ID) { /* if the number of fingers on the touch surface * is more than the maximum then * there will be no new track information * even for the original touches. * Therefore, terminate all active tracks. */ cur_tch = CY_NTCH; cyttsp_debug("Num touch err (tt_stat=0x%02X). Terminate act trks\n", \ g_xy_data.tt_stat); } } /* set tool size */ curr_tool_width = CY_SMALL_TOOL_WIDTH; /* translate Gen2 interface data into comparable Gen3 data */ if (ts->platform_data->gen == CY_GEN2) { struct cyttsp_gen2_xydata_t *pxy_gen2_data; pxy_gen2_data = (struct cyttsp_gen2_xydata_t *)(&g_xy_data); /* use test data? 
*/ cyttsp_testdat(&g_xy_data, &tt_gen2_testray, \ sizeof(struct cyttsp_gen3_xydata_t)); if (ts->platform_data->disable_ghost_det && (cur_tch == CY_GEN2_GHOST)) cur_tch = CY_GEN2_2TOUCH; if (pxy_gen2_data->evnt_idx == CY_GEN2_NOTOUCH) { cur_tch = 0; } else if (cur_tch == CY_GEN2_GHOST) { cur_tch = 0; } else if (cur_tch == CY_GEN2_2TOUCH) { /* stuff artificial track ID1 and ID2 */ g_xy_data.touch12_id = 0x12; g_xy_data.z1 = CY_MAXZ; g_xy_data.z2 = CY_MAXZ; cur_tch--; /* 2 touches */ } else if (cur_tch == CY_GEN2_1TOUCH) { /* stuff artificial track ID1 and ID2 */ g_xy_data.touch12_id = 0x12; g_xy_data.z1 = CY_MAXZ; g_xy_data.z2 = CY_NTCH; if (pxy_gen2_data->evnt_idx == CY_GEN2_TOUCH2) { /* push touch 2 data into touch1 * (first finger up; second finger down) */ /* stuff artificial track ID1 for touch2 info */ g_xy_data.touch12_id = 0x20; /* stuff touch 1 with touch 2 coordinate data */ g_xy_data.x1 = g_xy_data.x2; g_xy_data.y1 = g_xy_data.y2; } } else { cur_tch = 0; } } else { /* use test data? 
*/ cyttsp_testdat(&g_xy_data, &tt_gen3_testray, \ sizeof(struct cyttsp_gen3_xydata_t)); } /* clear current active track ID array and count previous touches */ for (id = 0, prv_tch = CY_NTCH; id < CY_NUM_TRK_ID; id++) { cur_trk[id] = CY_NTCH; prv_tch += ts->act_trk[id]; } /* send no events if no previous touches and no new touches */ if ((prv_tch == CY_NTCH) && ((cur_tch == CY_NTCH) || (cur_tch > CY_NUM_MT_TCH_ID))) { goto exit_xy_handler; } cyttsp_debug("prev=%d curr=%d\n", prv_tch, cur_tch); for (id = 0; id < CY_NUM_ST_TCH_ID; id++) { /* clear current single touches array */ cur_st_tch[id] = CY_IGNR_TCH; } /* clear single touch positions */ st_x1 = CY_NTCH; st_y1 = CY_NTCH; st_z1 = CY_NTCH; st_x2 = CY_NTCH; st_y2 = CY_NTCH; st_z2 = CY_NTCH; for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { /* clear current multi-touches array and * multi-touch positions/z */ cur_mt_tch[id] = CY_IGNR_TCH; } if (ts->platform_data->use_trk_id) { for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { cur_mt_pos[id][CY_XPOS] = 0; cur_mt_pos[id][CY_YPOS] = 0; cur_mt_z[id] = 0; } } else { for (id = 0; id < CY_NUM_TRK_ID; id++) { cur_mt_pos[id][CY_XPOS] = 0; cur_mt_pos[id][CY_YPOS] = 0; cur_mt_z[id] = 0; } } /* Determine if display is tilted */ if (FLIP_DATA(ts->platform_data->flags)) tilt = true; else tilt = false; /* Check for switch in origin */ if (REVERSE_X(ts->platform_data->flags)) rev_x = true; else rev_x = false; if (REVERSE_Y(ts->platform_data->flags)) rev_y = true; else rev_y = false; if (cur_tch) { struct cyttsp_gen2_xydata_t *pxy_gen2_data; struct cyttsp_gen3_xydata_t *pxy_gen3_data; switch (ts->platform_data->gen) { case CY_GEN2: { pxy_gen2_data = (struct cyttsp_gen2_xydata_t *)(&g_xy_data); cyttsp_xdebug("TTSP Gen2 report:\n"); cyttsp_xdebug("%02X %02X %02X\n", \ pxy_gen2_data->hst_mode, \ pxy_gen2_data->tt_mode, \ pxy_gen2_data->tt_stat); cyttsp_xdebug("%04X %04X %02X %02X\n", \ pxy_gen2_data->x1, \ pxy_gen2_data->y1, \ pxy_gen2_data->z1, \ pxy_gen2_data->evnt_idx); cyttsp_xdebug("%04X 
%04X %02X\n", \ pxy_gen2_data->x2, \ pxy_gen2_data->y2, \ pxy_gen2_data->tt_undef1); cyttsp_xdebug("%02X %02X %02X\n", \ pxy_gen2_data->gest_cnt, \ pxy_gen2_data->gest_id, \ pxy_gen2_data->gest_set); break; } case CY_GEN3: default: { pxy_gen3_data = (struct cyttsp_gen3_xydata_t *)(&g_xy_data); cyttsp_xdebug("TTSP Gen3 report:\n"); cyttsp_xdebug("%02X %02X %02X\n", \ pxy_gen3_data->hst_mode, pxy_gen3_data->tt_mode, pxy_gen3_data->tt_stat); cyttsp_xdebug("%04X %04X %02X %02X", \ pxy_gen3_data->x1, pxy_gen3_data->y1, pxy_gen3_data->z1, \ pxy_gen3_data->touch12_id); cyttsp_xdebug("%04X %04X %02X\n", \ pxy_gen3_data->x2, \ pxy_gen3_data->y2, \ pxy_gen3_data->z2); cyttsp_xdebug("%02X %02X %02X\n", \ pxy_gen3_data->gest_cnt, \ pxy_gen3_data->gest_id, \ pxy_gen3_data->gest_set); cyttsp_xdebug("%04X %04X %02X %02X\n", \ pxy_gen3_data->x3, \ pxy_gen3_data->y3, \ pxy_gen3_data->z3, \ pxy_gen3_data->touch34_id); cyttsp_xdebug("%04X %04X %02X\n", \ pxy_gen3_data->x4, \ pxy_gen3_data->y4, \ pxy_gen3_data->z4); break; } } } /* process the touches */ switch (cur_tch) { case 4: { g_xy_data.x4 = be16_to_cpu(g_xy_data.x4); g_xy_data.y4 = be16_to_cpu(g_xy_data.y4); if (tilt) FLIP_XY(g_xy_data.x4, g_xy_data.y4); if (rev_x) { val = INVERT_X(g_xy_data.x4, ts->platform_data->panel_maxx); if (val >= 0) g_xy_data.x4 = val; else pr_debug("X value is negative. Please configure" " maxx in platform data structure\n"); } if (rev_y) { val = INVERT_X(g_xy_data.y4, ts->platform_data->panel_maxy); if (val >= 0) g_xy_data.y4 = val; else pr_debug("Y value is negative. 
Please configure" " maxy in platform data structure\n"); } id = GET_TOUCH4_ID(g_xy_data.touch34_id); if (ts->platform_data->use_trk_id) { cur_mt_pos[CY_MT_TCH4_IDX][CY_XPOS] = g_xy_data.x4; cur_mt_pos[CY_MT_TCH4_IDX][CY_YPOS] = g_xy_data.y4; cur_mt_z[CY_MT_TCH4_IDX] = g_xy_data.z4; } else { cur_mt_pos[id][CY_XPOS] = g_xy_data.x4; cur_mt_pos[id][CY_YPOS] = g_xy_data.y4; cur_mt_z[id] = g_xy_data.z4; } cur_mt_tch[CY_MT_TCH4_IDX] = id; cur_trk[id] = CY_TCH; if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { st_x1 = g_xy_data.x4; st_y1 = g_xy_data.y4; st_z1 = g_xy_data.z4; cur_st_tch[CY_ST_FNGR1_IDX] = id; } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { st_x2 = g_xy_data.x4; st_y2 = g_xy_data.y4; st_z2 = g_xy_data.z4; cur_st_tch[CY_ST_FNGR2_IDX] = id; } } cyttsp_xdebug("4th XYZ:% 3d,% 3d,% 3d ID:% 2d\n\n", \ g_xy_data.x4, g_xy_data.y4, g_xy_data.z4, \ (g_xy_data.touch34_id & 0x0F)); /* do not break */ } case 3: { g_xy_data.x3 = be16_to_cpu(g_xy_data.x3); g_xy_data.y3 = be16_to_cpu(g_xy_data.y3); if (tilt) FLIP_XY(g_xy_data.x3, g_xy_data.y3); if (rev_x) { val = INVERT_X(g_xy_data.x3, ts->platform_data->panel_maxx); if (val >= 0) g_xy_data.x3 = val; else pr_debug("X value is negative. Please configure" " maxx in platform data structure\n"); } if (rev_y) { val = INVERT_X(g_xy_data.y3, ts->platform_data->panel_maxy); if (val >= 0) g_xy_data.y3 = val; else pr_debug("Y value is negative. 
Please configure" " maxy in platform data structure\n"); } id = GET_TOUCH3_ID(g_xy_data.touch34_id); if (ts->platform_data->use_trk_id) { cur_mt_pos[CY_MT_TCH3_IDX][CY_XPOS] = g_xy_data.x3; cur_mt_pos[CY_MT_TCH3_IDX][CY_YPOS] = g_xy_data.y3; cur_mt_z[CY_MT_TCH3_IDX] = g_xy_data.z3; } else { cur_mt_pos[id][CY_XPOS] = g_xy_data.x3; cur_mt_pos[id][CY_YPOS] = g_xy_data.y3; cur_mt_z[id] = g_xy_data.z3; } cur_mt_tch[CY_MT_TCH3_IDX] = id; cur_trk[id] = CY_TCH; if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { st_x1 = g_xy_data.x3; st_y1 = g_xy_data.y3; st_z1 = g_xy_data.z3; cur_st_tch[CY_ST_FNGR1_IDX] = id; } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { st_x2 = g_xy_data.x3; st_y2 = g_xy_data.y3; st_z2 = g_xy_data.z3; cur_st_tch[CY_ST_FNGR2_IDX] = id; } } cyttsp_xdebug("3rd XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ g_xy_data.x3, g_xy_data.y3, g_xy_data.z3, \ ((g_xy_data.touch34_id >> 4) & 0x0F)); /* do not break */ } case 2: { g_xy_data.x2 = be16_to_cpu(g_xy_data.x2); g_xy_data.y2 = be16_to_cpu(g_xy_data.y2); if (tilt) FLIP_XY(g_xy_data.x2, g_xy_data.y2); if (rev_x) { val = INVERT_X(g_xy_data.x2, ts->platform_data->panel_maxx); if (val >= 0) g_xy_data.x2 = val; else pr_debug("X value is negative. Please configure" " maxx in platform data structure\n"); } if (rev_y) { val = INVERT_X(g_xy_data.y2, ts->platform_data->panel_maxy); if (val >= 0) g_xy_data.y2 = val; else pr_debug("Y value is negative. 
Please configure" " maxy in platform data structure\n"); } id = GET_TOUCH2_ID(g_xy_data.touch12_id); if (ts->platform_data->use_trk_id) { cur_mt_pos[CY_MT_TCH2_IDX][CY_XPOS] = g_xy_data.x2; cur_mt_pos[CY_MT_TCH2_IDX][CY_YPOS] = g_xy_data.y2; cur_mt_z[CY_MT_TCH2_IDX] = g_xy_data.z2; } else { cur_mt_pos[id][CY_XPOS] = g_xy_data.x2; cur_mt_pos[id][CY_YPOS] = g_xy_data.y2; cur_mt_z[id] = g_xy_data.z2; } cur_mt_tch[CY_MT_TCH2_IDX] = id; cur_trk[id] = CY_TCH; if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { st_x1 = g_xy_data.x2; st_y1 = g_xy_data.y2; st_z1 = g_xy_data.z2; cur_st_tch[CY_ST_FNGR1_IDX] = id; } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { st_x2 = g_xy_data.x2; st_y2 = g_xy_data.y2; st_z2 = g_xy_data.z2; cur_st_tch[CY_ST_FNGR2_IDX] = id; } } cyttsp_xdebug("2nd XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ g_xy_data.x2, g_xy_data.y2, g_xy_data.z2, \ (g_xy_data.touch12_id & 0x0F)); /* do not break */ } case 1: { g_xy_data.x1 = be16_to_cpu(g_xy_data.x1); g_xy_data.y1 = be16_to_cpu(g_xy_data.y1); if (tilt) FLIP_XY(g_xy_data.x1, g_xy_data.y1); if (rev_x) { val = INVERT_X(g_xy_data.x1, ts->platform_data->panel_maxx); if (val >= 0) g_xy_data.x1 = val; else pr_debug("X value is negative. Please configure" " maxx in platform data structure\n"); } if (rev_y) { val = INVERT_X(g_xy_data.y1, ts->platform_data->panel_maxy); if (val >= 0) g_xy_data.y1 = val; else pr_debug("Y value is negative. 
Please configure" " maxy in platform data structure"); } id = GET_TOUCH1_ID(g_xy_data.touch12_id); if (ts->platform_data->use_trk_id) { cur_mt_pos[CY_MT_TCH1_IDX][CY_XPOS] = g_xy_data.x1; cur_mt_pos[CY_MT_TCH1_IDX][CY_YPOS] = g_xy_data.y1; cur_mt_z[CY_MT_TCH1_IDX] = g_xy_data.z1; } else { cur_mt_pos[id][CY_XPOS] = g_xy_data.x1; cur_mt_pos[id][CY_YPOS] = g_xy_data.y1; cur_mt_z[id] = g_xy_data.z1; } cur_mt_tch[CY_MT_TCH1_IDX] = id; cur_trk[id] = CY_TCH; if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { st_x1 = g_xy_data.x1; st_y1 = g_xy_data.y1; st_z1 = g_xy_data.z1; cur_st_tch[CY_ST_FNGR1_IDX] = id; } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { st_x2 = g_xy_data.x1; st_y2 = g_xy_data.y1; st_z2 = g_xy_data.z1; cur_st_tch[CY_ST_FNGR2_IDX] = id; } } cyttsp_xdebug("1st XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ g_xy_data.x1, g_xy_data.y1, g_xy_data.z1, \ ((g_xy_data.touch12_id >> 4) & 0x0F)); break; } case 0: default:{ break; } } /* handle Single Touch signals */ if (ts->platform_data->use_st) { cyttsp_xdebug("ST STEP 0 - ST1 ID=%d ST2 ID=%d\n", \ cur_st_tch[CY_ST_FNGR1_IDX], \ cur_st_tch[CY_ST_FNGR2_IDX]); if (cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) { /* reassign finger 1 and 2 positions to new tracks */ if (cur_tch > 0) { /* reassign st finger1 */ if (ts->platform_data->use_trk_id) { id = CY_MT_TCH1_IDX; cur_st_tch[CY_ST_FNGR1_IDX] = cur_mt_tch[id]; } else { id = GET_TOUCH1_ID(g_xy_data.touch12_id); cur_st_tch[CY_ST_FNGR1_IDX] = id; } st_x1 = cur_mt_pos[id][CY_XPOS]; st_y1 = cur_mt_pos[id][CY_YPOS]; st_z1 = cur_mt_z[id]; cyttsp_xdebug("ST STEP 1 - ST1 ID=%3d\n", \ cur_st_tch[CY_ST_FNGR1_IDX]); if ((cur_tch > 1) && (cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID)) { /* reassign st finger2 */ if (cur_tch > 1) { if (ts->platform_data->use_trk_id) { id = CY_MT_TCH2_IDX; cur_st_tch[CY_ST_FNGR2_IDX] = cur_mt_tch[id]; } else { id = GET_TOUCH2_ID(g_xy_data.touch12_id); cur_st_tch[CY_ST_FNGR2_IDX] = id; } st_x2 = 
cur_mt_pos[id][CY_XPOS]; st_y2 = cur_mt_pos[id][CY_YPOS]; st_z2 = cur_mt_z[id]; cyttsp_xdebug("ST STEP 2 - ST2 ID=%3d\n", \ cur_st_tch[CY_ST_FNGR2_IDX]); } } } } else if (cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID) { if (cur_tch > 1) { /* reassign st finger2 */ if (ts->platform_data->use_trk_id) { /* reassign st finger2 */ id = CY_MT_TCH2_IDX; cur_st_tch[CY_ST_FNGR2_IDX] = cur_mt_tch[id]; } else { /* reassign st finger2 */ id = GET_TOUCH2_ID(g_xy_data.touch12_id); cur_st_tch[CY_ST_FNGR2_IDX] = id; } st_x2 = cur_mt_pos[id][CY_XPOS]; st_y2 = cur_mt_pos[id][CY_YPOS]; st_z2 = cur_mt_z[id]; cyttsp_xdebug("ST STEP 3 - ST2 ID=%3d\n", \ cur_st_tch[CY_ST_FNGR2_IDX]); } } /* if the 1st touch is missing and there is a 2nd touch, * then set the 1st touch to 2nd touch and terminate 2nd touch */ if ((cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) && (cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID)) { st_x1 = st_x2; st_y1 = st_y2; st_z1 = st_z2; cur_st_tch[CY_ST_FNGR1_IDX] = cur_st_tch[CY_ST_FNGR2_IDX]; cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH; } /* if the 2nd touch ends up equal to the 1st touch, * then just report a single touch */ if (cur_st_tch[CY_ST_FNGR1_IDX] == cur_st_tch[CY_ST_FNGR2_IDX]) { cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH; } /* set Single Touch current event signals */ if (cur_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { input_report_abs(ts->input, ABS_X, st_x1); input_report_abs(ts->input, ABS_Y, st_y1); input_report_abs(ts->input, ABS_PRESSURE, st_z1); input_report_key(ts->input, BTN_TOUCH, CY_TCH); input_report_abs(ts->input, ABS_TOOL_WIDTH, curr_tool_width); cyttsp_debug("ST->F1:%3d X:%3d Y:%3d Z:%3d\n", \ cur_st_tch[CY_ST_FNGR1_IDX], \ st_x1, st_y1, st_z1); } else { input_report_abs(ts->input, ABS_PRESSURE, CY_NTCH); input_report_key(ts->input, BTN_TOUCH, CY_NTCH); } /* update platform data for the current single touch info */ ts->prv_st_tch[CY_ST_FNGR1_IDX] = cur_st_tch[CY_ST_FNGR1_IDX]; ts->prv_st_tch[CY_ST_FNGR2_IDX] = cur_st_tch[CY_ST_FNGR2_IDX]; } /* handle 
Multi-touch signals */ if (ts->platform_data->use_mt) { if (ts->platform_data->use_trk_id) { /* terminate any previous touch where the track * is missing from the current event */ for (id = 0; id < CY_NUM_TRK_ID; id++) { if ((ts->act_trk[id] != CY_NTCH) && (cur_trk[id] == CY_NTCH)) { input_report_abs(ts->input, ABS_MT_TRACKING_ID, id); input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, CY_NTCH); input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, curr_tool_width); input_report_abs(ts->input, ABS_MT_POSITION_X, ts->prv_mt_pos[id][CY_XPOS]); input_report_abs(ts->input, ABS_MT_POSITION_Y, ts->prv_mt_pos[id][CY_YPOS]); CY_MT_SYNC(ts->input); ts->act_trk[id] = CY_NTCH; ts->prv_mt_pos[id][CY_XPOS] = 0; ts->prv_mt_pos[id][CY_YPOS] = 0; } } /* set Multi-Touch current event signals */ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { if (cur_mt_tch[id] < CY_NUM_TRK_ID) { input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_mt_tch[id]); input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, cur_mt_z[id]); input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, curr_tool_width); input_report_abs(ts->input, ABS_MT_POSITION_X, cur_mt_pos[id][CY_XPOS]); input_report_abs(ts->input, ABS_MT_POSITION_Y, cur_mt_pos[id][CY_YPOS]); CY_MT_SYNC(ts->input); ts->act_trk[id] = CY_TCH; ts->prv_mt_pos[id][CY_XPOS] = cur_mt_pos[id][CY_XPOS]; ts->prv_mt_pos[id][CY_YPOS] = cur_mt_pos[id][CY_YPOS]; } } } else { /* set temporary track array elements to voids */ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { tmp_trk[id] = CY_IGNR_TCH; snd_trk[id] = CY_IGNR_TCH; } /* get what is currently active */ for (i = 0, id = 0; id < CY_NUM_TRK_ID && i < CY_NUM_MT_TCH_ID; id++) { if (cur_trk[id] == CY_TCH) { /* only incr counter if track found */ tmp_trk[i] = id; i++; } } cyttsp_xdebug("T1: t0=%d, t1=%d, t2=%d, t3=%d\n", \ tmp_trk[0], tmp_trk[1], tmp_trk[2], \ tmp_trk[3]); cyttsp_xdebug("T1: p0=%d, p1=%d, p2=%d, p3=%d\n", \ ts->prv_mt_tch[0], ts->prv_mt_tch[1], \ ts->prv_mt_tch[2], ts->prv_mt_tch[3]); /* pack in still active previous touches 
*/ for (id = 0, prv_tch = 0; id < CY_NUM_MT_TCH_ID; id++) { if (tmp_trk[id] < CY_NUM_TRK_ID) { if (cyttsp_inlist(ts->prv_mt_tch, tmp_trk[id], &loc, CY_NUM_MT_TCH_ID)) { loc &= CY_NUM_MT_TCH_ID - 1; snd_trk[loc] = tmp_trk[id]; prv_tch++; cyttsp_xdebug("inlist s[%d]=%d t[%d]=%d l=%d p=%d\n", \ loc, snd_trk[loc], \ id, tmp_trk[id], \ loc, prv_tch); } else { cyttsp_xdebug("not inlist s[%d]=%d t[%d]=%d l=%d \n", \ id, snd_trk[id], \ id, tmp_trk[id], \ loc); } } } cyttsp_xdebug("S1: s0=%d, s1=%d, s2=%d, s3=%d p=%d\n", \ snd_trk[0], snd_trk[1], snd_trk[2], \ snd_trk[3], prv_tch); /* pack in new touches */ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { if (tmp_trk[id] < CY_NUM_TRK_ID) { if (!cyttsp_inlist(snd_trk, tmp_trk[id], &loc, CY_NUM_MT_TCH_ID)) { cyttsp_xdebug("not inlist t[%d]=%d l=%d\n", \ id, tmp_trk[id], loc); if (cyttsp_next_avail_inlist(snd_trk, &loc, CY_NUM_MT_TCH_ID)) { loc &= CY_NUM_MT_TCH_ID - 1; snd_trk[loc] = tmp_trk[id]; cyttsp_xdebug("put inlist s[%d]=%d t[%d]=%d\n", loc, snd_trk[loc], id, tmp_trk[id]); } } else { cyttsp_xdebug("is in list s[%d]=%d t[%d]=%d loc=%d\n", \ id, snd_trk[id], id, tmp_trk[id], loc); } } } cyttsp_xdebug("S2: s0=%d, s1=%d, s2=%d, s3=%d\n", \ snd_trk[0], snd_trk[1], snd_trk[2], snd_trk[3]); /* sync motion event signals for each current touch */ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { /* z will either be 0 (NOTOUCH) or * some pressure (TOUCH) */ cyttsp_xdebug("MT0 prev[%d]=%d temp[%d]=%d send[%d]=%d\n", \ id, ts->prv_mt_tch[id], \ id, tmp_trk[id], \ id, snd_trk[id]); if (snd_trk[id] < CY_NUM_TRK_ID) { input_mt_slot(ts->input, snd_trk[id]); input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true); input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, cur_mt_z[snd_trk[id]]); input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, curr_tool_width); input_report_abs(ts->input, ABS_MT_POSITION_X, cur_mt_pos[snd_trk[id]][CY_XPOS]); input_report_abs(ts->input, ABS_MT_POSITION_Y, cur_mt_pos[snd_trk[id]][CY_YPOS]); cyttsp_debug("MT1->TID:%2d X:%3d 
Y:%3d Z:%3d touch-sent\n", \ snd_trk[id], \ cur_mt_pos[snd_trk[id]][CY_XPOS], \ cur_mt_pos[snd_trk[id]][CY_YPOS], \ cur_mt_z[snd_trk[id]]); } else if (ts->prv_mt_tch[id] < CY_NUM_TRK_ID) { /* void out this touch */ input_mt_slot(ts->input, ts->prv_mt_tch[id]); input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, false); cyttsp_debug("MT2->TID:%2d X:%3d Y:%3d Z:%3d lift off-sent\n", \ ts->prv_mt_tch[id], \ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS], \ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS], \ CY_NTCH); } else { /* do not stuff any signals for this * previously and currently * void touches */ cyttsp_xdebug("MT3->send[%d]=%d - No touch - NOT sent\n", \ id, snd_trk[id]); } } /* save current posted tracks to * previous track memory */ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { ts->prv_mt_tch[id] = snd_trk[id]; if (snd_trk[id] < CY_NUM_TRK_ID) { ts->prv_mt_pos[snd_trk[id]][CY_XPOS] = cur_mt_pos[snd_trk[id]][CY_XPOS]; ts->prv_mt_pos[snd_trk[id]][CY_YPOS] = cur_mt_pos[snd_trk[id]][CY_YPOS]; cyttsp_xdebug("MT4->TID:%2d X:%3d Y:%3d Z:%3d save for previous\n", \ snd_trk[id], \ ts->prv_mt_pos[snd_trk[id]][CY_XPOS], \ ts->prv_mt_pos[snd_trk[id]][CY_YPOS], \ CY_NTCH); } } for (id = 0; id < CY_NUM_TRK_ID; id++) ts->act_trk[id] = CY_NTCH; for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { if (snd_trk[id] < CY_NUM_TRK_ID) ts->act_trk[snd_trk[id]] = CY_TCH; } } } /* handle gestures */ if (ts->platform_data->use_gestures) { if (g_xy_data.gest_id) { input_report_key(ts->input, BTN_3, CY_TCH); input_report_abs(ts->input, ABS_HAT1X, g_xy_data.gest_id); input_report_abs(ts->input, ABS_HAT2Y, g_xy_data.gest_cnt); } } /* signal the view motion event */ input_sync(ts->input); for (id = 0; id < CY_NUM_TRK_ID; id++) { /* update platform data for the current MT information */ ts->act_trk[id] = cur_trk[id]; } exit_xy_handler: /* restart event timer */ if (ts->client->irq == 0) mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT); return; } static int cyttsp_inlist(u16 prev_track[], u8 
cur_trk_id, u8 *prev_loc, u8 num_touches)
{
	u8 id = 0;

	/*
	 * Linear search of prev_track[] for cur_trk_id.  On a match,
	 * *prev_loc receives the matching index; otherwise it is left
	 * as CY_IGNR_TCH.  Returns true only when a valid index
	 * (< CY_NUM_TRK_ID) was found.
	 */
	*prev_loc = CY_IGNR_TCH;
	cyttsp_xdebug("IN p[%d]=%d c=%d n=%d loc=%d\n", \
		id, prev_track[id], cur_trk_id, \
		num_touches, *prev_loc);
	for (id = 0, *prev_loc = CY_IGNR_TCH;
		(id < num_touches); id++) {
		cyttsp_xdebug("p[%d]=%d c=%d n=%d loc=%d\n", \
			id, prev_track[id], cur_trk_id, \
			num_touches, *prev_loc);
		if (prev_track[id] == cur_trk_id) {
			*prev_loc = id;
			break;
		}
	}
	/*
	 * NOTE(review): when cur_trk_id is not found the loop exits with
	 * id == num_touches, so prev_track[id] in the trace below reads
	 * one element past the searched range (callers pass
	 * num_touches == CY_NUM_MT_TCH_ID for a CY_NUM_MT_TCH_ID-sized
	 * array) — debug builds only; confirm and guard if needed.
	 */
	cyttsp_xdebug("OUT p[%d]=%d c=%d n=%d loc=%d\n", \
		id, prev_track[id], cur_trk_id, num_touches, *prev_loc);

	return ((*prev_loc < CY_NUM_TRK_ID) ? true : false);
}

/*
 * Find the first free slot in cur_trk[].  A slot is considered free when
 * it holds a value above CY_NUM_TRK_ID (i.e. the CY_IGNR_TCH sentinel).
 * *new_loc receives the free index, or stays CY_IGNR_TCH when the list
 * is full.  Returns true only when a valid index was found.
 */
static int cyttsp_next_avail_inlist(u16 cur_trk[], u8 *new_loc, u8 num_touches)
{
	u8 id;

	for (id = 0, *new_loc = CY_IGNR_TCH;
		(id < num_touches); id++) {
		if (cur_trk[id] > CY_NUM_TRK_ID) {
			*new_loc = id;
			break;
		}
	}
	return ((*new_loc < CY_NUM_TRK_ID) ? true : false);
}

/* Timer function used as dummy interrupt driver */
static void cyttsp_timer(unsigned long handle)
{
	/* handle was registered via setup_timer() and is really a
	 * struct cyttsp pointer; polling mode only (client->irq == 0).
	 */
	struct cyttsp *ts = (struct cyttsp *) handle;

	cyttsp_xdebug("TTSP Device timer event\n");

	/* schedule motion signal handling */
	cyttsp_xy_handler(ts);

	return;
}

/* ************************************************************************
 * ISR function.
This function is general, initialized in drivers init * function * ************************************************************************ */ static irqreturn_t cyttsp_irq(int irq, void *handle) { struct cyttsp *ts = (struct cyttsp *) handle; cyttsp_xdebug("%s: Got IRQ\n", CY_I2C_NAME); cyttsp_xy_handler(ts); return IRQ_HANDLED; } /* ************************************************************************ * Probe initialization functions * ************************************************************************ */ static int cyttsp_putbl(struct cyttsp *ts, int show, int show_status, int show_version, int show_cid) { int retval = CY_OK; int num_bytes = (show_status * 3) + (show_version * 6) + (show_cid * 3); if (show_cid) num_bytes = sizeof(struct cyttsp_bootloader_data_t); else if (show_version) num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 3; else num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 9; if (show) { retval = i2c_smbus_read_i2c_block_data(ts->client, CY_REG_BASE, num_bytes, (u8 *)&g_bl_data); if (show_status) { cyttsp_debug("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \ show, \ g_bl_data.bl_file, \ g_bl_data.bl_status, \ g_bl_data.bl_error, \ g_bl_data.blver_hi, g_bl_data.blver_lo, \ g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo); } if (show_version) { cyttsp_debug("BL%d: ttspver=0x%02X%02X appid=0x%02X%02X appver=0x%02X%02X\n", \ show, \ g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \ g_bl_data.appid_hi, g_bl_data.appid_lo, \ g_bl_data.appver_hi, g_bl_data.appver_lo); } if (show_cid) { cyttsp_debug("BL%d: cid=0x%02X%02X%02X\n", \ show, \ g_bl_data.cid_0, \ g_bl_data.cid_1, \ g_bl_data.cid_2); } } return retval; } #ifdef CY_INCLUDE_LOAD_FILE #define CY_MAX_I2C_LEN 256 #define CY_MAX_TRY 10 #define CY_BL_PAGE_SIZE 16 #define CY_BL_NUM_PAGES 5 static int cyttsp_i2c_wr_blk_chunks(struct cyttsp *ts, u8 command, u8 length, const u8 *values) { int retval = CY_OK; int block = 1; u8 dataray[CY_MAX_I2C_LEN]; /* first page already 
includes the bl page offset */ retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, CY_BL_PAGE_SIZE+1, values); values += CY_BL_PAGE_SIZE+1; length -= CY_BL_PAGE_SIZE+1; /* rem blocks require bl page offset stuffing */ while (length && (block < CY_BL_NUM_PAGES) && !(retval < CY_OK)) { udelay(43*2); /* TRM * 2 */ dataray[0] = CY_BL_PAGE_SIZE*block; memcpy(&dataray[1], values, length >= CY_BL_PAGE_SIZE ? CY_BL_PAGE_SIZE : length); retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, length >= CY_BL_PAGE_SIZE ? CY_BL_PAGE_SIZE + 1 : length+1, dataray); values += CY_BL_PAGE_SIZE; length = length >= CY_BL_PAGE_SIZE ? length - CY_BL_PAGE_SIZE : 0; block++; } return retval; } static int cyttsp_bootload_app(struct cyttsp *ts) { int retval = CY_OK; int i, tries; u8 host_reg; cyttsp_debug("load new firmware \n"); /* reset TTSP Device back to bootloader mode */ host_reg = CY_SOFT_RESET_MODE; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete reset back to bootloader */ tries = 0; do { usleep_range(1000, 1000); cyttsp_putbl(ts, 3, false, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); cyttsp_debug("load file - tver=0x%02X%02X a_id=0x%02X%02X aver=0x%02X%02X\n", \ cyttsp_fw_tts_verh, cyttsp_fw_tts_verl, \ cyttsp_fw_app_idh, cyttsp_fw_app_idl, \ cyttsp_fw_app_verh, cyttsp_fw_app_verl); /* download new TTSP Application to the Bootloader */ if (!(retval < CY_OK)) { i = 0; /* send bootload initiation command */ if (cyttsp_fw[i].Command == CY_BL_INIT_LOAD) { g_bl_data.bl_file = 0; g_bl_data.bl_status = 0; g_bl_data.bl_error = 0; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, cyttsp_fw[i].Length, cyttsp_fw[i].Block); /* delay to allow bl to get ready for block writes */ i++; tries = 0; do { msleep(100); cyttsp_putbl(ts, 4, false, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 
&& tries++ < 100); cyttsp_debug("wait init f=%02X, s=%02X, e=%02X t=%d\n", \ g_bl_data.bl_file, g_bl_data.bl_status, \ g_bl_data.bl_error, tries); /* send bootload firmware load blocks */ if (!(retval < CY_OK)) { while (cyttsp_fw[i].Command == CY_BL_WRITE_BLK) { retval = cyttsp_i2c_wr_blk_chunks(ts, CY_REG_BASE, cyttsp_fw[i].Length, cyttsp_fw[i].Block); cyttsp_xdebug("BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n", \ cyttsp_fw[i].Record, \ cyttsp_fw[i].Length, \ cyttsp_fw[i].Address); i++; if (retval < CY_OK) { cyttsp_debug("BL fail Rec=%3d retval=%d\n", \ cyttsp_fw[i-1].Record, \ retval); break; } else { tries = 0; cyttsp_putbl(ts, 5, false, false, false); while (!((g_bl_data.bl_status == 0x10) && (g_bl_data.bl_error == 0x20)) && !((g_bl_data.bl_status == 0x11) && (g_bl_data.bl_error == 0x20)) && (tries++ < 100)) { usleep_range(1000, 1000); cyttsp_putbl(ts, 5, false, false, false); } } } if (!(retval < CY_OK)) { while (i < cyttsp_fw_records) { retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, cyttsp_fw[i].Length, cyttsp_fw[i].Block); i++; tries = 0; do { msleep(100); cyttsp_putbl(ts, 6, true, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); cyttsp_debug("wait term f=%02X, s=%02X, e=%02X t=%d\n", \ g_bl_data.bl_file, \ g_bl_data.bl_status, \ g_bl_data.bl_error, \ tries); if (retval < CY_OK) break; } } } } } /* reset TTSP Device back to bootloader mode */ host_reg = CY_SOFT_RESET_MODE; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete reset back to bootloader */ tries = 0; do { usleep_range(1000, 1000); cyttsp_putbl(ts, 3, false, false, false); } while (g_bl_data.bl_status != 0x10 && g_bl_data.bl_status != 0x11 && tries++ < 100); /* set arg2 to non-0 to activate */ retval = cyttsp_putbl(ts, 8, true, true, true); return retval; } #else static int cyttsp_bootload_app(struct cyttsp *ts) { cyttsp_debug("no-load new firmware \n"); 
return CY_OK;
}
#endif /* CY_INCLUDE_LOAD_FILE */

/*
 * Power up the TTSP controller: issue a soft reset to drop into the
 * bootloader, poll the bootloader status, optionally reflash the
 * application firmware, then (in the code that follows) switch the part
 * to SysInfo mode to program interval registers and finally to
 * Operational mode.  Returns CY_OK on success, a negative value on I2C
 * failure.
 */
static int cyttsp_power_on(struct cyttsp *ts)
{
	int retval = CY_OK;
	u8 host_reg;
	int tries;

	cyttsp_debug("Power up \n");

	/* check if the TTSP device has a bootloader installed */
	host_reg = CY_SOFT_RESET_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
		sizeof(host_reg), &host_reg);
	tries = 0;
	do {
		/* ~1 ms per iteration, up to 100 polls of the BL registers */
		usleep_range(1000, 1000);

		/* set arg2 to non-0 to activate */
		retval = cyttsp_putbl(ts, 1, true, true, true);
		cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X R=%d\n", \
			101, \
			g_bl_data.bl_file, g_bl_data.bl_status, \
			g_bl_data.bl_error, \
			g_bl_data.blver_hi, g_bl_data.blver_lo, \
			g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo, retval);
		cyttsp_info("BL%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \
			102, \
			g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
			g_bl_data.appid_hi, g_bl_data.appid_lo, \
			g_bl_data.appver_hi, g_bl_data.appver_lo);
		cyttsp_info("BL%d: c_id=%02X%02X%02X\n", \
			103, \
			g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2);
	/* keep polling until the read fails, the part reports bootloader
	 * mode, it already reports operational/low-power mode, or we give
	 * up after 100 tries
	 */
	} while (!(retval < CY_OK) &&
		!GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
		!(g_bl_data.bl_file == CY_OP_MODE + CY_LOW_PWR_MODE) &&
		tries++ < 100);

	/* is bootloader missing?
*/ if (!(retval < CY_OK)) { cyttsp_xdebug("Ret=%d Check if bootloader is missing...\n", \ retval); if (!GET_BOOTLOADERMODE(g_bl_data.bl_status)) { /* skip all bl and sys info and go to op mode */ if (!(retval < CY_OK)) { cyttsp_xdebug("Bl is missing (ret=%d)\n", \ retval); host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete switch to * Operational mode */ msleep(1000); goto bypass; } } } /* take TTSP out of bootloader mode; go to TrueTouch operational mode */ if (!(retval < CY_OK)) { cyttsp_xdebug1("exit bootloader; go operational\n"); tries = 0; do { msleep(100); retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); if (retval == CY_OK) break; } while (tries++ < 5); if (retval == CY_OK) { tries = 0; do { msleep(100); cyttsp_putbl(ts, 4, true, false, false); cyttsp_info("BL%d: f=%02X s=%02X err=%02X" \ "bl=%02X%02X bld=%02X%02X\n", 104, \ g_bl_data.bl_file, \ g_bl_data.bl_status, \ g_bl_data.bl_error, \ g_bl_data.blver_hi, \ g_bl_data.blver_lo, \ g_bl_data.bld_blver_hi, \ g_bl_data.bld_blver_lo); } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && tries++ < 5); } } if (!(retval < CY_OK) && cyttsp_app_load()) { if (CY_DIFF(g_bl_data.ttspver_hi, cyttsp_tts_verh()) || CY_DIFF(g_bl_data.ttspver_lo, cyttsp_tts_verl()) || CY_DIFF(g_bl_data.appid_hi, cyttsp_app_idh()) || CY_DIFF(g_bl_data.appid_lo, cyttsp_app_idl()) || CY_DIFF(g_bl_data.appver_hi, cyttsp_app_verh()) || CY_DIFF(g_bl_data.appver_lo, cyttsp_app_verl()) || CY_DIFF(g_bl_data.cid_0, cyttsp_cid_0()) || CY_DIFF(g_bl_data.cid_1, cyttsp_cid_1()) || CY_DIFF(g_bl_data.cid_2, cyttsp_cid_2()) || cyttsp_force_fw_load()) { cyttsp_debug("blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n", \ g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \ cyttsp_tts_verh(), cyttsp_tts_verl(), \ cyttsp_force_fw_load()); cyttsp_debug("blappid=0x%02X%02X flappid=0x%02X%02X\n", \ 
g_bl_data.appid_hi, g_bl_data.appid_lo, \ cyttsp_app_idh(), cyttsp_app_idl()); cyttsp_debug("blappver=0x%02X%02X flappver=0x%02X%02X\n", \ g_bl_data.appver_hi, g_bl_data.appver_lo, \ cyttsp_app_verh(), cyttsp_app_verl()); cyttsp_debug("blcid=0x%02X%02X%02X flcid=0x%02X%02X%02X\n", \ g_bl_data.cid_0, \ g_bl_data.cid_1, \ g_bl_data.cid_2, \ cyttsp_cid_0(), \ cyttsp_cid_1(), \ cyttsp_cid_2()); /* enter bootloader to load new app into TTSP Device */ retval = cyttsp_bootload_app(ts); /* take TTSP device out of bootloader mode; * switch back to TrueTouch operational mode */ if (!(retval < CY_OK)) { retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); /* wait for TTSP Device to complete * switch to Operational mode */ tries = 0; do { msleep(100); cyttsp_putbl(ts, 9, false, false, false); } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && tries++ < 100); cyttsp_putbl(ts, 9, true, false, false); } } } bypass: /* switch to System Information mode to read versions * and set interval registers */ if (!(retval < CY_OK)) { cyttsp_debug("switch to sysinfo mode\n"); host_reg = CY_SYSINFO_MODE; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete switch to SysInfo mode */ msleep(100); if (!(retval < CY_OK)) { retval = i2c_smbus_read_i2c_block_data(ts->client, CY_REG_BASE, sizeof(struct cyttsp_sysinfo_data_t), (u8 *)&g_sysinfo_data); cyttsp_debug("SI2: hst_mode=0x%02X mfg_cmd=0x%02X"\ "mfg_stat=0x%02X\n", g_sysinfo_data.hst_mode, g_sysinfo_data.mfg_cmd, g_sysinfo_data.mfg_stat); cyttsp_debug("SI2: bl_ver=0x%02X%02X\n", g_sysinfo_data.bl_verh, g_sysinfo_data.bl_verl); pr_debug("SI2: sysinfo act_int=0x%02X tch_tmout=0x%02X lp_int=0x%02X\n", g_sysinfo_data.act_intrvl, g_sysinfo_data.tch_tmout, g_sysinfo_data.lp_intrvl); pr_info("SI%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", 102, g_sysinfo_data.tts_verh, g_sysinfo_data.tts_verl, g_sysinfo_data.app_idh, 
g_sysinfo_data.app_idl, g_sysinfo_data.app_verh, g_sysinfo_data.app_verl); cyttsp_info("SI%d: c_id=%02X%02X%02X\n", 103, g_sysinfo_data.cid[0], g_sysinfo_data.cid[1], g_sysinfo_data.cid[2]); if (!(retval < CY_OK) && (CY_DIFF(ts->platform_data->act_intrvl, CY_ACT_INTRVL_DFLT) || CY_DIFF(ts->platform_data->tch_tmout, CY_TCH_TMOUT_DFLT) || CY_DIFF(ts->platform_data->lp_intrvl, CY_LP_INTRVL_DFLT))) { if (!(retval < CY_OK)) { u8 intrvl_ray[sizeof(\ ts->platform_data->act_intrvl) + sizeof(\ ts->platform_data->tch_tmout) + sizeof(\ ts->platform_data->lp_intrvl)]; u8 i = 0; intrvl_ray[i++] = ts->platform_data->act_intrvl; intrvl_ray[i++] = ts->platform_data->tch_tmout; intrvl_ray[i++] = ts->platform_data->lp_intrvl; pr_debug("SI2: platinfo act_intrvl=0x%02X tch_tmout=0x%02X lp_intrvl=0x%02X\n", ts->platform_data->act_intrvl, ts->platform_data->tch_tmout, ts->platform_data->lp_intrvl); /* set intrvl registers */ retval = i2c_smbus_write_i2c_block_data( ts->client, CY_REG_ACT_INTRVL, sizeof(intrvl_ray), intrvl_ray); msleep(CY_DLY_SYSINFO); } } } /* switch back to Operational mode */ cyttsp_debug("switch back to operational mode\n"); if (!(retval < CY_OK)) { host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(host_reg), &host_reg); /* wait for TTSP Device to complete * switch to Operational mode */ msleep(100); } } /* init gesture setup; * this is required even if not using gestures * in order to set the active distance */ if (!(retval < CY_OK)) { u8 gesture_setup; cyttsp_debug("init gesture setup\n"); gesture_setup = ts->platform_data->gest_set; retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_GEST_SET, sizeof(gesture_setup), &gesture_setup); msleep(CY_DLY_DFLT); } if (!(retval < CY_OK)) ts->platform_data->power_state = CY_ACTIVE_STATE; else ts->platform_data->power_state = CY_IDLE_STATE; cyttsp_debug("Retval=%d Power state is %s\n", \ retval, \ ts->platform_data->power_state == CY_ACTIVE_STATE ? 
\ "ACTIVE" : "IDLE"); return retval; } static int cyttsp_power_device(struct cyttsp *ts, bool on) { int rc = 0, i; const struct cyttsp_regulator *reg_info = ts->platform_data->regulator_info; u8 num_reg = ts->platform_data->num_regulators; if (!reg_info) { pr_err("regulator pdata not specified\n"); return -EINVAL; } if (on == false) /* Turn off the regulators */ goto ts_reg_disable; ts->vdd = kzalloc(num_reg * sizeof(struct regulator *), GFP_KERNEL); if (!ts->vdd) { pr_err("unable to allocate memory\n"); return -ENOMEM; } for (i = 0; i < num_reg; i++) { ts->vdd[i] = regulator_get(&ts->client->dev, reg_info[i].name); if (IS_ERR(ts->vdd[i])) { rc = PTR_ERR(ts->vdd[i]); pr_err("%s:regulator get failed rc=%d\n", __func__, rc); goto error_vdd; } if (regulator_count_voltages(ts->vdd[i]) > 0) { rc = regulator_set_voltage(ts->vdd[i], reg_info[i].min_uV, reg_info[i].max_uV); if (rc) { pr_err("%s: regulator_set_voltage" "failed rc =%d\n", __func__, rc); regulator_put(ts->vdd[i]); goto error_vdd; } rc = regulator_set_optimum_mode(ts->vdd[i], reg_info[i].hpm_load_uA); if (rc < 0) { pr_err("%s: regulator_set_optimum_mode failed " "rc=%d\n", __func__, rc); regulator_set_voltage(ts->vdd[i], 0, reg_info[i].max_uV); regulator_put(ts->vdd[i]); goto error_vdd; } } rc = regulator_enable(ts->vdd[i]); if (rc) { pr_err("%s: regulator_enable failed rc =%d\n", __func__, rc); if (regulator_count_voltages(ts->vdd[i]) > 0) { regulator_set_optimum_mode(ts->vdd[i], 0); regulator_set_voltage(ts->vdd[i], 0, reg_info[i].max_uV); } regulator_put(ts->vdd[i]); goto error_vdd; } } return rc; ts_reg_disable: i = ts->platform_data->num_regulators; error_vdd: while (--i >= 0) { if (regulator_count_voltages(ts->vdd[i]) > 0) { regulator_set_voltage(ts->vdd[i], 0, reg_info[i].max_uV); regulator_set_optimum_mode(ts->vdd[i], 0); } regulator_disable(ts->vdd[i]); regulator_put(ts->vdd[i]); } kfree(ts->vdd); return rc; } /* cyttsp_initialize: Driver Initialization. 
This function takes * care of the following tasks: * 1. Create and register an input device with input layer * 2. Take CYTTSP device out of bootloader mode; go operational * 3. Start any timers/Work queues. */ static int cyttsp_initialize(struct i2c_client *client, struct cyttsp *ts) { struct input_dev *input_device; int error = 0; int retval = CY_OK; u8 id; /* Create the input device and register it. */ input_device = input_allocate_device(); if (!input_device) { error = -ENOMEM; cyttsp_xdebug1("err input allocate device\n"); goto error_free_device; } if (!client) { error = ~ENODEV; cyttsp_xdebug1("err client is Null\n"); goto error_free_device; } if (!ts) { error = ~ENODEV; cyttsp_xdebug1("err context is Null\n"); goto error_free_device; } ts->input = input_device; input_device->name = CY_I2C_NAME; input_device->phys = ts->phys; input_device->dev.parent = &client->dev; /* init the touch structures */ ts->num_prv_st_tch = CY_NTCH; for (id = 0; id < CY_NUM_TRK_ID; id++) { ts->act_trk[id] = CY_NTCH; ts->prv_mt_pos[id][CY_XPOS] = 0; ts->prv_mt_pos[id][CY_YPOS] = 0; } for (id = 0; id < CY_NUM_MT_TCH_ID; id++) ts->prv_mt_tch[id] = CY_IGNR_TCH; for (id = 0; id < CY_NUM_ST_TCH_ID; id++) ts->prv_st_tch[id] = CY_IGNR_TCH; set_bit(EV_SYN, input_device->evbit); set_bit(EV_KEY, input_device->evbit); set_bit(EV_ABS, input_device->evbit); set_bit(BTN_TOUCH, input_device->keybit); set_bit(INPUT_PROP_DIRECT, input_device->propbit); if (ts->platform_data->use_gestures) set_bit(BTN_3, input_device->keybit); input_set_abs_params(input_device, ABS_X, ts->platform_data->disp_minx, ts->platform_data->disp_maxx, 0, 0); input_set_abs_params(input_device, ABS_Y, ts->platform_data->disp_miny, ts->platform_data->disp_maxy, 0, 0); input_set_abs_params(input_device, ABS_TOOL_WIDTH, 0, CY_LARGE_TOOL_WIDTH, 0 , 0); input_set_abs_params(input_device, ABS_PRESSURE, 0, CY_MAXZ, 0, 0); if (ts->platform_data->use_gestures) { input_set_abs_params(input_device, ABS_HAT1X, 0, CY_MAXZ, 0, 0); 
input_set_abs_params(input_device, ABS_HAT1Y, 0, CY_MAXZ, 0, 0); } if (ts->platform_data->use_mt) { input_set_abs_params(input_device, ABS_MT_POSITION_X, ts->platform_data->disp_minx, ts->platform_data->disp_maxx, 0, 0); input_set_abs_params(input_device, ABS_MT_POSITION_Y, ts->platform_data->disp_miny, ts->platform_data->disp_maxy, 0, 0); input_set_abs_params(input_device, ABS_MT_TOUCH_MAJOR, 0, CY_MAXZ, 0, 0); input_set_abs_params(input_device, ABS_MT_WIDTH_MAJOR, 0, CY_LARGE_TOOL_WIDTH, 0, 0); input_mt_init_slots(input_device, CY_NUM_TRK_ID); if (ts->platform_data->use_trk_id) { input_set_abs_params(input_device, ABS_MT_TRACKING_ID, 0, CY_NUM_TRK_ID, 0, 0); } } /* set dummy key to make driver work with virtual keys */ input_set_capability(input_device, EV_KEY, KEY_PROG1); cyttsp_info("%s: Register input device\n", CY_I2C_NAME); error = input_register_device(input_device); if (error) { cyttsp_alert("%s: Failed to register input device\n", \ CY_I2C_NAME); retval = error; goto error_free_device; } if (gpio_is_valid(ts->platform_data->resout_gpio)) { /* configure touchscreen reset out gpio */ retval = gpio_request(ts->platform_data->resout_gpio, "cyttsp_resout_gpio"); if (retval) { pr_err("%s: unable to request reset gpio %d\n", __func__, ts->platform_data->resout_gpio); goto error_free_device; } retval = gpio_direction_output( ts->platform_data->resout_gpio, 1); if (retval) { pr_err("%s: unable to set direction for gpio %d\n", __func__, ts->platform_data->resout_gpio); goto error_resout_gpio_dir; } } if (gpio_is_valid(ts->platform_data->sleep_gpio)) { /* configure touchscreen reset out gpio */ retval = gpio_request(ts->platform_data->sleep_gpio, "cy8c_sleep_gpio"); if (retval) { pr_err("%s: unable to request sleep gpio %d\n", __func__, ts->platform_data->sleep_gpio); goto error_sleep_gpio_req; } retval = gpio_direction_output( ts->platform_data->sleep_gpio, 0); if (retval) { pr_err("%s: unable to set direction for gpio %d\n", __func__, 
ts->platform_data->resout_gpio); goto error_sleep_gpio_dir; } } if (gpio_is_valid(ts->platform_data->irq_gpio)) { /* configure touchscreen irq gpio */ retval = gpio_request(ts->platform_data->irq_gpio, "ts_irq_gpio"); if (retval) { pr_err("%s: unable to request gpio [%d]\n", __func__, ts->platform_data->irq_gpio); goto error_irq_gpio_req; } retval = gpio_direction_input(ts->platform_data->irq_gpio); if (retval) { pr_err("%s: unable to set_direction for gpio [%d]\n", __func__, ts->platform_data->irq_gpio); goto error_irq_gpio_dir; } } if (ts->platform_data->regulator_info) { retval = cyttsp_power_device(ts, true); if (retval) { pr_err("%s: Unable to power device %d\n", __func__, retval); goto error_irq_gpio_dir; } } /* Power on the chip and make sure that I/Os are set as specified * in the platform */ if (ts->platform_data->init) { retval = ts->platform_data->init(client); if (retval) { pr_err("%s: ts init failed\n", __func__); goto error_power_device; } } msleep(100); /* check this device active by reading first byte/register */ retval = i2c_smbus_read_byte_data(ts->client, 0x01); if (retval < 0) { pr_err("%s: i2c sanity check failed\n", __func__); goto error_power_device; } retval = cyttsp_power_on(ts); if (retval < 0) { pr_err("%s: cyttsp_power_on failed\n", __func__); goto error_power_device; } /* Timer or Interrupt setup */ if (ts->client->irq == 0) { cyttsp_info("Setting up timer\n"); setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts); mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT); } else { cyttsp_info("Setting up interrupt\n"); error = request_threaded_irq(client->irq, NULL, cyttsp_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, client->dev.driver->name, ts); if (error) { cyttsp_alert("error: could not request irq\n"); retval = error; goto error_power_device; } } irq_cnt = 0; irq_cnt_total = 0; irq_err_cnt = 0; atomic_set(&ts->irq_enabled, 1); retval = device_create_file(&ts->client->dev, &dev_attr_irq_enable); if (retval < CY_OK) { cyttsp_alert("File 
device creation failed: %d\n", retval); retval = -ENODEV; goto error_free_irq; } retval = device_create_file(&client->dev, &dev_attr_cyttsp_fw_ver); if (retval) { cyttsp_alert("sysfs entry for firmware version failed\n"); goto error_rm_dev_file_irq_en; } ts->cyttsp_fwloader_mode = 0; retval = device_create_file(&client->dev, &dev_attr_cyttsp_update_fw); if (retval) { cyttsp_alert("sysfs entry for firmware update failed\n"); goto error_rm_dev_file_fw_ver; } retval = device_create_file(&client->dev, &dev_attr_cyttsp_force_update_fw); if (retval) { cyttsp_alert("sysfs entry for force firmware update failed\n"); goto error_rm_dev_file_update_fw; } if (ts->platform_data->correct_fw_ver) { if (g_bl_data.appid_lo != ts->platform_data->correct_fw_ver) pr_warn("%s: Invalid firmware version detected;" " Please update.\n", __func__); } retval = device_create_file(&client->dev, &dev_attr_cyttsp_fw_name); if (retval) { cyttsp_alert("sysfs entry for file name selection failed\n"); goto error_rm_dev_file_fupdate_fw; } cyttsp_info("%s: Successful registration\n", CY_I2C_NAME); goto success; error_rm_dev_file_fupdate_fw: device_remove_file(&client->dev, &dev_attr_cyttsp_force_update_fw); error_rm_dev_file_update_fw: device_remove_file(&client->dev, &dev_attr_cyttsp_update_fw); error_rm_dev_file_fw_ver: device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver); error_rm_dev_file_irq_en: device_remove_file(&client->dev, &dev_attr_irq_enable); error_free_irq: if (ts->client->irq) free_irq(client->irq, ts); error_power_device: if (ts->platform_data->regulator_info) cyttsp_power_device(ts, false); error_irq_gpio_dir: if (gpio_is_valid(ts->platform_data->irq_gpio)) gpio_free(ts->platform_data->irq_gpio); error_irq_gpio_req: if (gpio_is_valid(ts->platform_data->sleep_gpio)) gpio_direction_output(ts->platform_data->sleep_gpio, 1); error_sleep_gpio_dir: if (gpio_is_valid(ts->platform_data->sleep_gpio)) gpio_free(ts->platform_data->sleep_gpio); error_sleep_gpio_req: if 
(gpio_is_valid(ts->platform_data->resout_gpio)) gpio_direction_output(ts->platform_data->resout_gpio, 0); error_resout_gpio_dir: if (gpio_is_valid(ts->platform_data->resout_gpio)) gpio_free(ts->platform_data->resout_gpio); error_free_device: if (input_device) input_free_device(input_device); success: return retval; } /* I2C driver probe function */ static int __devinit cyttsp_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cyttsp *ts; int error; int retval = CY_OK; cyttsp_info("Start Probe 1.2\n"); /* allocate and clear memory */ ts = kzalloc(sizeof(struct cyttsp), GFP_KERNEL); if (ts == NULL) { cyttsp_xdebug1("err kzalloc for cyttsp\n"); return -ENOMEM; } /* Enable runtime PM ops, start in ACTIVE mode */ error = pm_runtime_set_active(&client->dev); if (error < 0) dev_dbg(&client->dev, "unable to set runtime pm state\n"); pm_runtime_enable(&client->dev); if (!(retval < CY_OK)) { /* register driver_data */ ts->client = client; ts->platform_data = client->dev.platform_data; if (ts->platform_data->fw_fname) strlcpy(ts->fw_fname, ts->platform_data->fw_fname, FW_FNAME_LEN - 1); else strlcpy(ts->fw_fname, "cyttsp.hex", FW_FNAME_LEN - 1); if (ts->platform_data->gen == CY_GEN3) { ts->fw_start_addr = 0x0b00; } else if (ts->platform_data->gen == CY_GEN2) { ts->fw_start_addr = 0x0880; } else { pr_err("%s: unsupported cypress chip\n", __func__); kfree(ts); return -EINVAL; } i2c_set_clientdata(client, ts); error = cyttsp_initialize(client, ts); if (error) { cyttsp_xdebug1("err cyttsp_initialize\n"); /* deallocate memory */ kfree(ts); /* i2c_del_driver(&cyttsp_driver); */ return -ENODEV; } } #ifdef CONFIG_HAS_EARLYSUSPEND if (!(retval < CY_OK)) { ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; ts->early_suspend.suspend = cyttsp_early_suspend; ts->early_suspend.resume = cyttsp_late_resume; register_early_suspend(&ts->early_suspend); } #endif /* CONFIG_HAS_EARLYSUSPEND */ device_init_wakeup(&client->dev, ts->platform_data->wakeup); 
mutex_init(&ts->mutex); cyttsp_info("Start Probe %s\n", \ (retval < CY_OK) ? "FAIL" : "PASS"); return retval; } #ifdef CONFIG_PM static int cyttsp_regulator_lpm(struct cyttsp *ts, bool on) { int rc = 0, i; const struct cyttsp_regulator *reg_info = ts->platform_data->regulator_info; u8 num_reg = ts->platform_data->num_regulators; if (on == false) goto regulator_hpm; for (i = 0; i < num_reg; i++) { if (regulator_count_voltages(ts->vdd[i]) < 0) continue; rc = regulator_set_optimum_mode(ts->vdd[i], reg_info[i].lpm_load_uA); if (rc < 0) { pr_err("%s: regulator_set_optimum failed rc = %d\n", __func__, rc); goto fail_regulator_lpm; } } return 0; regulator_hpm: for (i = 0; i < num_reg; i++) { if (regulator_count_voltages(ts->vdd[i]) < 0) continue; rc = regulator_set_optimum_mode(ts->vdd[i], reg_info[i].hpm_load_uA); if (rc < 0) { pr_err("%s: regulator_set_optimum failed" "rc = %d\n", __func__, rc); goto fail_regulator_hpm; } } return 0; fail_regulator_lpm: while (i--) { if (regulator_count_voltages(ts->vdd[i]) < 0) continue; regulator_set_optimum_mode(ts->vdd[i], reg_info[i].hpm_load_uA); } return rc; fail_regulator_hpm: while (i--) { if (regulator_count_voltages(ts->vdd[i]) < 0) continue; regulator_set_optimum_mode(ts->vdd[i], reg_info[i].lpm_load_uA); } return rc; } /* Function to manage power-on resume */ static int cyttsp_resume(struct device *dev) { struct cyttsp *ts = dev_get_drvdata(dev); int retval = CY_OK; cyttsp_debug("Wake Up\n"); if (ts->is_suspended == false) { pr_err("%s: in wakeup state\n", __func__); return 0; } if (device_may_wakeup(dev)) { if (ts->client->irq) disable_irq_wake(ts->client->irq); return 0; } /* re-enable the interrupt prior to wake device */ if (ts->client->irq) enable_irq(ts->client->irq); if (ts->platform_data->use_sleep && (ts->platform_data->power_state != CY_ACTIVE_STATE)) { if (ts->platform_data->resume) retval = ts->platform_data->resume(ts->client); else retval = cyttsp_regulator_lpm(ts, false); /* take TTSP device out of bootloader 
mode; * switch back to TrueTouch operational mode */ if (!(retval < CY_OK)) { int tries = 0; do { msleep(100); retval = i2c_smbus_write_i2c_block_data( ts->client, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); if (retval == CY_OK) break; } while (tries++ < 2); /* wait for TTSP Device to complete * switch to Operational mode */ tries = 0; do { msleep(100); cyttsp_putbl(ts, 16, false, false, false); } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && tries++ < 2); cyttsp_putbl(ts, 16, true, false, false); } } if (!(retval < CY_OK) && (GET_HSTMODE(g_bl_data.bl_file) == CY_OK)) { ts->platform_data->power_state = CY_ACTIVE_STATE; /* re-enable the timer after resuming */ if (ts->client->irq == 0) mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT); } else retval = -ENODEV; ts->is_suspended = false; cyttsp_debug("Wake Up %s\n", \ (retval < CY_OK) ? "FAIL" : "PASS"); return retval; } /* Function to manage low power suspend */ static int cyttsp_suspend(struct device *dev) { struct cyttsp *ts = dev_get_drvdata(dev); u8 sleep_mode = CY_OK; int retval = CY_OK; cyttsp_debug("Enter Sleep\n"); if (ts->is_suspended == true) { pr_err("%s: in sleep state\n", __func__); return 0; } mutex_lock(&ts->mutex); if (ts->cyttsp_fwloader_mode) { pr_err("%s:firmware upgrade mode:" "suspend not allowed\n", __func__); mutex_unlock(&ts->mutex); return -EBUSY; } mutex_unlock(&ts->mutex); if (device_may_wakeup(dev)) { if (ts->client->irq) enable_irq_wake(ts->client->irq); return 0; } if (ts->client->irq == 0) del_timer(&ts->timer); else disable_irq(ts->client->irq); if (!(retval < CY_OK)) { if (ts->platform_data->use_sleep && (ts->platform_data->power_state == CY_ACTIVE_STATE)) { if (ts->platform_data->suspend) { retval = ts->platform_data->suspend(ts->client); } else { retval = cyttsp_regulator_lpm(ts, true); } if (ts->platform_data->use_sleep & CY_USE_DEEP_SLEEP_SEL) sleep_mode = CY_DEEP_SLEEP_MODE; else sleep_mode = CY_LOW_PWR_MODE; if (!(retval < CY_OK)) { retval = 
i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, sizeof(sleep_mode), &sleep_mode); } } } if (!(retval < CY_OK)) { if (sleep_mode == CY_DEEP_SLEEP_MODE) ts->platform_data->power_state = CY_SLEEP_STATE; else if (sleep_mode == CY_LOW_PWR_MODE) ts->platform_data->power_state = CY_LOW_PWR_STATE; } ts->is_suspended = true; cyttsp_debug("Sleep Power state is %s\n", \ (ts->platform_data->power_state == CY_ACTIVE_STATE) ? \ "ACTIVE" : \ ((ts->platform_data->power_state == CY_SLEEP_STATE) ? \ "SLEEP" : "LOW POWER")); return retval; } #endif /* registered in driver struct */ static int __devexit cyttsp_remove(struct i2c_client *client) { /* clientdata registered on probe */ struct cyttsp *ts = i2c_get_clientdata(client); int err; cyttsp_alert("Unregister\n"); pm_runtime_set_suspended(&client->dev); pm_runtime_disable(&client->dev); device_init_wakeup(&client->dev, 0); device_remove_file(&ts->client->dev, &dev_attr_irq_enable); device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver); device_remove_file(&client->dev, &dev_attr_cyttsp_update_fw); device_remove_file(&client->dev, &dev_attr_cyttsp_force_update_fw); device_remove_file(&client->dev, &dev_attr_cyttsp_fw_name); /* free up timer or irq */ if (ts->client->irq == 0) { err = del_timer(&ts->timer); if (err < CY_OK) cyttsp_alert("error: failed to delete timer\n"); } else free_irq(client->irq, ts); if (ts->platform_data->regulator_info) cyttsp_power_device(ts, false); #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&ts->early_suspend); #endif /* CONFIG_HAS_EARLYSUSPEND */ mutex_destroy(&ts->mutex); if (gpio_is_valid(ts->platform_data->sleep_gpio)) { gpio_direction_output(ts->platform_data->sleep_gpio, 1); gpio_free(ts->platform_data->sleep_gpio); } if (gpio_is_valid(ts->platform_data->resout_gpio)) { gpio_direction_output(ts->platform_data->resout_gpio, 0); gpio_free(ts->platform_data->resout_gpio); } if (gpio_is_valid(ts->platform_data->irq_gpio)) gpio_free(ts->platform_data->irq_gpio); /* housekeeping */ 
kfree(ts); cyttsp_alert("Leaving\n"); return 0; } #ifdef CONFIG_HAS_EARLYSUSPEND static void cyttsp_early_suspend(struct early_suspend *handler) { struct cyttsp *ts; ts = container_of(handler, struct cyttsp, early_suspend); cyttsp_suspend(&ts->client->dev); } static void cyttsp_late_resume(struct early_suspend *handler) { struct cyttsp *ts; ts = container_of(handler, struct cyttsp, early_suspend); cyttsp_resume(&ts->client->dev); } #endif /* CONFIG_HAS_EARLYSUSPEND */ static int cyttsp_init(void) { int ret; cyttsp_info("Cypress TrueTouch(R) Standard Product\n"); cyttsp_info("I2C Touchscreen Driver (Built %s @ %s)\n", \ __DATE__, __TIME__); ret = i2c_add_driver(&cyttsp_driver); return ret; } static void cyttsp_exit(void) { return i2c_del_driver(&cyttsp_driver); } module_init(cyttsp_init); module_exit(cyttsp_exit); MODULE_FIRMWARE("cyttsp.fw");
gpl-2.0
Metallice/GTab2-Kernel-TW
fs/nfs/nfs4state.c
764
46402
/* * fs/nfs/nfs4state.c * * Client-side XDR for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Implementation of the NFSv4 state model. For the time being, * this is minimal, but will be made much more complex in a * subsequent patch. 
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/nfs_fs.h> #include <linux/nfs_idmap.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> #include <linux/ratelimit.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" #include "internal.h" #include "pnfs.h" #define OPENOWNER_POOL_SIZE 8 const nfs4_stateid zero_stateid; static LIST_HEAD(nfs4_clientid_list); int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { struct nfs4_setclientid_res clid = { .clientid = clp->cl_clientid, .confirm = clp->cl_confirm, }; unsigned short port; int status; if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) goto do_confirm; port = nfs_callback_tcpport; if (clp->cl_addr.ss_family == AF_INET6) port = nfs_callback_tcpport6; status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); if (status != 0) goto out; clp->cl_clientid = clid.clientid; clp->cl_confirm = clid.confirm; set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); do_confirm: status = nfs4_proc_setclientid_confirm(clp, &clid, cred); if (status != 0) goto out; clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs4_schedule_state_renewal(clp); out: return status; } struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp) { struct rpc_cred *cred = NULL; if (clp->cl_machine_cred != NULL) cred = get_rpccred(clp->cl_machine_cred); return cred; } static void nfs4_clear_machine_cred(struct nfs_client *clp) { struct rpc_cred *cred; spin_lock(&clp->cl_lock); cred = clp->cl_machine_cred; clp->cl_machine_cred = NULL; spin_unlock(&clp->cl_lock); if (cred != NULL) put_rpccred(cred); } static struct rpc_cred * nfs4_get_renew_cred_server_locked(struct nfs_server *server) { struct rpc_cred *cred = NULL; struct nfs4_state_owner *sp; struct rb_node *pos; for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct 
nfs4_state_owner, so_server_node); if (list_empty(&sp->so_states)) continue; cred = get_rpccred(sp->so_cred); break; } return cred; } /** * nfs4_get_renew_cred_locked - Acquire credential for a renew operation * @clp: client state handle * * Returns an rpc_cred with reference count bumped, or NULL. * Caller must hold clp->cl_lock. */ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) { struct rpc_cred *cred = NULL; struct nfs_server *server; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { cred = nfs4_get_renew_cred_server_locked(server); if (cred != NULL) break; } rcu_read_unlock(); return cred; } #if defined(CONFIG_NFS_V4_1) static int nfs41_setup_state_renewal(struct nfs_client *clp) { int status; struct nfs_fsinfo fsinfo; if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { nfs4_schedule_state_renewal(clp); return 0; } status = nfs4_proc_get_lease_time(clp, &fsinfo); if (status == 0) { /* Update lease time and schedule renewal */ spin_lock(&clp->cl_lock); clp->cl_lease_time = fsinfo.lease_time * HZ; clp->cl_last_renewal = jiffies; spin_unlock(&clp->cl_lock); nfs4_schedule_state_renewal(clp); } return status; } /* * Back channel returns NFS4ERR_DELAY for new requests when * NFS4_SESSION_DRAINING is set so there is no work to be done when draining * is ended. */ static void nfs4_end_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; int max_slots; if (ses == NULL) return; if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { spin_lock(&ses->fc_slot_table.slot_tbl_lock); max_slots = ses->fc_slot_table.max_slots; while (max_slots--) { struct rpc_task *task; task = rpc_wake_up_next(&ses->fc_slot_table. 
slot_tbl_waitq); if (!task) break; rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); } spin_unlock(&ses->fc_slot_table.slot_tbl_lock); } } static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) { spin_lock(&tbl->slot_tbl_lock); if (tbl->highest_used_slotid != -1) { INIT_COMPLETION(tbl->complete); spin_unlock(&tbl->slot_tbl_lock); return wait_for_completion_interruptible(&tbl->complete); } spin_unlock(&tbl->slot_tbl_lock); return 0; } static int nfs4_begin_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; int ret = 0; set_bit(NFS4_SESSION_DRAINING, &ses->session_state); /* back channel */ ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table); if (ret) return ret; /* fore channel */ return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); } int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { int status; if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) goto do_confirm; nfs4_begin_drain_session(clp); status = nfs4_proc_exchange_id(clp, cred); if (status != 0) goto out; set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); do_confirm: status = nfs4_proc_create_session(clp); if (status != 0) goto out; clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs41_setup_state_renewal(clp); nfs_mark_client_ready(clp, NFS_CS_READY); out: return status; } struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp) { struct rpc_cred *cred; spin_lock(&clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); spin_unlock(&clp->cl_lock); return cred; } #endif /* CONFIG_NFS_V4_1 */ static struct rpc_cred * nfs4_get_setclientid_cred_server(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct rpc_cred *cred = NULL; struct nfs4_state_owner *sp; struct rb_node *pos; spin_lock(&clp->cl_lock); pos = rb_first(&server->state_owners); if (pos != NULL) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); cred = get_rpccred(sp->so_cred); } spin_unlock(&clp->cl_lock); return cred; } /** * 
nfs4_get_setclientid_cred - Acquire credential for a setclientid operation * @clp: client state handle * * Returns an rpc_cred with reference count bumped, or NULL. */ struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) { struct nfs_server *server; struct rpc_cred *cred; spin_lock(&clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred != NULL) goto out; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { cred = nfs4_get_setclientid_cred_server(server); if (cred != NULL) break; } rcu_read_unlock(); out: return cred; } static void nfs_alloc_unique_id_locked(struct rb_root *root, struct nfs_unique_id *new, __u64 minval, int maxbits) { struct rb_node **p, *parent; struct nfs_unique_id *pos; __u64 mask = ~0ULL; if (maxbits < 64) mask = (1ULL << maxbits) - 1ULL; /* Ensure distribution is more or less flat */ get_random_bytes(&new->id, sizeof(new->id)); new->id &= mask; if (new->id < minval) new->id += minval; retry: p = &root->rb_node; parent = NULL; while (*p != NULL) { parent = *p; pos = rb_entry(parent, struct nfs_unique_id, rb_node); if (new->id < pos->id) p = &(*p)->rb_left; else if (new->id > pos->id) p = &(*p)->rb_right; else goto id_exists; } rb_link_node(&new->rb_node, parent, p); rb_insert_color(&new->rb_node, root); return; id_exists: for (;;) { new->id++; if (new->id < minval || (new->id & mask) != new->id) { new->id = minval; break; } parent = rb_next(parent); if (parent == NULL) break; pos = rb_entry(parent, struct nfs_unique_id, rb_node); if (new->id < pos->id) break; } goto retry; } static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) { rb_erase(&id->rb_node, root); } static struct nfs4_state_owner * nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) { struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp, *res = NULL; while (*p != NULL) { parent = *p; sp = rb_entry(parent, 
struct nfs4_state_owner, so_server_node); if (server < sp->so_server) { p = &parent->rb_left; continue; } if (server > sp->so_server) { p = &parent->rb_right; continue; } if (cred < sp->so_cred) p = &parent->rb_left; else if (cred > sp->so_cred) p = &parent->rb_right; else { atomic_inc(&sp->so_count); res = sp; break; } } return res; } static struct nfs4_state_owner * nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) { struct nfs_server *server = new->so_server; struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp; while (*p != NULL) { parent = *p; sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); if (new->so_cred < sp->so_cred) p = &parent->rb_left; else if (new->so_cred > sp->so_cred) p = &parent->rb_right; else { atomic_inc(&sp->so_count); return sp; } } nfs_alloc_unique_id_locked(&server->openowner_id, &new->so_owner_id, 1, 64); rb_link_node(&new->so_server_node, parent, p); rb_insert_color(&new->so_server_node, &server->state_owners); return new; } static void nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) { struct nfs_server *server = sp->so_server; if (!RB_EMPTY_NODE(&sp->so_server_node)) rb_erase(&sp->so_server_node, &server->state_owners); nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id); } /* * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to * create a new state_owner. 
* */ static struct nfs4_state_owner * nfs4_alloc_state_owner(void) { struct nfs4_state_owner *sp; sp = kzalloc(sizeof(*sp),GFP_NOFS); if (!sp) return NULL; spin_lock_init(&sp->so_lock); INIT_LIST_HEAD(&sp->so_states); rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); sp->so_seqid.sequence = &sp->so_sequence; spin_lock_init(&sp->so_sequence.lock); INIT_LIST_HEAD(&sp->so_sequence.list); atomic_set(&sp->so_count, 1); return sp; } static void nfs4_drop_state_owner(struct nfs4_state_owner *sp) { if (!RB_EMPTY_NODE(&sp->so_server_node)) { struct nfs_server *server = sp->so_server; struct nfs_client *clp = server->nfs_client; spin_lock(&clp->cl_lock); rb_erase(&sp->so_server_node, &server->state_owners); RB_CLEAR_NODE(&sp->so_server_node); spin_unlock(&clp->cl_lock); } } /** * nfs4_get_state_owner - Look up a state owner given a credential * @server: nfs_server to search * @cred: RPC credential to match * * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. */ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *new; spin_lock(&clp->cl_lock); sp = nfs4_find_state_owner_locked(server, cred); spin_unlock(&clp->cl_lock); if (sp != NULL) return sp; new = nfs4_alloc_state_owner(); if (new == NULL) return NULL; new->so_server = server; new->so_cred = cred; spin_lock(&clp->cl_lock); sp = nfs4_insert_state_owner_locked(new); spin_unlock(&clp->cl_lock); if (sp == new) get_rpccred(cred); else { rpc_destroy_wait_queue(&new->so_sequence.wait); kfree(new); } return sp; } /** * nfs4_put_state_owner - Release a nfs4_state_owner * @sp: state owner data to release * */ void nfs4_put_state_owner(struct nfs4_state_owner *sp) { struct nfs_client *clp = sp->so_server->nfs_client; struct rpc_cred *cred = sp->so_cred; if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) return; nfs4_remove_state_owner_locked(sp); spin_unlock(&clp->cl_lock); 
rpc_destroy_wait_queue(&sp->so_sequence.wait); put_rpccred(cred); kfree(sp); } static struct nfs4_state * nfs4_alloc_open_state(void) { struct nfs4_state *state; state = kzalloc(sizeof(*state), GFP_NOFS); if (!state) return NULL; atomic_set(&state->count, 1); INIT_LIST_HEAD(&state->lock_states); spin_lock_init(&state->state_lock); seqlock_init(&state->seqlock); return state; } void nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode) { if (state->state == fmode) return; /* NB! List reordering - see the reclaim code for why. */ if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) { if (fmode & FMODE_WRITE) list_move(&state->open_states, &state->owner->so_states); else list_move_tail(&state->open_states, &state->owner->so_states); } state->state = fmode; } static struct nfs4_state * __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs4_state *state; list_for_each_entry(state, &nfsi->open_states, inode_states) { if (state->owner != owner) continue; if (atomic_inc_not_zero(&state->count)) return state; } return NULL; } static void nfs4_free_open_state(struct nfs4_state *state) { kfree(state); } struct nfs4_state * nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) { struct nfs4_state *state, *new; struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); spin_unlock(&inode->i_lock); if (state) goto out; new = nfs4_alloc_open_state(); spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); if (state == NULL && new != NULL) { state = new; state->owner = owner; atomic_inc(&owner->so_count); list_add(&state->inode_states, &nfsi->open_states); ihold(inode); state->inode = inode; spin_unlock(&inode->i_lock); /* Note: The reclaim code dictates that we add stateless * and read-only stateids to the end of the list */ list_add_tail(&state->open_states, 
&owner->so_states); spin_unlock(&owner->so_lock); } else { spin_unlock(&inode->i_lock); spin_unlock(&owner->so_lock); if (new) nfs4_free_open_state(new); } out: return state; } void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; if (!atomic_dec_and_lock(&state->count, &owner->so_lock)) return; spin_lock(&inode->i_lock); list_del(&state->inode_states); list_del(&state->open_states); spin_unlock(&inode->i_lock); spin_unlock(&owner->so_lock); iput(inode); nfs4_free_open_state(state); nfs4_put_state_owner(owner); } /* * Close the current file. */ static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, gfp_t gfp_mask, int wait) { struct nfs4_state_owner *owner = state->owner; int call_close = 0; fmode_t newstate; atomic_inc(&owner->so_count); /* Protect against nfs4_find_state() */ spin_lock(&owner->so_lock); switch (fmode & (FMODE_READ | FMODE_WRITE)) { case FMODE_READ: state->n_rdonly--; break; case FMODE_WRITE: state->n_wronly--; break; case FMODE_READ|FMODE_WRITE: state->n_rdwr--; } newstate = FMODE_READ|FMODE_WRITE; if (state->n_rdwr == 0) { if (state->n_rdonly == 0) { newstate &= ~FMODE_READ; call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); } if (state->n_wronly == 0) { newstate &= ~FMODE_WRITE; call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); } if (newstate == 0) clear_bit(NFS_DELEGATED_STATE, &state->flags); } nfs4_state_set_mode_locked(state, newstate); spin_unlock(&owner->so_lock); if (!call_close) { nfs4_put_open_state(state); nfs4_put_state_owner(owner); } else { bool roc = pnfs_roc(state->inode); nfs4_do_close(path, state, gfp_mask, wait, roc); } } void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) { __nfs4_close(path, state, fmode, GFP_NOFS, 0); } void nfs4_close_sync(struct path 
*path, struct nfs4_state *state, fmode_t fmode) { __nfs4_close(path, state, fmode, GFP_KERNEL, 1); } /* * Search the state->lock_states for an existing lock_owner * that is compatible with current->files */ static struct nfs4_lock_state * __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) { struct nfs4_lock_state *pos; list_for_each_entry(pos, &state->lock_states, ls_locks) { if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type) continue; switch (pos->ls_owner.lo_type) { case NFS4_POSIX_LOCK_TYPE: if (pos->ls_owner.lo_u.posix_owner != fl_owner) continue; break; case NFS4_FLOCK_LOCK_TYPE: if (pos->ls_owner.lo_u.flock_owner != fl_pid) continue; } atomic_inc(&pos->ls_count); return pos; } return NULL; } /* * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * */ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) { struct nfs4_lock_state *lsp; struct nfs_server *server = state->owner->so_server; struct nfs_client *clp = server->nfs_client; lsp = kzalloc(sizeof(*lsp), GFP_NOFS); if (lsp == NULL) return NULL; rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue"); spin_lock_init(&lsp->ls_sequence.lock); INIT_LIST_HEAD(&lsp->ls_sequence.list); lsp->ls_seqid.sequence = &lsp->ls_sequence; atomic_set(&lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner.lo_type = type; switch (lsp->ls_owner.lo_type) { case NFS4_FLOCK_LOCK_TYPE: lsp->ls_owner.lo_u.flock_owner = fl_pid; break; case NFS4_POSIX_LOCK_TYPE: lsp->ls_owner.lo_u.posix_owner = fl_owner; break; default: kfree(lsp); return NULL; } spin_lock(&clp->cl_lock); nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64); spin_unlock(&clp->cl_lock); INIT_LIST_HEAD(&lsp->ls_locks); return lsp; } static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) { struct nfs_server *server = 
lsp->ls_state->owner->so_server; struct nfs_client *clp = server->nfs_client; spin_lock(&clp->cl_lock); nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id); spin_unlock(&clp->cl_lock); rpc_destroy_wait_queue(&lsp->ls_sequence.wait); kfree(lsp); } /* * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * */ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type) { struct nfs4_lock_state *lsp, *new = NULL; for(;;) { spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, owner, pid, type); if (lsp != NULL) break; if (new != NULL) { list_add(&new->ls_locks, &state->lock_states); set_bit(LK_STATE_IN_USE, &state->flags); lsp = new; new = NULL; break; } spin_unlock(&state->state_lock); new = nfs4_alloc_lock_state(state, owner, pid, type); if (new == NULL) return NULL; } spin_unlock(&state->state_lock); if (new != NULL) nfs4_free_lock_state(new); return lsp; } /* * Release reference to lock_state, and free it if we see that * it is no longer in use */ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) { struct nfs4_state *state; if (lsp == NULL) return; state = lsp->ls_state; if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock)) return; list_del(&lsp->ls_locks); if (list_empty(&state->lock_states)) clear_bit(LK_STATE_IN_USE, &state->flags); spin_unlock(&state->state_lock); if (lsp->ls_flags & NFS_LOCK_INITIALIZED) nfs4_release_lockowner(lsp); nfs4_free_lock_state(lsp); } static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) { struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner; dst->fl_u.nfs4_fl.owner = lsp; atomic_inc(&lsp->ls_count); } static void nfs4_fl_release_lock(struct file_lock *fl) { nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); } static const struct file_lock_operations nfs4_fl_lock_ops = { .fl_copy_lock = nfs4_fl_copy_lock, .fl_release_private = nfs4_fl_release_lock, }; int 
nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) { struct nfs4_lock_state *lsp; if (fl->fl_ops != NULL) return 0; if (fl->fl_flags & FL_POSIX) lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE); else if (fl->fl_flags & FL_FLOCK) lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE); else return -EINVAL; if (lsp == NULL) return -ENOMEM; fl->fl_u.nfs4_fl.owner = lsp; fl->fl_ops = &nfs4_fl_lock_ops; return 0; } /* * Byte-range lock aware utility to initialize the stateid of read/write * requests. */ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid) { struct nfs4_lock_state *lsp; int seq; do { seq = read_seqbegin(&state->seqlock); memcpy(dst, &state->stateid, sizeof(*dst)); } while (read_seqretry(&state->seqlock, seq)); if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) return; spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE); if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) memcpy(dst, &lsp->ls_stateid, sizeof(*dst)); spin_unlock(&state->state_lock); nfs4_put_lock_state(lsp); } struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) { struct nfs_seqid *new; new = kmalloc(sizeof(*new), gfp_mask); if (new != NULL) { new->sequence = counter; INIT_LIST_HEAD(&new->list); } return new; } void nfs_release_seqid(struct nfs_seqid *seqid) { if (!list_empty(&seqid->list)) { struct rpc_sequence *sequence = seqid->sequence->sequence; spin_lock(&sequence->lock); list_del_init(&seqid->list); spin_unlock(&sequence->lock); rpc_wake_up(&sequence->wait); } } void nfs_free_seqid(struct nfs_seqid *seqid) { nfs_release_seqid(seqid); kfree(seqid); } /* * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or * failed with a seqid incrementing error - * see comments nfs_fs.h:seqid_mutating_error() */ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) { 
BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid); switch (status) { case 0: break; case -NFS4ERR_BAD_SEQID: if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) return; printk(KERN_WARNING "NFS: v4 server returned a bad" " sequence-id error on an" " unconfirmed sequence %p!\n", seqid->sequence); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_BADXDR: case -NFS4ERR_RESOURCE: case -NFS4ERR_NOFILEHANDLE: /* Non-seqid mutating errors */ return; }; /* * Note: no locking needed as we are guaranteed to be first * on the sequence list */ seqid->sequence->counter++; } void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) { struct nfs4_state_owner *sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid); struct nfs_server *server = sp->so_server; if (status == -NFS4ERR_BAD_SEQID) nfs4_drop_state_owner(sp); if (!nfs4_has_session(server->nfs_client)) nfs_increment_seqid(status, seqid); } /* * Increment the seqid if the LOCK/LOCKU succeeded, or * failed with a seqid incrementing error - * see comments nfs_fs.h:seqid_mutating_error() */ void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) { nfs_increment_seqid(status, seqid); } int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) { struct rpc_sequence *sequence = seqid->sequence->sequence; int status = 0; spin_lock(&sequence->lock); if (list_empty(&seqid->list)) list_add_tail(&seqid->list, &sequence->list); if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid) goto unlock; rpc_sleep_on(&sequence->wait, task, NULL); status = -EAGAIN; unlock: spin_unlock(&sequence->lock); return status; } static int nfs4_run_state_manager(void *); static void nfs4_clear_state_manager_bit(struct nfs_client *clp) { smp_mb__before_clear_bit(); clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); smp_mb__after_clear_bit(); wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); 
rpc_wake_up(&clp->cl_rpcwaitq); } /* * Schedule the nfs_client asynchronous state management routine */ void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; __module_get(THIS_MODULE); atomic_inc(&clp->cl_count); task = kthread_run(nfs4_run_state_manager, clp, "%s-manager", rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); if (!IS_ERR(task)) return; nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(THIS_MODULE); } /* * Schedule a lease recovery attempt */ void nfs4_schedule_lease_recovery(struct nfs_client *clp) { if (!clp) return; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); nfs4_schedule_state_manager(clp); } static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); /* Don't recover state that expired before the reboot */ if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) { clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); return 0; } set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); return 1; } static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); return 1; } void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) { struct nfs_client *clp = server->nfs_client; nfs4_state_mark_reclaim_nograce(clp, state); nfs4_schedule_state_manager(clp); } static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); struct file_lock *fl; int 
status = 0; if (inode->i_flock == NULL) return 0; /* Guard against delegation returns and new lock/unlock calls */ down_write(&nfsi->rwsem); /* Protect inode->i_flock using the BKL */ lock_flocks(); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) continue; if (nfs_file_open_context(fl->fl_file)->state != state) continue; unlock_flocks(); status = ops->recover_lock(state, fl); switch (status) { case 0: break; case -ESTALE: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto out; default: printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", __func__, status); case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: /* kill_proc(fl->fl_pid, SIGLOST, 1); */ status = 0; } lock_flocks(); } unlock_flocks(); out: up_write(&nfsi->rwsem); return status; } static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state *state; struct nfs4_lock_state *lock; int status = 0; /* Note: we rely on the sp->so_states list being ordered * so that we always reclaim open(O_RDWR) and/or open(O_WRITE) * states first. * This is needed to ensure that the server won't give us any * read delegations that we have to return if, say, we are * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
*/ restart: spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) continue; if (state->state == 0) continue; atomic_inc(&state->count); spin_unlock(&sp->so_lock); status = ops->recover_open(sp, state); if (status >= 0) { status = nfs4_reclaim_locks(state, ops); if (status >= 0) { list_for_each_entry(lock, &state->lock_states, ls_locks) { if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) printk("%s: Lock reclaim failed!\n", __func__); } nfs4_put_open_state(state); goto restart; } } switch (status) { default: printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", __func__, status); case -ENOENT: case -ENOMEM: case -ESTALE: /* * Open state on this file cannot be recovered * All we can do is revert to using the zero stateid. */ memset(state->stateid.data, 0, sizeof(state->stateid.data)); /* Mark the file as being 'closed' */ state->state = 0; break; case -EKEYEXPIRED: /* * User RPCSEC_GSS context has expired. * We cannot recover this stateid now, so * skip it and allow recovery thread to * proceed. 
*/ break; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); break; case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto out_err; } nfs4_put_open_state(state); goto restart; } spin_unlock(&sp->so_lock); return 0; out_err: nfs4_put_open_state(state); return status; } static void nfs4_clear_open_state(struct nfs4_state *state) { struct nfs4_lock_state *lock; clear_bit(NFS_DELEGATED_STATE, &state->flags); clear_bit(NFS_O_RDONLY_STATE, &state->flags); clear_bit(NFS_O_WRONLY_STATE, &state->flags); clear_bit(NFS_O_RDWR_STATE, &state->flags); list_for_each_entry(lock, &state->lock_states, ls_locks) { lock->ls_seqid.flags = 0; lock->ls_flags &= ~NFS_LOCK_INITIALIZED; } } static void nfs4_reset_seqids(struct nfs_server *server, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); sp->so_seqid.flags = 0; spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (mark_reclaim(clp, state)) nfs4_clear_open_state(state); } spin_unlock(&sp->so_lock); } spin_unlock(&clp->cl_lock); } static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { struct nfs_server *server; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) nfs4_reset_seqids(server, mark_reclaim); 
rcu_read_unlock(); } static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) { /* Mark all delegations for reclaim */ nfs_delegation_mark_reclaim(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); } static void nfs4_reclaim_complete(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { /* Notify the server we're done reclaiming our state */ if (ops->reclaim_complete) (void)ops->reclaim_complete(clp); } static void nfs4_clear_reclaim_server(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags)) continue; nfs4_state_mark_reclaim_nograce(clp, state); } spin_unlock(&sp->so_lock); } spin_unlock(&clp->cl_lock); } static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) { struct nfs_server *server; if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) return 0; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) nfs4_clear_reclaim_server(server); rcu_read_unlock(); nfs_delegation_reap_unclaimed(clp); return 1; } static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) { if (!nfs4_state_clear_reclaim_reboot(clp)) return; nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops); } static void nfs_delegation_clear_all(struct nfs_client *clp) { nfs_delegation_mark_reclaim(clp); nfs_delegation_reap_unclaimed(clp); } static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) { nfs_delegation_clear_all(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); } static void nfs4_warn_keyexpired(const char *s) { 
printk_ratelimited(KERN_WARNING "Error: state manager" " encountered RPCSEC_GSS session" " expired against NFSv4 server %s.\n", s); } static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) { switch (error) { case -NFS4ERR_CB_PATH_DOWN: nfs_handle_cb_pathdown(clp); return 0; case -NFS4ERR_NO_GRACE: nfs4_state_end_reclaim_reboot(clp); return 0; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_LEASE_MOVED: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs4_state_start_reclaim_nograce(clp); break; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_DEADSESSION: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); /* Zero session reset errors */ return 0; case -EKEYEXPIRED: /* Nothing we can do */ nfs4_warn_keyexpired(clp->cl_hostname); return 0; } return error; } static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state_owner *sp; struct nfs_server *server; struct rb_node *pos; int status = 0; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) continue; atomic_inc(&sp->so_count); spin_unlock(&clp->cl_lock); rcu_read_unlock(); status = nfs4_reclaim_open_state(sp, ops); if (status < 0) { set_bit(ops->owner_flag_bit, &sp->so_flags); nfs4_put_state_owner(sp); return nfs4_recovery_handle_error(clp, status); } nfs4_put_state_owner(sp); goto restart; } spin_unlock(&clp->cl_lock); } rcu_read_unlock(); return status; } static int nfs4_check_lease(struct 
nfs_client *clp) { struct rpc_cred *cred; const struct nfs4_state_maintenance_ops *ops = clp->cl_mvops->state_renewal_ops; int status = -NFS4ERR_EXPIRED; /* Is the client already known to have an expired lease? */ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) return 0; spin_lock(&clp->cl_lock); cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) { cred = nfs4_get_setclientid_cred(clp); if (cred == NULL) goto out; } status = ops->renew_lease(clp, cred); put_rpccred(cred); out: return nfs4_recovery_handle_error(clp, status); } static int nfs4_reclaim_lease(struct nfs_client *clp) { struct rpc_cred *cred; const struct nfs4_state_recovery_ops *ops = clp->cl_mvops->reboot_recovery_ops; int status = -ENOENT; cred = ops->get_clid_cred(clp); if (cred != NULL) { status = ops->establish_clid(clp, cred); put_rpccred(cred); /* Handle case where the user hasn't set up machine creds */ if (status == -EACCES && cred == clp->cl_machine_cred) { nfs4_clear_machine_cred(clp); status = -EAGAIN; } if (status == -NFS4ERR_MINOR_VERS_MISMATCH) status = -EPROTONOSUPPORT; } return status; } #ifdef CONFIG_NFS_V4_1 void nfs4_schedule_session_recovery(struct nfs4_session *session) { struct nfs_client *clp = session->clp; set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); nfs4_schedule_lease_recovery(clp); } EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); void nfs41_handle_recall_slot(struct nfs_client *clp) { set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); nfs4_schedule_state_manager(clp); } static void nfs4_reset_all_state(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { clp->cl_boot_time = CURRENT_TIME; nfs4_state_start_reclaim_nograce(clp); nfs4_schedule_state_manager(clp); } } static void nfs41_handle_server_reboot(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { nfs4_state_start_reclaim_reboot(clp); nfs4_schedule_state_manager(clp); } } static void 
nfs41_handle_state_revoked(struct nfs_client *clp) { /* Temporary */ nfs4_reset_all_state(clp); } static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) { /* This will need to handle layouts too */ nfs_expire_all_delegations(clp); } static void nfs41_handle_cb_path_down(struct nfs_client *clp) { nfs_expire_all_delegations(clp); if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) nfs4_schedule_state_manager(clp); } void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) { if (!flags) return; else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) nfs41_handle_server_reboot(clp); else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED | SEQ4_STATUS_LEASE_MOVED)) nfs41_handle_state_revoked(clp); else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) nfs41_handle_recallable_state_revoked(clp); else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | SEQ4_STATUS_BACKCHANNEL_FAULT | SEQ4_STATUS_CB_PATH_DOWN_SESSION)) nfs41_handle_cb_path_down(clp); } static int nfs4_reset_session(struct nfs_client *clp) { int status; nfs4_begin_drain_session(clp); status = nfs4_proc_destroy_session(clp->cl_session); if (status && status != -NFS4ERR_BADSESSION && status != -NFS4ERR_DEADSESSION) { status = nfs4_recovery_handle_error(clp, status); goto out; } memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); status = nfs4_proc_create_session(clp); if (status) { status = nfs4_recovery_handle_error(clp, status); goto out; } clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); /* create_session negotiated new slot table */ clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); /* Let the state manager reestablish state */ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) nfs41_setup_state_renewal(clp); out: return status; } static int nfs4_recall_slot(struct nfs_client *clp) { struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table; struct nfs4_channel_attrs 
*fc_attrs = &clp->cl_session->fc_attrs; struct nfs4_slot *new, *old; int i; nfs4_begin_drain_session(clp); new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot), GFP_NOFS); if (!new) return -ENOMEM; spin_lock(&fc_tbl->slot_tbl_lock); for (i = 0; i < fc_tbl->target_max_slots; i++) new[i].seq_nr = fc_tbl->slots[i].seq_nr; old = fc_tbl->slots; fc_tbl->slots = new; fc_tbl->max_slots = fc_tbl->target_max_slots; fc_tbl->target_max_slots = 0; fc_attrs->max_reqs = fc_tbl->max_slots; spin_unlock(&fc_tbl->slot_tbl_lock); kfree(old); nfs4_end_drain_session(clp); return 0; } #else /* CONFIG_NFS_V4_1 */ static int nfs4_reset_session(struct nfs_client *clp) { return 0; } static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } static int nfs4_recall_slot(struct nfs_client *clp) { return 0; } #endif /* CONFIG_NFS_V4_1 */ /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors * on EXCHANGE_ID for v4.1 */ static void nfs4_set_lease_expired(struct nfs_client *clp, int status) { switch (status) { case -NFS4ERR_CLID_INUSE: case -NFS4ERR_STALE_CLIENTID: clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); break; case -NFS4ERR_DELAY: case -ETIMEDOUT: case -EAGAIN: ssleep(1); break; case -EKEYEXPIRED: nfs4_warn_keyexpired(clp->cl_hostname); case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery * in nfs4_exchange_id */ default: return; } set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); } static void nfs4_state_manager(struct nfs_client *clp) { int status = 0; /* Ensure exclusive access to NFSv4 state */ do { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); if (status) { nfs4_set_lease_expired(clp, status); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) continue; if (clp->cl_cons_state == NFS_CS_SESSION_INITING) nfs_mark_client_ready(clp, status); goto out_error; } clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); pnfs_destroy_all_layouts(clp); } if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) { status = nfs4_check_lease(clp); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) continue; if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN) goto out_error; } /* Initialize or reset the session */ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) && nfs4_has_session(clp)) { status = nfs4_reset_session(clp); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) continue; if (status < 0) goto out_error; } /* First recover reboot state... */ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { status = nfs4_do_reclaim(clp, clp->cl_mvops->reboot_recovery_ops); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) continue; nfs4_state_end_reclaim_reboot(clp); if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) continue; if (status < 0) goto out_error; } /* Now recover expired state... */ if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { status = nfs4_do_reclaim(clp, clp->cl_mvops->nograce_recovery_ops); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) || test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) continue; if (status < 0) goto out_error; } nfs4_end_drain_session(clp); if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { nfs_client_return_marked_delegations(clp); continue; } /* Recall session slots */ if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state) && nfs4_has_session(clp)) { status = nfs4_recall_slot(clp); if (status < 0) goto out_error; continue; } nfs4_clear_state_manager_bit(clp); /* Did we race with an attempt to give us more work? 
*/ if (clp->cl_state == 0) break; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) break; } while (atomic_read(&clp->cl_count) > 1); return; out_error: printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" " with error %d\n", clp->cl_hostname, -status); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); } static int nfs4_run_state_manager(void *ptr) { struct nfs_client *clp = ptr; allow_signal(SIGKILL); nfs4_state_manager(clp); nfs_put_client(clp); module_put_and_exit(0); return 0; } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
yazidkucrit/android_kernel_samsung_aries
drivers/gpu/drm/drm_gem.c
1788
15397
/* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eric Anholt <eric@anholt.net> * */ #include <linux/types.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/shmem_fs.h> #include "drmP.h" /** @file drm_gem.c * * This file provides some of the base ioctls and library routines for * the graphics memory manager implemented by each device driver. * * Because various devices have different requirements in terms of * synchronization and migration strategies, implementing that is left up to * the driver, and all that the general API provides should be generic -- * allocating objects, reading/writing data with the cpu, freeing objects. 
* Even there, platform-dependent optimizations for reading/writing data with * the CPU mean we'll likely hook those out to driver-specific calls. However, * the DRI2 implementation wants to have at least allocate/mmap be generic. * * The goal was to have swap-backed object allocation managed through * struct file. However, file descriptors as handles to a struct file have * two major failings: * - Process limits prevent more than 1024 or so being used at a time by * default. * - Inability to allocate high fds will aggravate the X Server's select() * handling, and likely that of many GL client applications as well. * * This led to a plan of using our own integer IDs (called handles, following * DRM terminology) to mimic fds, and implement the fd syscalls we need as * ioctls. The objects themselves will still include the struct file so * that we can transition to fds if the required kernel infrastructure shows * up at a later date, and as our interface with shmfs for memory allocation. */ /* * We make up offsets for buffer objects so we can recognize them at * mmap time. 
*/ /* pgoff in mmap is an unsigned long, so we need to make sure that * the faked up offset will fit */ #if BITS_PER_LONG == 64 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) #else #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) #endif /** * Initialize the GEM device fields */ int drm_gem_init(struct drm_device *dev) { struct drm_gem_mm *mm; spin_lock_init(&dev->object_name_lock); idr_init(&dev->object_name_idr); mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); if (!mm) { DRM_ERROR("out of memory\n"); return -ENOMEM; } dev->mm_private = mm; if (drm_ht_create(&mm->offset_hash, 12)) { kfree(mm); return -ENOMEM; } if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE)) { drm_ht_remove(&mm->offset_hash); kfree(mm); return -ENOMEM; } return 0; } void drm_gem_destroy(struct drm_device *dev) { struct drm_gem_mm *mm = dev->mm_private; drm_mm_takedown(&mm->offset_manager); drm_ht_remove(&mm->offset_hash); kfree(mm); dev->mm_private = NULL; } /** * Initialize an already allocate GEM object of the specified size with * shmfs backing store. 
*/ int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size) { BUG_ON((size & (PAGE_SIZE - 1)) != 0); obj->dev = dev; obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(obj->filp)) return -ENOMEM; kref_init(&obj->refcount); atomic_set(&obj->handle_count, 0); obj->size = size; return 0; } EXPORT_SYMBOL(drm_gem_object_init); /** * Allocate a GEM object of the specified size with shmfs backing store */ struct drm_gem_object * drm_gem_object_alloc(struct drm_device *dev, size_t size) { struct drm_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) goto free; if (drm_gem_object_init(dev, obj, size) != 0) goto free; if (dev->driver->gem_init_object != NULL && dev->driver->gem_init_object(obj) != 0) { goto fput; } return obj; fput: /* Object_init mangles the global counters - readjust them. */ fput(obj->filp); free: kfree(obj); return NULL; } EXPORT_SYMBOL(drm_gem_object_alloc); /** * Removes the mapping from handle to filp for this object. */ int drm_gem_handle_delete(struct drm_file *filp, u32 handle) { struct drm_device *dev; struct drm_gem_object *obj; /* This is gross. The idr system doesn't let us try a delete and * return an error code. It just spews if you fail at deleting. * So, we have to grab a lock around finding the object and then * doing the delete on it and dropping the refcount, or the user * could race us to double-decrement the refcount and cause a * use-after-free later. Given the frequency of our handle lookups, * we may want to use ida for number allocation and a hash table * for the pointers, anyway. */ spin_lock(&filp->table_lock); /* Check if we currently have a reference on the object */ obj = idr_find(&filp->object_idr, handle); if (obj == NULL) { spin_unlock(&filp->table_lock); return -EINVAL; } dev = obj->dev; /* Release reference and decrement refcount. 
*/ idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); drm_gem_object_handle_unreference_unlocked(obj); return 0; } EXPORT_SYMBOL(drm_gem_handle_delete); /** * Create a handle for this object. This adds a handle reference * to the object, which includes a regular reference count. Callers * will likely want to dereference the object afterwards. */ int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep) { int ret; /* * Get the user-visible handle using idr. */ again: /* ensure there is space available to allocate a handle */ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) return -ENOMEM; /* do the allocation under our spinlock */ spin_lock(&file_priv->table_lock); ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); spin_unlock(&file_priv->table_lock); if (ret == -EAGAIN) goto again; if (ret != 0) return ret; drm_gem_object_handle_reference(obj); return 0; } EXPORT_SYMBOL(drm_gem_handle_create); /** Returns a reference to the object named by the handle. */ struct drm_gem_object * drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, u32 handle) { struct drm_gem_object *obj; spin_lock(&filp->table_lock); /* Check if we currently have a reference on the object */ obj = idr_find(&filp->object_idr, handle); if (obj == NULL) { spin_unlock(&filp->table_lock); return NULL; } drm_gem_object_reference(obj); spin_unlock(&filp->table_lock); return obj; } EXPORT_SYMBOL(drm_gem_object_lookup); /** * Releases the handle to an mm object. */ int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_close *args = data; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; ret = drm_gem_handle_delete(file_priv, args->handle); return ret; } /** * Create a global name for an object, returning the name. * * Note that the name does not hold a reference; when the object * is freed, the name goes away. 
*/ int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_flink *args = data; struct drm_gem_object *obj; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -ENOENT; again: if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) { ret = -ENOMEM; goto err; } spin_lock(&dev->object_name_lock); if (!obj->name) { ret = idr_get_new_above(&dev->object_name_idr, obj, 1, &obj->name); args->name = (uint64_t) obj->name; spin_unlock(&dev->object_name_lock); if (ret == -EAGAIN) goto again; if (ret != 0) goto err; /* Allocate a reference for the name table. */ drm_gem_object_reference(obj); } else { args->name = (uint64_t) obj->name; spin_unlock(&dev->object_name_lock); ret = 0; } err: drm_gem_object_unreference_unlocked(obj); return ret; } /** * Open an object using the global name, returning a handle and the size. * * This handle (of course) holds a reference to the object, so the object * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_gem_open *args = data; struct drm_gem_object *obj; int ret; u32 handle; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; spin_lock(&dev->object_name_lock); obj = idr_find(&dev->object_name_idr, (int) args->name); if (obj) drm_gem_object_reference(obj); spin_unlock(&dev->object_name_lock); if (!obj) return -ENOENT; ret = drm_gem_handle_create(file_priv, obj, &handle); drm_gem_object_unreference_unlocked(obj); if (ret) return ret; args->handle = handle; args->size = obj->size; return 0; } /** * Called at device open time, sets up the structure for handling refcounting * of mm objects. 
*/ void drm_gem_open(struct drm_device *dev, struct drm_file *file_private) { idr_init(&file_private->object_idr); spin_lock_init(&file_private->table_lock); } /** * Called at device close to release the file's * handle references on objects. */ static int drm_gem_object_release_handle(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; drm_gem_object_handle_unreference_unlocked(obj); return 0; } /** * Called at close time when the filp is going away. * * Releases any remaining references on objects by this filp. */ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private) { idr_for_each(&file_private->object_idr, &drm_gem_object_release_handle, NULL); idr_remove_all(&file_private->object_idr); idr_destroy(&file_private->object_idr); } void drm_gem_object_release(struct drm_gem_object *obj) { fput(obj->filp); } EXPORT_SYMBOL(drm_gem_object_release); /** * Called after the last reference to the object has been lost. * Must be called holding struct_ mutex * * Frees the object */ void drm_gem_object_free(struct kref *kref) { struct drm_gem_object *obj = (struct drm_gem_object *) kref; struct drm_device *dev = obj->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); } EXPORT_SYMBOL(drm_gem_object_free); static void drm_gem_object_ref_bug(struct kref *list_kref) { BUG(); } /** * Called after the last handle to the object has been closed * * Removes any name for the object. Note that this must be * called before drm_gem_object_free or we'll be touching * freed memory */ void drm_gem_object_handle_free(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; /* Remove any name for this object */ spin_lock(&dev->object_name_lock); if (obj->name) { idr_remove(&dev->object_name_idr, obj->name); obj->name = 0; spin_unlock(&dev->object_name_lock); /* * The object name held a reference to this object, drop * that now. 
* * This cannot be the last reference, since the handle holds one too. */ kref_put(&obj->refcount, drm_gem_object_ref_bug); } else spin_unlock(&dev->object_name_lock); } EXPORT_SYMBOL(drm_gem_object_handle_free); void drm_gem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_reference(obj); mutex_lock(&obj->dev->struct_mutex); drm_vm_open_locked(vma); mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; struct drm_device *dev = obj->dev; mutex_lock(&dev->struct_mutex); drm_vm_close_locked(vma); drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); /** * drm_gem_mmap - memory map routine for GEM objects * @filp: DRM file pointer * @vma: VMA for the area to be mapped * * If a driver supports GEM object mapping, mmap calls on the DRM file * descriptor will end up here. * * If we find the object based on the offset passed in (vma->vm_pgoff will * contain the fake offset we created when the GTT map ioctl was called on * the object), we set up the driver fault handler so that any accesses * to the object can be trapped, to perform migration, GTT binding, surface * register allocation, or performance monitoring. 
*/ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_gem_mm *mm = dev->mm_private; struct drm_local_map *map = NULL; struct drm_gem_object *obj; struct drm_hash_item *hash; int ret = 0; mutex_lock(&dev->struct_mutex); if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { mutex_unlock(&dev->struct_mutex); return drm_mmap(filp, vma); } map = drm_hash_entry(hash, struct drm_map_list, hash)->map; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) { ret = -EPERM; goto out_unlock; } /* Check for valid size. */ if (map->size < vma->vm_end - vma->vm_start) { ret = -EINVAL; goto out_unlock; } obj = map->handle; if (!obj->dev->driver->gem_vm_ops) { ret = -EINVAL; goto out_unlock; } vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; vma->vm_ops = obj->dev->driver->gem_vm_ops; vma->vm_private_data = map->handle; vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); /* Take a ref for this mapping of the object, so that the fault * handler can dereference the mmap offset's pointer to the object. * This reference is cleaned up by the corresponding vm_close * (which should happen whether the vma was created by this call, or * by a vm_open due to mremap or partial unmap or whatever). */ drm_gem_object_reference(obj); vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open_locked(vma); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } EXPORT_SYMBOL(drm_gem_mmap);
gpl-2.0
AICP/kernel_oneplus_msm8974
drivers/net/ethernet/intel/e1000e/phy.c
2044
91915
/******************************************************************************* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "e1000.h" static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set); static u32 e1000_get_phy_addr_for_hv_page(u32 page); static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_m88_cable_length_table) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) #define BM_PHY_REG_PAGE(offset) \ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) #define BM_PHY_REG_NUM(offset) \ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ ~MAX_PHY_REG_ADDRESS))) #define HV_INTC_FC_PAGE_START 768 #define I82578_ADDR_REG 29 #define I82577_ADDR_REG 16 #define I82577_CFG_REG 22 #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 
 */
#define I82577_CTRL_REG               23

/* 82577 specific PHY registers */
#define I82577_PHY_CTRL_2            18
#define I82577_PHY_STATUS_2          26
#define I82577_PHY_DIAG_STATUS       31

/* I82577 PHY Status 2 */
#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
#define I82577_PHY_STATUS2_MDIX           0x0800
#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200

/* I82577 PHY Control 2 */
#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200

/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2

/* BM PHY Copper Specific Control 1 */
#define BM_CS_CTRL1                       16

#define HV_MUX_DATA_CTRL               PHY_REG(776, 16)
#define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
#define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004

/**
 * e1000e_check_reset_block_generic - Check if PHY reset is blocked
 * @hw: pointer to the HW structure
 *
 * Read the PHY management control register and check whether a PHY reset
 * is blocked.  If a reset is not blocked return 0, otherwise
 * return E1000_BLK_PHY_RESET (12).
 **/
s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
{
	u32 manc;

	manc = er32(MANC);

	/* Non-zero only when the BLK_PHY_RST_ON_IDE manageability bit is set */
	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
	       E1000_BLK_PHY_RESET : 0;
}

/**
 * e1000e_get_phy_id - Retrieve the PHY ID and revision
 * @hw: pointer to the HW structure
 *
 * Reads the PHY registers and stores the PHY ID and possibly the PHY
 * revision in the hardware structure.
 **/
s32 e1000e_get_phy_id(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u16 retry_count = 0;

	/* Nothing to do if this PHY has no register read method */
	if (!phy->ops.read_reg)
		return 0;

	/* Make up to two attempts; an all-zero or all-ones ID means the
	 * read was not valid and is retried once. */
	while (retry_count < 2) {
		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
		if (ret_val)
			return ret_val;

		phy->id = (u32)(phy_id << 16);
		udelay(20);
		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
		if (ret_val)
			return ret_val;

		phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);

		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
			return 0;

		retry_count++;
	}

	return 0;
}

/**
 * e1000e_phy_reset_dsp - Reset PHY DSP
 * @hw: pointer to the HW structure
 *
 * Reset the digital signal processor.
 **/
s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
	if (ret_val)
		return ret_val;

	return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}

/**
 * e1000e_read_phy_reg_mdic - Read MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.
 **/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		e_dbg("PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/*
	 * Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_READ));

	ew32(MDIC, mdic);

	/*
	 * Poll the ready bit to see if the MDI read completed
	 * Increasing the time out as testing showed failures with
	 * the lower time out
	 */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		e_dbg("MDI Read did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		e_dbg("MDI Error\n");
		return -E1000_ERR_PHY;
	}
	/* Data is in the low 16 bits of the MDIC register */
	*data = (u16) mdic;

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (hw->mac.type == e1000_pch2lan)
		udelay(100);

	return 0;
}

/**
 * e1000e_write_phy_reg_mdic - Write MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
 **/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		e_dbg("PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/*
	 * Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	mdic = (((u32)data) |
		(offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_WRITE));

	ew32(MDIC, mdic);

	/*
	 * Poll the ready bit to see if the MDI read completed
	 * Increasing the time out as testing showed failures with
	 * the lower time out
	 */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		e_dbg("MDI Write did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		e_dbg("MDI Error\n");
		return -E1000_ERR_PHY;
	}

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (hw->mac.type == e1000_pch2lan)
		udelay(100);

	return 0;
}

/**
 * e1000e_read_phy_reg_m88 - Read m88 PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and storing the retrieved information in data.  Release any acquired
 * semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Mask the offset down to a valid register address before the read */
	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					   data);

	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000e_write_phy_reg_m88 - Write m88 PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Mask the offset down to a valid register address before the write */
	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					    data);

	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_set_page_igp - Set page as on IGP-like PHY(s)
 * @hw: pointer to the HW structure
 * @page: page to set (shifted left when necessary)
 *
 * Sets PHY page required for PHY register access.  Assumes semaphore is
 * already acquired.  Note, this function sets phy.addr to 1 so the caller
 * must set it appropriately (if necessary) after this function returns.
 **/
s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
{
	e_dbg("Setting page 0x%x\n", page);

	hw->phy.addr = 1;

	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
}

/**
 * __e1000e_read_phy_reg_igp - Read igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and stores the retrieved information in data.  Release any acquired
 * semaphores before exiting.
 **/
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset,
				     u16 *data, bool locked)
{
	s32 ret_val = 0;

	if (!locked) {
		if (!hw->phy.ops.acquire)
			return 0;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Registers above MAX_PHY_MULTI_PAGE_REG require a page select first */
	if (offset > MAX_PHY_MULTI_PAGE_REG)
		ret_val = e1000e_write_phy_reg_mdic(hw,
						    IGP01E1000_PHY_PAGE_SELECT,
						    (u16)offset);
	if (!ret_val)
		ret_val = e1000e_read_phy_reg_mdic(hw,
						   MAX_PHY_REG_ADDRESS & offset,
						   data);
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000e_read_phy_reg_igp - Read igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset and stores the
 * retrieved information in data.
 * Release the acquired semaphore before exiting.
 **/
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
}

/**
 * e1000e_read_phy_reg_igp_locked - Read igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired.
 **/
s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
}

/**
 * e1000e_write_phy_reg_igp - Write igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset,
				      u16 data, bool locked)
{
	s32 ret_val = 0;

	if (!locked) {
		if (!hw->phy.ops.acquire)
			return 0;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Registers above MAX_PHY_MULTI_PAGE_REG require a page select first */
	if (offset > MAX_PHY_MULTI_PAGE_REG)
		ret_val = e1000e_write_phy_reg_mdic(hw,
						    IGP01E1000_PHY_PAGE_SELECT,
						    (u16)offset);
	if (!ret_val)
		ret_val = e1000e_write_phy_reg_mdic(hw,
						    MAX_PHY_REG_ADDRESS &
						    offset,
						    data);
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000e_write_phy_reg_igp - Write igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
}

/**
 * e1000e_write_phy_reg_igp_locked - Write igp PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.
 * Assumes semaphore already acquired.
 **/
s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
}

/**
 * __e1000_read_kmrn_reg - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary.  Then reads the PHY register at offset
 * using the kumeran interface.  The information retrieved is stored in data.
 * Release any acquired semaphores before exiting.
 **/
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
				 bool locked)
{
	u32 kmrnctrlsta;

	if (!locked) {
		s32 ret_val = 0;

		if (!hw->phy.ops.acquire)
			return 0;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Issue the read (REN bit set), wait, then read the result back */
	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	ew32(KMRNCTRLSTA, kmrnctrlsta);
	e1e_flush();

	udelay(2);

	kmrnctrlsta = er32(KMRNCTRLSTA);
	*data = (u16)kmrnctrlsta;

	if (!locked)
		hw->phy.ops.release(hw);

	return 0;
}

/**
 * e1000e_read_kmrn_reg - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset using the
 * kumeran interface.  The information retrieved is stored in data.
 * Release the acquired semaphore before exiting.
 **/
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_kmrn_reg(hw, offset, data, false);
}

/**
 * e1000e_read_kmrn_reg_locked - Read kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the kumeran interface.  The
 * information retrieved is stored in data.
 * Assumes semaphore already acquired.
 **/
s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_kmrn_reg(hw, offset, data, true);
}

/**
 * __e1000_write_kmrn_reg - Write kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary.  Then write the data to PHY register
 * at the offset using the kumeran interface.  Release any acquired semaphores
 * before exiting.
 **/
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
				  bool locked)
{
	u32 kmrnctrlsta;

	if (!locked) {
		s32 ret_val = 0;

		if (!hw->phy.ops.acquire)
			return 0;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Writes carry the data in the low bits; no REN bit for a write */
	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | data;
	ew32(KMRNCTRLSTA, kmrnctrlsta);
	e1e_flush();

	udelay(2);

	if (!locked)
		hw->phy.ops.release(hw);

	return 0;
}

/**
 * e1000e_write_kmrn_reg - Write kumeran register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore then writes the data to the PHY register at the offset
 * using the kumeran interface.  Release the acquired semaphore before exiting.
**/ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, false); } /** * e1000e_write_kmrn_reg_locked - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Write the data to PHY register at the offset using the kumeran interface. * Assumes semaphore already acquired. **/ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, true); } /** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * * Sets up Carrier-sense on Transmit and downshift values. **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; return e1e_wphy(hw, I82577_CFG_REG, phy_data); } /** * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock * and downshift values are set also. **/ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. 
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/* For BM PHY this bit is downshift enable */
	if (phy->type != e1000_phy_bm)
		phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;

	/*
	 * Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;

	switch (phy->mdix) {
	case 1:
		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
		break;
	case 2:
		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
		break;
	case 3:
		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
		break;
	case 0:
	default:
		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
		break;
	}

	/*
	 * Options:
	 *   disable_polarity_correction = 0 (default)
	 *       Automatic Correction for Reversed Cable Polarity
	 *   0 - Disabled
	 *   1 - Enabled
	 */
	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
	if (phy->disable_polarity_correction == 1)
		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;

	/* Enable downshift on BM (disabled by default) */
	if (phy->type == e1000_phy_bm)
		phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;

	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	if ((phy->type == e1000_phy_m88) &&
	    (phy->revision < E1000_REVISION_4) &&
	    (phy->id != BME1000_E_PHY_ID_R2)) {
		/*
		 * Force TX_CLK in the Extended PHY Specific Control Register
		 * to 25MHz clock.
		 */
		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		phy_data |= M88E1000_EPSCR_TX_CLK_25;

		if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
			/* 82573L PHY - set the downshift counter to 5x.
			 */
			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
		} else {
			/* Configure Master and Slave downshift values */
			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
		}
		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
		if (ret_val)
			return ret_val;
	}

	if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
		/* Set PHY page 0, register 29 to 0x0003 */
		ret_val = e1e_wphy(hw, 29, 0x0003);
		if (ret_val)
			return ret_val;

		/* Set PHY page 0, register 30 to 0x0000 */
		ret_val = e1e_wphy(hw, 30, 0x0000);
		if (ret_val)
			return ret_val;
	}

	/* Commit the changes. */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val) {
		e_dbg("Error committing the PHY changes\n");
		return ret_val;
	}

	if (phy->type == e1000_phy_82578) {
		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		/* 82578 PHY - set the downshift count to 1x. */
		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
		if (ret_val)
			return ret_val;
	}

	return 0;
}

/**
 * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link
 * @hw: pointer to the HW structure
 *
 * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
 * igp PHY's.
 **/
s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1000_phy_hw_reset(hw);
	if (ret_val) {
		e_dbg("Error resetting the PHY.\n");
		return ret_val;
	}

	/*
	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
	 * timeout issues when LFS is enabled.
	 */
	msleep(100);

	/* disable lplu d0 during driver init */
	ret_val = e1000_set_d0_lplu_state(hw, false);
	if (ret_val) {
		e_dbg("Error Disabling LPLU D0\n");
		return ret_val;
	}
	/* Configure mdi-mdix settings */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~IGP01E1000_PSCR_AUTO_MDIX;

	switch (phy->mdix) {
	case 1:
		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 2:
		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 0:
	default:
		data |= IGP01E1000_PSCR_AUTO_MDIX;
		break;
	}
	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
	if (ret_val)
		return ret_val;

	/* set auto-master slave resolution settings */
	if (hw->mac.autoneg) {
		/*
		 * when autonegotiation advertisement is only 1000Mbps then we
		 * should disable SmartSpeed and enable Auto MasterSlave
		 * resolution as hardware default.
		 */
		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
			/* Disable SmartSpeed */
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;

			/* Set auto Master/Slave resolution process */
			ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
			if (ret_val)
				return ret_val;

			data &= ~CR_1000T_MS_ENABLE;
			ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
			if (ret_val)
				return ret_val;
		}

		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
		if (ret_val)
			return ret_val;

		/* load defaults for future use */
		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
			((data & CR_1000T_MS_VALUE) ?
			e1000_ms_force_master :
			e1000_ms_force_slave) :
			e1000_ms_auto;

		switch (phy->ms_type) {
		case e1000_ms_force_master:
			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
			break;
		case e1000_ms_force_slave:
			data |= CR_1000T_MS_ENABLE;
			data &= ~(CR_1000T_MS_VALUE);
			break;
		case e1000_ms_auto:
			data &= ~CR_1000T_MS_ENABLE;
			/* fallthrough */
		default:
			break;
		}
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
	}

	return ret_val;
}

/**
 * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
 * @hw: pointer to the HW structure
 *
 * Reads the MII auto-neg advertisement register and/or the 1000T control
 * register and if the PHY is already setup for auto-negotiation, then
 * return successful.  Otherwise, setup advertisement and flow control to
 * the appropriate values for the wanted auto-negotiation.
 **/
static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg = 0;

	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	/*
	 * Need to parse both autoneg_advertised and fc and set up
	 * the appropriate PHY registers.  First we will parse for
	 * autoneg_advertised software override.  Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 */

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		e_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		e_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		e_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		e_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		e_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		e_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/*
	 * Check for a software override of the flow control settings, and
	 * setup the PHY advertisement registers accordingly.  If
	 * auto-negotiation is enabled, then software will have to set the
	 * "PAUSE" bits to the correct value in the Auto-Negotiation
	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
	 * negotiation.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames
	 *          but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not support receiving pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
	 *  other:  No software override.  The flow control configuration
	 *          in the EEPROM is used.
	 */
	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		/*
		 * Flow control (Rx & Tx) is completely disabled by a
		 * software over-ride.
		 */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_rx_pause:
		/*
		 * Rx Flow control is enabled, and Tx Flow control is
		 * disabled, by a software over-ride.
		 *
		 * Since there really isn't a way to advertise that we are
		 * capable of Rx Pause ONLY, we will advertise that we
		 * support both symmetric and asymmetric Rx PAUSE.  Later
		 * (in e1000e_config_fc_after_link_up) we will disable the
		 * hw's ability to send PAUSE frames.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled, by a software over-ride.
		 */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case e1000_fc_full:
		/*
		 * Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);

	return ret_val;
}

/**
 * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
 * @hw: pointer to the HW structure
 *
 * Performs initial bounds checking on autoneg advertisement parameter, then
 * configure to advertise the full capability.  Setup the PHY to autoneg
 * and restart the negotiation process between the link partner.  If
 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
 **/
static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_ctrl;

	/*
	 * Perform some bounds checking on the autoneg advertisement
	 * parameter.
	 */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/*
	 * If autoneg_advertised is zero, we assume it was not defaulted
	 * by the calling code so we set to advertise full capability.
	 */
	if (phy->autoneg_advertised == 0)
		phy->autoneg_advertised = phy->autoneg_mask;

	e_dbg("Reconfiguring auto-neg advertisement params\n");
	ret_val = e1000_phy_setup_autoneg(hw);
	if (ret_val) {
		e_dbg("Error Setting up Auto-Negotiation\n");
		return ret_val;
	}
	e_dbg("Restarting Auto-Neg\n");

	/*
	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
	 * the Auto Neg Restart bit in the PHY control register.
	 */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	/*
	 * Does the user want to wait for Auto-Neg to complete here, or
	 * check at a later time (for example, callback routine).
	 */
	if (phy->autoneg_wait_to_complete) {
		ret_val = e1000_wait_autoneg(hw);
		if (ret_val) {
			e_dbg("Error while waiting for autoneg to complete\n");
			return ret_val;
		}
	}

	hw->mac.get_link_status = true;

	return ret_val;
}

/**
 * e1000e_setup_copper_link - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Calls the appropriate function to configure the link for auto-neg or forced
 * speed and duplex.  Then we check for link, once link is established calls
 * to configure collision distance and flow control are called.  If link is
 * not established, we return -E1000_ERR_PHY (-2).
 **/
s32 e1000e_setup_copper_link(struct e1000_hw *hw)
{
	s32 ret_val;
	bool link;

	if (hw->mac.autoneg) {
		/*
		 * Setup autoneg and flow control advertisement and perform
		 * autonegotiation.
		 */
		ret_val = e1000_copper_link_autoneg(hw);
		if (ret_val)
			return ret_val;
	} else {
		/*
		 * PHY will be set to 10H, 10F, 100H or 100F
		 * depending on user settings.
		 */
		e_dbg("Forcing Speed and Duplex\n");
		ret_val = e1000_phy_force_speed_duplex(hw);
		if (ret_val) {
			e_dbg("Error Forcing Speed and Duplex\n");
			return ret_val;
		}
	}

	/*
	 * Check link status.  Wait up to 100 microseconds for link to become
	 * valid.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
					      &link);
	if (ret_val)
		return ret_val;

	if (link) {
		e_dbg("Valid link established!!!\n");
		hw->mac.ops.config_collision_dist(hw);
		ret_val = e1000e_config_fc_after_link_up(hw);
	} else {
		e_dbg("Unable to establish link!!!\n");
	}

	return ret_val;
}

/**
 * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
 * @hw: pointer to the HW structure
 *
 * Calls the PHY setup function to force speed and duplex.  Clears the
 * auto-crossover to force MDI manually.  Waits for link and returns
 * successful if link up is successful, else -E1000_ERR_PHY (-2).
 **/
s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;

	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	e_dbg("IGP PSCR: %X\n", phy_data);

	udelay(1);

	if (phy->autoneg_wait_to_complete) {
		e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;

		if (!link)
			e_dbg("Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
	}

	return ret_val;
}

/**
 * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
 * @hw: pointer to the HW structure
 *
 * Calls the PHY setup function to force speed and duplex.  Clears the
 * auto-crossover to force MDI manually.  Resets the PHY to commit the
 * changes.  If time expires while waiting for link up, we reset the DSP.
 * After reset, TX_CLK and CRS on Tx must be set.  Return successful upon
 * successful completion, else return corresponding error code.
 **/
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/*
	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	e_dbg("M88E1000 PSCR: %X\n", phy_data);

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/* Reset the phy to commit changes.
	 */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_wait_to_complete) {
		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;

		if (!link) {
			if (hw->phy.type != e1000_phy_m88) {
				e_dbg("Link taking longer than expected.\n");
			} else {
				/*
				 * We didn't get link.
				 * Reset the DSP and cross our fingers.
				 */
				ret_val = e1e_wphy(hw,
						   M88E1000_PHY_PAGE_SELECT,
						   0x001d);
				if (ret_val)
					return ret_val;
				ret_val = e1000e_phy_reset_dsp(hw);
				if (ret_val)
					return ret_val;
			}
		}

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;
	}

	/* The remaining fix-ups below apply only to the M88 PHY */
	if (hw->phy.type != e1000_phy_m88)
		return 0;

	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * Resetting the phy means we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock from
	 * the reset value of 2.5MHz.
	 */
	phy_data |= M88E1000_EPSCR_TX_CLK_25;
	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/*
	 * In addition, we must re-enable CRS on Tx for both half and full
	 * duplex.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);

	return ret_val;
}

/**
 * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
 * @hw: pointer to the HW structure
 *
 * Forces the speed and duplex settings of the PHY.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
	if (ret_val)
		return ret_val;

	/* Disable MDI-X support for 10/100 */
	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		return ret_val;

	data &= ~IFE_PMC_AUTO_MDIX;
	data &= ~IFE_PMC_FORCE_MDIX;

	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
	if (ret_val)
		return ret_val;

	e_dbg("IFE PMC: %X\n", data);

	udelay(1);

	if (phy->autoneg_wait_to_complete) {
		e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;

		if (!link)
			e_dbg("Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						      100000, &link);
		if (ret_val)
			return ret_val;
	}

	return 0;
}

/**
 * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
 * @hw: pointer to the HW structure
 * @phy_ctrl: pointer to current value of PHY_CONTROL
 *
 * Forces speed and duplex on the PHY by doing the following: disable flow
 * control, force speed/duplex on the MAC, disable auto speed detection,
 * disable auto-negotiation, configure duplex, configure speed, configure
 * the collision distance, write configuration to CTRL register.  The
 * caller must write to the PHY_CONTROL register for these settings to
 * take affect.
 **/
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl;

	/* Turn off flow control when forcing speed/duplex */
	hw->fc.current_mode = e1000_fc_none;

	/* Force speed/duplex on the mac */
	ctrl = er32(CTRL);
	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	ctrl &= ~E1000_CTRL_SPD_SEL;

	/* Disable Auto Speed Detection */
	ctrl &= ~E1000_CTRL_ASDE;

	/* Disable autoneg on the phy */
	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;

	/* Forcing Full or Half Duplex? */
	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
		ctrl &= ~E1000_CTRL_FD;
		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
		e_dbg("Half Duplex\n");
	} else {
		ctrl |= E1000_CTRL_FD;
		*phy_ctrl |= MII_CR_FULL_DUPLEX;
		e_dbg("Full Duplex\n");
	}

	/* Forcing 10mb or 100mb? */
	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
		ctrl |= E1000_CTRL_SPD_100;
		*phy_ctrl |= MII_CR_SPEED_100;
		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
		e_dbg("Forcing 100mb\n");
	} else {
		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
		*phy_ctrl |= MII_CR_SPEED_10;
		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
		e_dbg("Forcing 10mb\n");
	}

	hw->mac.ops.config_collision_dist(hw);

	/* Commit the MAC-side forcing; PHY_CONTROL is written by caller. */
	ew32(CTRL, ctrl);
}

/**
 * e1000e_set_d3_lplu_state - Sets low power link up state for D3
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D3
 * and SmartSpeed is disabled when active is true, else clear lplu for D3
 * and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 * is used during Dx states where the power conservation is most important.
 * During driver activity, SmartSpeed should be enabled so performance is
 * maintained.
 **/
s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		return ret_val;

	if (!active) {
		data &= ~IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;
		/*
		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
		/* e1000_smart_speed_default: leave SmartSpeed as-is. */
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}

/**
 * e1000e_check_downshift - Checks whether a downshift in speed occurred
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns 1
 *
 * A downshift is detected by querying the PHY link health and stored in
 * phy->speed_downgraded.
 **/
s32 e1000e_check_downshift(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	/* Pick the downshift status register/bit for this PHY family. */
	switch (phy->type) {
	case e1000_phy_m88:
	case e1000_phy_gg82563:
	case e1000_phy_bm:
	case e1000_phy_82578:
		offset = M88E1000_PHY_SPEC_STATUS;
		mask = M88E1000_PSSR_DOWNSHIFT;
		break;
	case e1000_phy_igp_2:
	case e1000_phy_igp_3:
		offset = IGP01E1000_PHY_LINK_HEALTH;
		mask = IGP01E1000_PLHR_SS_DOWNGRADE;
		break;
	default:
		/* speed downshift not supported */
		phy->speed_downgraded = false;
		return 0;
	}

	ret_val = e1e_rphy(hw, offset, &phy_data);

	if (!ret_val)
		phy->speed_downgraded = (phy_data & mask);

	return ret_val;
}

/**
 * e1000_check_polarity_m88 - Checks the polarity.
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY specific status register.
 **/
s32 e1000_check_polarity_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);

	if (!ret_val)
		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 * e1000_check_polarity_igp - Checks the polarity.
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY port status register, and the
 * current speed (since there is no polarity at 100Mbps).
 **/
s32 e1000_check_polarity_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data, offset, mask;

	/*
	 * Polarity is determined based on the speed of
	 * our connection.
	 */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		offset = IGP01E1000_PHY_PCS_INIT_REG;
		mask = IGP01E1000_PHY_POLARITY_MASK;
	} else {
		/*
		 * This really only applies to 10Mbps since
		 * there is no polarity for 100Mbps (always 0).
		 */
		offset = IGP01E1000_PHY_PORT_STATUS;
		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
	}

	ret_val = e1e_rphy(hw, offset, &data);

	if (!ret_val)
		phy->cable_polarity = (data & mask)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 * e1000_check_polarity_ife - Check cable polarity for IFE PHY
 * @hw: pointer to the HW structure
 *
 * Polarity is determined on the polarity reversal feature being enabled.
 **/
s32 e1000_check_polarity_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	/*
	 * Polarity is determined based on the reversal feature being enabled.
	 */
	if (phy->polarity_correction) {
		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
		mask = IFE_PESC_POLARITY_REVERSED;
	} else {
		offset = IFE_PHY_SPECIAL_CONTROL;
		mask = IFE_PSC_FORCE_POLARITY;
	}

	ret_val = e1e_rphy(hw, offset, &phy_data);

	if (!ret_val)
		phy->cable_polarity = (phy_data & mask)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 * e1000_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, which ever happens first.
 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires.
 */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/*
		 * PHY_STATUS is read twice: the autoneg-complete bit is
		 * latched, so the first read clears stale state.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/*
	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed.
	 */
	return ret_val;
}

/**
 * e1000e_phy_has_link_generic - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polling attempts
 * @success: pointer to whether polling was successful or not
 *
 * Polls the PHY status register for link, 'iterations' number of times.
 **/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
				u32 usec_interval, bool *success)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	for (i = 0; i < iterations; i++) {
		/*
		 * Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			/*
			 * If the first read fails, another entity may have
			 * ownership of the resources, wait and try again to
			 * see if they have relinquished the resources yet.
			 * NOTE(review): only the udelay() below is
			 * conditional; the second read always runs.
			 */
			udelay(usec_interval);
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		if (usec_interval >= 1000)
			mdelay(usec_interval/1000);
		else
			udelay(usec_interval);
	}

	/* Link was found iff we broke out before exhausting iterations. */
	*success = (i < iterations);

	return ret_val;
}

/**
 * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
 * @hw: pointer to the HW structure
 *
 * Reads the PHY specific status register to retrieve the cable length
 * information.  The cable length is determined by averaging the minimum and
 * maximum values to get the "average" cable length.  The m88 PHY has four
 * possible cable length values, which are:
 *	Register Value		Cable Length
 *	0			< 50 meters
 *	1			50 - 80 meters
 *	2			80 - 110 meters
 *	3			110 - 140 meters
 *	4			> 140 meters
 **/
s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, index;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
	/* index + 1 is dereferenced below, hence the "- 1" bound. */
	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
		return -E1000_ERR_PHY;

	phy->min_cable_length = e1000_m88_cable_length_table[index];
	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return 0;
}

/**
 * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
 * @hw: pointer to the HW structure
 *
 * The automatic gain control (agc) normalizes the amplitude of the
 * received signal, adjusting for the attenuation produced by the
 * cable.  By reading the AGC registers, which represent the
 * combination of coarse and fine gain value, the value can be put
 * into a lookup table to obtain the approximate cable length
 * for each channel.
 **/
s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, i, agc_value = 0;
	u16 cur_agc_index, max_agc_index = 0;
	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
		IGP02E1000_PHY_AGC_A,
		IGP02E1000_PHY_AGC_B,
		IGP02E1000_PHY_AGC_C,
		IGP02E1000_PHY_AGC_D
	};

	/* Read the AGC registers for all channels */
	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
		if (ret_val)
			return ret_val;

		/*
		 * Getting bits 15:9, which represent the combination of
		 * coarse and fine gain values.  The result is a number
		 * that can be put into the lookup table to obtain the
		 * approximate cable length.
		 */
		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
				IGP02E1000_AGC_LENGTH_MASK;

		/* Array index bound check. */
		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
		    (cur_agc_index == 0))
			return -E1000_ERR_PHY;

		/* Remove min & max AGC values from calculation. */
		if (e1000_igp_2_cable_length_table[min_agc_index] >
		    e1000_igp_2_cable_length_table[cur_agc_index])
			min_agc_index = cur_agc_index;
		if (e1000_igp_2_cable_length_table[max_agc_index] <
		    e1000_igp_2_cable_length_table[cur_agc_index])
			max_agc_index = cur_agc_index;

		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
	}

	/* Average the remaining (middle) channels. */
	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
		      e1000_igp_2_cable_length_table[max_agc_index]);
	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);

	/* Calculate cable length with the error range of +/- 10 meters. */
	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
				(agc_value - IGP02E1000_AGC_RANGE) : 0;
	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return 0;
}

/**
 * e1000e_get_phy_info_m88 - Retrieve PHY information
 * @hw: pointer to the HW structure
 *
 * Valid for only copper links.  Read the PHY status register (sticky read)
 * to verify that link is up.  Read the PHY special control register to
 * determine the polarity and 10base-T extended distance.  Read the PHY
 * special status register to determine MDI/MDIx and current speed.  If
 * speed is 1000, then determine cable length, local and remote receiver.
 **/
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	if (phy->media_type != e1000_media_type_copper) {
		e_dbg("Phy info is only valid for copper media\n");
		return -E1000_ERR_CONFIG;
	}

	/* Single poll (1 iteration, no delay) just to confirm link is up. */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy->polarity_correction = (phy_data &
				    M88E1000_PSCR_POLARITY_REVERSAL);

	ret_val = e1000_check_polarity_m88(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);

	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Set values to "undefined" */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}

/**
 * e1000e_get_phy_info_igp - Retrieve igp PHY information
 * @hw: pointer to the HW structure
 *
 * Read PHY status to determine if link is up.  If link is up, then
 * set/determine 10base-T extended distance and polarity correction.  Read
 * PHY port status to determine MDI/MDIx and speed.  Based on the speed,
 * determine on the cable length, local and remote receiver.
 **/
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	/* IGP always performs polarity correction. */
	phy->polarity_correction = true;

	ret_val = e1000_check_polarity_igp(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}

/**
 * e1000_get_phy_info_ife - Retrieves various IFE PHY states
 * @hw: pointer to the HW structure
 *
 * Populates "phy" structure with various feature states.
 **/
s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
	if (ret_val)
		return ret_val;
	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
				   ? false : true;

	if (phy->polarity_correction) {
		ret_val = e1000_check_polarity_ife(hw);
		if (ret_val)
			return ret_val;
	} else {
		/* Polarity is forced */
		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;

	/* The following parameters are undefined for 10/100 operation. */
	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
	phy->local_rx = e1000_1000t_rx_status_undefined;
	phy->remote_rx = e1000_1000t_rx_status_undefined;

	return 0;
}

/**
 * e1000e_phy_sw_reset - PHY software reset
 * @hw: pointer to the HW structure
 *
 * Does a software reset of the PHY by reading the PHY control register and
 * setting/write the control register reset bit to the PHY.
 **/
s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 phy_ctrl;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	phy_ctrl |= MII_CR_RESET;
	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	udelay(1);

	return ret_val;
}

/**
 * e1000e_phy_hw_reset_generic - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * semaphore (if necessary) and read/set/write the device control reset
 * bit in the PHY.  Wait the appropriate delay time for the device to
 * reset and release the semaphore (if necessary).
 **/
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	if (phy->ops.check_reset_block) {
		ret_val = phy->ops.check_reset_block(hw);
		/* Reset is blocked (e.g. by manageability): treat as done. */
		if (ret_val)
			return 0;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Assert PHY reset via the device control register, then release. */
	ctrl = er32(CTRL);
	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
	e1e_flush();

	udelay(phy->reset_delay_us);

	ew32(CTRL, ctrl);
	e1e_flush();

	udelay(150);

	phy->ops.release(hw);

	return e1000_get_phy_cfg_done(hw);
}

/**
 * e1000e_get_cfg_done - Generic configuration done
 * @hw: pointer to the HW structure
 *
 * Generic function to wait 10 milli-seconds for configuration to complete
 * and return success.
 **/
s32 e1000e_get_cfg_done(struct e1000_hw *hw)
{
	mdelay(10);

	return 0;
}

/**
 * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
 * @hw: pointer to the HW structure
 *
 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
 * NOTE(review): register addresses/values below are vendor-specified
 * magic for the IGP3 PHY; write errors are intentionally ignored.
 **/
s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
{
	e_dbg("Running IGP 3 PHY init script\n");

	/* PHY init IGP 3 */
	/* Enable rise/fall, 10-mode work in class-A */
	e1e_wphy(hw, 0x2F5B, 0x9018);
	/* Remove all caps from Replica path filter */
	e1e_wphy(hw, 0x2F52, 0x0000);
	/* Bias trimming for ADC, AFE and Driver (Default) */
	e1e_wphy(hw, 0x2FB1, 0x8B24);
	/* Increase Hybrid poly bias */
	e1e_wphy(hw, 0x2FB2, 0xF8F0);
	/* Add 4% to Tx amplitude in Gig mode */
	e1e_wphy(hw, 0x2010, 0x10B0);
	/* Disable trimming (TTT) */
	e1e_wphy(hw, 0x2011, 0x0000);
	/* Poly DC correction to 94.6% + 2% for all channels */
	e1e_wphy(hw, 0x20DD, 0x249A);
	/* ABS DC correction to 95.9% */
	e1e_wphy(hw, 0x20DE, 0x00D3);
	/* BG temp curve trim */
	e1e_wphy(hw, 0x28B4, 0x04CE);
	/* Increasing ADC OPAMP stage 1 currents to max */
	e1e_wphy(hw, 0x2F70, 0x29E4);
	/* Force 1000 ( required for enabling PHY regs configuration) */
	e1e_wphy(hw, 0x0000, 0x0140);
	/* Set upd_freq to 6 */
	e1e_wphy(hw, 0x1F30, 0x1606);
	/* Disable NPDFE */
	e1e_wphy(hw, 0x1F31, 0xB814);
	/* Disable adaptive fixed FFE (Default) */
	e1e_wphy(hw, 0x1F35, 0x002A);
	/* Enable FFE hysteresis */
	e1e_wphy(hw, 0x1F3E, 0x0067);
	/* Fixed FFE for short cable lengths */
	e1e_wphy(hw, 0x1F54, 0x0065);
	/* Fixed FFE for medium cable lengths */
	e1e_wphy(hw, 0x1F55, 0x002A);
	/* Fixed FFE for long cable lengths */
	e1e_wphy(hw, 0x1F56, 0x002A);
	/* Enable Adaptive Clip Threshold */
	e1e_wphy(hw, 0x1F72, 0x3FB0);
	/* AHT reset limit to 1 */
	e1e_wphy(hw, 0x1F76, 0xC0FF);
	/* Set AHT master delay to 127 msec */
	e1e_wphy(hw, 0x1F77, 0x1DEC);
	/* Set scan bits for AHT */
	e1e_wphy(hw, 0x1F78, 0xF9EF);
	/* Set AHT Preset bits */
	e1e_wphy(hw, 0x1F79, 0x0210);
	/* Change integ_factor of channel A to 3 */
	e1e_wphy(hw, 0x1895, 0x0003);
	/* Change prop_factor of channels BCD to 8 */
	e1e_wphy(hw, 0x1796, 0x0008);
	/* Change cg_icount + enable integbp for channels BCD */
	e1e_wphy(hw, 0x1798, 0xD008);
	/*
	 * Change cg_icount + enable integbp + change prop_factor_master
	 * to 8 for channel A
	 */
	e1e_wphy(hw, 0x1898, 0xD918);
	/* Disable AHT in Slave mode on channel A */
	e1e_wphy(hw, 0x187A, 0x0800);
	/*
	 * Enable LPLU and disable AN to 1000 in non-D0a states,
	 * Enable SPD+B2B
	 */
	e1e_wphy(hw, 0x0019, 0x008D);
	/* Enable restart AN on an1000_dis change */
	e1e_wphy(hw, 0x001B, 0x2080);
	/* Enable wh_fifo read clock in 10/100 modes */
	e1e_wphy(hw, 0x0014, 0x0045);
	/* Restart AN, Speed selection is 1000 */
	e1e_wphy(hw, 0x0000, 0x1340);

	return 0;
}

/* Internal function pointers */

/**
 * e1000_get_phy_cfg_done - Generic PHY configuration done
 * @hw: pointer to the HW structure
 *
 * Return success if silicon family did not implement a family specific
 * get_cfg_done function.
 **/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_cfg_done)
		return hw->phy.ops.get_cfg_done(hw);

	return 0;
}

/**
 * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
 * @hw: pointer to the HW structure
 *
 * When the silicon family has not implemented a forced speed/duplex
 * function for the PHY, simply return 0.
 **/
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
{
	if (hw->phy.ops.force_speed_duplex)
		return hw->phy.ops.force_speed_duplex(hw);

	return 0;
}

/**
 * e1000e_get_phy_type_from_id - Get PHY type from id
 * @phy_id: phy_id read from the phy
 *
 * Returns the phy type from the id.
**/ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) { enum e1000_phy_type phy_type = e1000_phy_unknown; switch (phy_id) { case M88E1000_I_PHY_ID: case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: phy_type = e1000_phy_gg82563; break; case IGP03E1000_E_PHY_ID: phy_type = e1000_phy_igp_3; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; case BME1000_E_PHY_ID: case BME1000_E_PHY_ID_R2: phy_type = e1000_phy_bm; break; case I82578_E_PHY_ID: phy_type = e1000_phy_82578; break; case I82577_E_PHY_ID: phy_type = e1000_phy_82577; break; case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; default: phy_type = e1000_phy_unknown; break; } return phy_type; } /** * e1000e_determine_phy_address - Determines PHY address. * @hw: pointer to the HW structure * * This uses a trial and error method to loop through possible PHY * addresses. It tests each by reading the PHY ID registers and * checking for a match. **/ s32 e1000e_determine_phy_address(struct e1000_hw *hw) { u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; hw->phy.id = phy_type; for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { hw->phy.addr = phy_addr; i = 0; do { e1000e_get_phy_id(hw); phy_type = e1000e_get_phy_type_from_id(hw->phy.id); /* * If phy_type is valid, break - we found our * PHY address */ if (phy_type != e1000_phy_unknown) return 0; usleep_range(1000, 2000); i++; } while (i < 10); } return -E1000_ERR_PHY_TYPE; } /** * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address * @page: page to access * * Returns the phy address for the page requested. 
 **/
static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
{
	u32 phy_addr = 2;

	/* High pages, GPIO reg (0.25), and the page-select reg live at 1. */
	if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
		phy_addr = 1;

	return phy_addr;
}

/**
 * e1000e_write_phy_reg_bm - Write BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u32 page = offset >> IGP_PAGE_SHIFT;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, false);
		goto release;
	}

	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		u32 page_shift, page_select;

		/*
		 * Page select is register 31 for phy address 1 and 22 for
		 * phy address 2 and 3.  Page select is shifted only for
		 * phy address 1.
		 */
		if (hw->phy.addr == 1) {
			page_shift = IGP_PAGE_SHIFT;
			page_select = IGP01E1000_PHY_PAGE_SELECT;
		} else {
			page_shift = 0;
			page_select = BM_PHY_PAGE_SELECT;
		}

		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
						    (page << page_shift));
		if (ret_val)
			goto release;
	}

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					    data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000e_read_phy_reg_bm - Read BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and storing the retrieved information in data.  Release any acquired
 * semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;
	u32 page = offset >> IGP_PAGE_SHIFT;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 true, false);
		goto release;
	}

	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		u32 page_shift, page_select;

		/*
		 * Page select is register 31 for phy address 1 and 22 for
		 * phy address 2 and 3.  Page select is shifted only for
		 * phy address 1.
		 */
		if (hw->phy.addr == 1) {
			page_shift = IGP_PAGE_SHIFT;
			page_select = IGP01E1000_PHY_PAGE_SELECT;
		} else {
			page_shift = 0;
			page_select = BM_PHY_PAGE_SELECT;
		}

		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
						    (page << page_shift));
		if (ret_val)
			goto release;
	}

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					   data);
release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000e_read_phy_reg_bm2 - Read BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and storing the retrieved information in data.  Release any acquired
 * semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;
	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 true, false);
		goto release;
	}

	/* BM2 parts use phy address 1 for everything. */
	hw->phy.addr = 1;

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
						    page);

		if (ret_val)
			goto release;
	}

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					   data);
release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000e_write_phy_reg_bm2 - Write BM PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, false);
		goto release;
	}

	hw->phy.addr = 1;

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
						    page);

		if (ret_val)
			goto release;
	}

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					    data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 * @hw: pointer to the HW structure
 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
 *
 * Assumes semaphore already acquired and phy_reg points to a valid memory
 * address to store contents of the BM_WUC_ENABLE_REG register.
 **/
s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
	s32 ret_val;
	u16 temp;

	/* All page select, port ctrl and wakeup registers use phy address 1 */
	hw->phy.addr = 1;

	/* Select Port Control Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
	if (ret_val) {
		e_dbg("Could not set Port Control page\n");
		return ret_val;
	}

	/* Save the caller's copy of BM_WUC_ENABLE_REG for later restore. */
	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (ret_val) {
		e_dbg("Could not read PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		return ret_val;
	}

	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
	temp = *phy_reg;
	temp |= BM_WUC_ENABLE_BIT;
	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);

	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
	if (ret_val) {
		e_dbg("Could not write PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		return ret_val;
	}

	/*
	 * Select Host Wakeup Registers page - caller now able to write
	 * registers on the Wakeup registers page
	 */
	return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
}

/**
 * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
 * @hw: pointer to the HW structure
 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
 *
 * Restore BM_WUC_ENABLE_REG to its original value.
 *
 * Assumes semaphore already acquired and *phy_reg is the contents of the
 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
 * caller.
 **/
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
	s32 ret_val = 0;

	/* Select Port Control Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
	if (ret_val) {
		e_dbg("Could not set Port Control page\n");
		return ret_val;
	}

	/* Restore 769.17 to its original value */
	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
	if (ret_val)
		e_dbg("Could not restore PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);

	return ret_val;
}

/**
 * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read or written
 * @data: pointer to the data to read or write
 * @read: determines if operation is read or write
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Read the PHY register at offset and store the retrieved information in
 * data, or write data to PHY register at offset.  Note the procedure to
 * access the PHY wakeup registers is different than reading the other PHY
 * registers.  It works as such:
 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 * 2) Set page to 800 for host (801 if we were manageability)
 * 3) Write the address using the address opcode (0x11)
 * 4) Read or write the data using the data opcode (0x12)
 * 5) Restore 769.17.2 to its original value
 *
 * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
 * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
 *
 * Assumes semaphore is already acquired.  When page_set==true, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()).
 **/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
					  u16 *data, bool read, bool page_set)
{
	s32 ret_val;
	u16 reg = BM_PHY_REG_NUM(offset);
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 phy_reg = 0;

	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
	if ((hw->mac.type == e1000_pchlan) &&
	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
		e_dbg("Attempting to access page %d while gig enabled.\n",
		      page);

	if (!page_set) {
		/* Enable access to PHY wakeup registers */
		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
		if (ret_val) {
			e_dbg("Could not enable PHY wakeup reg access\n");
			return ret_val;
		}
	}

	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);

	/* Write the Wakeup register page offset value using opcode 0x11 */
	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
	if (ret_val) {
		e_dbg("Could not write address opcode to page %d\n", page);
		return ret_val;
	}

	if (read) {
		/* Read the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						   data);
	} else {
		/* Write the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						    *data);
	}

	if (ret_val) {
		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
		return ret_val;
	}

	if (!page_set)
		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

	return ret_val;
}

/**
 * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, restore the link to previous
 * settings.
 **/
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg &= ~MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
}

/**
 * e1000_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down the PHY by setting the power-down bit in PHY_CONTROL; the PHY
 * retains its other settings across the power down/up cycle.
 **/
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
	usleep_range(1000, 2000);
}

/**
 * e1000e_commit_phy - Soft PHY reset
 * @hw: pointer to the HW structure
 *
 * Performs a soft PHY reset on those that apply.  This is a function pointer
 * entry point called by drivers.
 **/
s32 e1000e_commit_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.commit)
		return hw->phy.ops.commit(hw);

	return 0;
}

/**
 * e1000_set_d0_lplu_state - Sets low power link up state for D0
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D0
 * and SmartSpeed is disabled when active is true, else clear lplu for D0
 * and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 * is used during Dx states where the power conservation is most important.
* During driver activity, SmartSpeed should be enabled so performance is * maintained. This is a function pointer entry point called by drivers. **/ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) { if (hw->phy.ops.set_d0_lplu_state) return hw->phy.ops.set_d0_lplu_state(hw, active); return 0; } /** * __e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset * and stores the retrieved information in data. Release any acquired * semaphore before exiting. **/ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool locked, bool page_set) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (!locked) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, page_set); goto out; } if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, data, true); goto out; } if (!page_set) { if (page == HV_INTC_FC_PAGE_START) page = 0; if (reg > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_set_page_igp(hw, (page << IGP_PAGE_SHIFT)); hw->phy.addr = phy_addr; if (ret_val) goto out; } } e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the 
read data * * Acquires semaphore then reads the PHY register at offset and stores * the retrieved information in data. Release the acquired semaphore * before exiting. **/ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, false, false); } /** * e1000_read_phy_reg_hv_locked - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. **/ s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, true, false); } /** * e1000_read_phy_reg_page_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired and page already set. **/ s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, true, true); } /** * __e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, bool locked, bool page_set) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (!locked) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, page_set); goto out; } if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, &data, false); goto out; } if (!page_set) { if (page == HV_INTC_FC_PAGE_START) page = 0; /* * Workaround MDIO accesses being disabled after entering IEEE * Power Down (when bit 11 of the PHY Control register is set) */ if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, &data2, false); if (ret_val) goto out; } if (reg > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_set_page_igp(hw, (page << IGP_PAGE_SHIFT)); hw->phy.addr = phy_addr; if (ret_val) goto out; } } e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to PHY register at the offset. * Release the acquired semaphores before exiting. 
**/ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, false, false); } /** * e1000_write_phy_reg_hv_locked - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. Assumes semaphore * already acquired. **/ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, true, false); } /** * e1000_write_phy_reg_page_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. Assumes semaphore * already acquired and page already set. **/ s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, true, true); } /** * e1000_get_phy_addr_for_hv_page - Get PHY address based on page * @page: page to be accessed **/ static u32 e1000_get_phy_addr_for_hv_page(u32 page) { u32 phy_addr = 2; if (page >= HV_INTC_FC_PAGE_START) phy_addr = 1; return phy_addr; } /** * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers * @hw: pointer to the HW structure * @offset: register offset to be read or written * @data: pointer to the data to be read or written * @read: determines if operation is read or write * * Reads the PHY register at offset and stores the retreived information * in data. Assumes semaphore already acquired. Note that the procedure * to access these regs uses the address port and data port to read/write. * These accesses done with PHY address 2 and without using pages. 
**/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read) { s32 ret_val; u32 addr_reg = 0; u32 data_reg = 0; /* This takes care of the difference with desktop vs mobile phy */ addr_reg = (hw->phy.type == e1000_phy_82578) ? I82578_ADDR_REG : I82577_ADDR_REG; data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ hw->phy.addr = 2; /* masking with 0x3F to remove the page from offset */ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); if (ret_val) { e_dbg("Could not write the Address Offset port register\n"); return ret_val; } /* Read or write the data value next */ if (read) ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); else ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); if (ret_val) e_dbg("Could not access the Data port register\n"); return ret_val; } /** * e1000_link_stall_workaround_hv - Si workaround * @hw: pointer to the HW structure * * This function works around a Si bug where the link partner can get * a link up indication before the PHY does. If small packets are sent * by the link partner they can be placed in the packet buffer without * being properly accounted for by the PHY and will stall preventing * further packets from being received. The workaround is to clear the * packet buffer after the PHY detects link up. 
**/ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) { s32 ret_val = 0; u16 data; if (hw->phy.type != e1000_phy_82578) return 0; /* Do not apply workaround if in PHY loopback bit 14 set */ e1e_rphy(hw, PHY_CONTROL, &data); if (data & PHY_CONTROL_LB) return 0; /* check if link is up and at 1Gbps */ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); if (ret_val) return ret_val; data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK; if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) return 0; msleep(200); /* flush the packets in the fifo buffer */ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED); if (ret_val) return ret_val; return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); } /** * e1000_check_polarity_82577 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ s32 e1000_check_polarity_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; return ret_val; } /** * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. 
**/ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000_get_phy_info_82577 - Retrieve I82577 PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_82577(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (ret_val) return ret_val; phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); if (ret_val) return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return 0; } /** * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY * @hw: pointer to the HW structure * * Reads the diagnostic status register and verifies result is valid before * placing it in the phy_cable_length field. **/ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, length; ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) return ret_val; length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> I82577_DSTATUS_CABLE_LENGTH_SHIFT; if (length == E1000_CABLE_LENGTH_UNDEFINED) ret_val = -E1000_ERR_PHY; phy->cable_length = length; return 0; }
gpl-2.0
caplio/sc02c
drivers/mtd/maps/tsunami_flash.c
2044
2473
/*
 * tsunami_flash.c
 *
 * Map driver for the flash chip on the Alpha DS10 (Tsunami TIG bus).
 */
#include <asm/io.h>
#include <asm/core_tsunami.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

/* TIG port that gates flash accesses, plus its enable/disable values.
 * (FLASH_DISABLE_BYTE is currently unused but documents the protocol.) */
#define FLASH_ENABLE_PORT 0x00C00001
#define FLASH_ENABLE_BYTE 0x01
#define FLASH_DISABLE_BYTE 0x00

/* Size of the addressable flash window on the TIG bus */
#define MAX_TIG_FLASH_SIZE (12*1024*1024)

/* Read a single byte from the flash via the TIG bus. */
static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
{
	map_word val;

	val.x[0] = tsunami_tig_readb(offset);
	return val;
}

/* Write a single byte to the flash via the TIG bus. */
static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
{
	tsunami_tig_writeb(value.x[0], offset);
}

/*
 * Byte-wise copy out of the flash window.  Accesses at or beyond
 * MAX_TIG_FLASH_SIZE are silently dropped (the loop simply stops),
 * matching the device's addressable range.
 */
static void tsunami_flash_copy_from(struct map_info *map, void *addr,
				    unsigned long offset, ssize_t len)
{
	unsigned char *dest = addr;

	while (len && (offset < MAX_TIG_FLASH_SIZE)) {
		*dest = tsunami_tig_readb(offset);
		offset++;
		dest++;
		len--;
	}
}

/*
 * Byte-wise copy into the flash window; same bounds behavior as
 * tsunami_flash_copy_from().
 */
static void tsunami_flash_copy_to(struct map_info *map, unsigned long offset,
				  const void *addr, ssize_t len)
{
	const unsigned char *src = addr;

	while (len && (offset < MAX_TIG_FLASH_SIZE)) {
		tsunami_tig_writeb(*src, offset);
		offset++;
		src++;
		len--;
	}
}

/*
 * Deliberately don't provide operations wider than 8 bits.  I don't
 * have them and it scares me to think how you could mess up if
 * you tried to use them.  Buswidth is correctly set so I'm safe.
 */
static struct map_info tsunami_flash_map = {
	.name = "flash chip on the Tsunami TIG bus",
	.size = MAX_TIG_FLASH_SIZE,
	.phys = NO_XIP,
	.bankwidth = 1,
	.read = tsunami_flash_read8,
	.copy_from = tsunami_flash_copy_from,
	.write = tsunami_flash_write8,
	.copy_to = tsunami_flash_copy_to,
};

static struct mtd_info *tsunami_flash_mtd;

/* Unregister and destroy the MTD created in init_tsunami_flash(). */
static void __exit cleanup_tsunami_flash(void)
{
	struct mtd_info *mtd;

	mtd = tsunami_flash_mtd;
	if (mtd) {
		del_mtd_device(mtd);
		map_destroy(mtd);
	}
	/* was "= 0": use NULL for pointers */
	tsunami_flash_mtd = NULL;
}

/*
 * Enable the flash window, then try each chip probe in turn until one
 * recognizes the device.  Returns 0 on success, -ENXIO if no probe matched.
 */
static int __init init_tsunami_flash(void)
{
	/* was "char **type" aliasing a const array - const-correct now */
	static const char *rom_probe_types[] = {
		"cfi_probe", "jedec_probe", "map_rom", NULL };
	const char **type;

	tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);

	tsunami_flash_mtd = NULL;
	for (type = rom_probe_types; !tsunami_flash_mtd && *type; type++)
		tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);

	if (tsunami_flash_mtd) {
		tsunami_flash_mtd->owner = THIS_MODULE;
		add_mtd_device(tsunami_flash_mtd);
		return 0;
	}
	return -ENXIO;
}

module_init(init_tsunami_flash);
module_exit(cleanup_tsunami_flash);
gpl-2.0
hiikezoe/android_kernel_lge_lgl21
arch/arm/mach-ux500/cpu-db8500.c
2556
5590
/*
 * Copyright (C) 2008-2009 ST-Ericsson
 *
 * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <asm/mach/map.h>
#include <asm/pmu.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
#include <mach/usb.h>

#include "devices-db8500.h"
#include "ste-dma40-db8500.h"

/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_uart_io_desc[] __initdata = {
	__IO_DEV_DESC(U8500_UART0_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_UART2_BASE, SZ_4K),
};

/* Common static mappings for all U8500 silicon revisions */
static struct map_desc u8500_io_desc[] __initdata = {
	__IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),

	__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_CLKRST2_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_CLKRST3_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_CLKRST5_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_CLKRST6_BASE, SZ_4K),

	__IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
};

/* Extra mappings for early (ED) silicon only */
static struct map_desc u8500_ed_io_desc[] __initdata = {
	__IO_DEV_DESC(U8500_MTU0_BASE_ED, SZ_4K),
	__IO_DEV_DESC(U8500_CLKRST7_BASE_ED, SZ_8K),
};

/* Extra mappings for v1 silicon only */
static struct map_desc u8500_v1_io_desc[] __initdata = {
	__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
	__IO_DEV_DESC(U8500_PRCMU_TCDM_BASE_V1, SZ_4K),
};

/* Extra mappings for v2 silicon only */
static struct map_desc u8500_v2_io_desc[] __initdata = {
	__IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
};

/*
 * Set up the static I/O mappings for the U8500, selecting the extra
 * revision-specific tables at runtime, and cache the virtual PRCMU base.
 */
void __init u8500_map_io(void)
{
	/*
	 * Map the UARTs early so that the DEBUG_LL stuff continues to work.
	 */
	iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));

	ux500_map_io();

	iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));

	if (cpu_is_u8500ed())
		iotable_init(u8500_ed_io_desc, ARRAY_SIZE(u8500_ed_io_desc));
	else if (cpu_is_u8500v1())
		iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
	else if (cpu_is_u8500v2())
		iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));

	/* cache the PRCMU virtual address for later register accesses */
	_PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
}

static struct resource db8500_pmu_resources[] = {
	[0] = {
		.start		= IRQ_DB8500_PMU,
		.end		= IRQ_DB8500_PMU,
		.flags		= IORESOURCE_IRQ,
	},
};

/*
 * The PMU IRQ lines of two cores are wired together into a single interrupt.
 * Bounce the interrupt to the other core if it's not ours.
 */
static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
{
	irqreturn_t ret = handler(irq, dev);
	int other = !smp_processor_id();

	if (ret == IRQ_NONE && cpu_online(other))
		irq_set_affinity(irq, cpumask_of(other));

	/*
	 * We should be able to get away with the amount of IRQ_NONEs we give,
	 * while still having the spurious IRQ detection code kick in if the
	 * interrupt really starts hitting spuriously.
	 */
	return ret;
}

static struct arm_pmu_platdata db8500_pmu_platdata = {
	.handle_irq		= db8500_pmu_handler,
};

static struct platform_device db8500_pmu_device = {
	.name			= "arm-pmu",
	.id			= ARM_PMU_DEVICE_CPU,
	.num_resources		= ARRAY_SIZE(db8500_pmu_resources),
	.resource		= db8500_pmu_resources,
	.dev.platform_data	= &db8500_pmu_platdata,
};

static struct platform_device db8500_prcmu_device = {
	.name			= "db8500-prcmu",
};

/* Devices registered for every board in u8500_init_devices() */
static struct platform_device *platform_devs[] __initdata = {
	&u8500_dma40_device,
	&db8500_pmu_device,
	&db8500_prcmu_device,
};

/* Physical bases of the nine DB8500 GPIO banks */
static resource_size_t __initdata db8500_gpio_base[] = {
	U8500_GPIOBANK0_BASE,
	U8500_GPIOBANK1_BASE,
	U8500_GPIOBANK2_BASE,
	U8500_GPIOBANK3_BASE,
	U8500_GPIOBANK4_BASE,
	U8500_GPIOBANK5_BASE,
	U8500_GPIOBANK6_BASE,
	U8500_GPIOBANK7_BASE,
	U8500_GPIOBANK8_BASE,
};

/* Register all GPIO banks; sleep mode is only supported on v2 silicon. */
static void __init db8500_add_gpios(void)
{
	struct nmk_gpio_platform_data pdata = {
		/* No custom data yet */
	};

	if (cpu_is_u8500v2())
		pdata.supports_sleepmode = true;

	dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
			 IRQ_DB8500_GPIO0, &pdata);
}

/* DMA request lines for the USB OTG IN endpoints */
static int usb_db8500_rx_dma_cfg[] = {
	DB8500_DMA_DEV38_USB_OTG_IEP_1_9,
	DB8500_DMA_DEV37_USB_OTG_IEP_2_10,
	DB8500_DMA_DEV36_USB_OTG_IEP_3_11,
	DB8500_DMA_DEV19_USB_OTG_IEP_4_12,
	DB8500_DMA_DEV18_USB_OTG_IEP_5_13,
	DB8500_DMA_DEV17_USB_OTG_IEP_6_14,
	DB8500_DMA_DEV16_USB_OTG_IEP_7_15,
	DB8500_DMA_DEV39_USB_OTG_IEP_8
};

/* DMA request lines for the USB OTG OUT endpoints */
static int usb_db8500_tx_dma_cfg[] = {
	DB8500_DMA_DEV38_USB_OTG_OEP_1_9,
	DB8500_DMA_DEV37_USB_OTG_OEP_2_10,
	DB8500_DMA_DEV36_USB_OTG_OEP_3_11,
	DB8500_DMA_DEV19_USB_OTG_OEP_4_12,
	DB8500_DMA_DEV18_USB_OTG_OEP_5_13,
	DB8500_DMA_DEV17_USB_OTG_OEP_6_14,
	DB8500_DMA_DEV16_USB_OTG_OEP_7_15,
	DB8500_DMA_DEV39_USB_OTG_OEP_8
};

/*
 * This function is called from the board init
 */
void __init u8500_init_devices(void)
{
	/* ED silicon needs the DMA40 fixup applied before registration */
	if (cpu_is_u8500ed())
		dma40_u8500ed_fixup();

	db8500_add_rtc();
	db8500_add_gpios();
	db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);

	platform_device_register_simple("cpufreq-u8500", -1, NULL, 0);
	platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));

	return ;
}
gpl-2.0
sleshepic/l900_MC2_Kernel
arch/arm/mach-pxa/pcm990-baseboard.c
2812
13622
/* * arch/arm/mach-pxa/pcm990-baseboard.c * Support for the Phytec phyCORE-PXA270 Development Platform (PCM-990). * * Refer * http://www.phytec.com/products/rdk/ARM-XScale/phyCORE-XScale-PXA270.html * for additional hardware info * * Author: Juergen Kilb * Created: April 05, 2005 * Copyright: Phytec Messtechnik GmbH * e-Mail: armlinux@phytec.de * * based on Intel Mainstone Board * * Copyright 2007 Juergen Beisert @ Pengutronix (j.beisert@pengutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/pwm_backlight.h> #include <media/soc_camera.h> #include <asm/gpio.h> #include <mach/camera.h> #include <asm/mach/map.h> #include <mach/pxa27x.h> #include <mach/audio.h> #include <mach/mmc.h> #include <mach/ohci.h> #include <mach/pcm990_baseboard.h> #include <mach/pxafb.h> #include "devices.h" #include "generic.h" static unsigned long pcm990_pin_config[] __initdata = { /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* USB */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, /* PWM0 */ GPIO16_PWM0_OUT, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, }; /* * pcm990_lcd_power - control power supply to the LCD * @on: 0 = switch off, 1 = switch on * * Called by the pxafb driver */ #ifndef CONFIG_PCM990_DISPLAY_NONE static void pcm990_lcd_power(int on, struct fb_var_screeninfo *var) { if (on) { /* enable LCD-Latches * power on LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = PCM990_CTRL_LCDPWR + PCM990_CTRL_LCDON; } else { /* disable LCD-Latches * power off LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = 0x00; } } #endif #if 
defined(CONFIG_PCM990_DISPLAY_SHARP) static struct pxafb_mode_info fb_info_sharp_lq084v1dg21 = { .pixclock = 28000, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 20, .left_margin = 103, .right_margin = 47, .vsync_len = 6, .upper_margin = 28, .lower_margin = 5, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_sharp_lq084v1dg21, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #elif defined(CONFIG_PCM990_DISPLAY_NEC) struct pxafb_mode_info fb_info_nec_nl6448bc20_18d = { .pixclock = 39720, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 32, .left_margin = 16, .right_margin = 48, .vsync_len = 2, .upper_margin = 12, .lower_margin = 17, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_nec_nl6448bc20_18d, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #endif static struct platform_pwm_backlight_data pcm990_backlight_data = { .pwm_id = 0, .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, }; static struct platform_device pcm990_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &pcm990_backlight_data, }, }; /* * The PCM-990 development baseboard uses PCM-027's hardware in the * following way: * * - LCD support is in use * - GPIO16 is output for back light on/off with PWM * - GPIO58 ... 
GPIO73 are outputs for display data * - GPIO74 is output output for LCDFCLK * - GPIO75 is output for LCDLCLK * - GPIO76 is output for LCDPCLK * - GPIO77 is output for LCDBIAS * - MMC support is in use * - GPIO32 is output for MMCCLK * - GPIO92 is MMDAT0 * - GPIO109 is MMDAT1 * - GPIO110 is MMCS0 * - GPIO111 is MMCS1 * - GPIO112 is MMCMD * - IDE/CF card is in use * - GPIO48 is output /POE * - GPIO49 is output /PWE * - GPIO50 is output /PIOR * - GPIO51 is output /PIOW * - GPIO54 is output /PCE2 * - GPIO55 is output /PREG * - GPIO56 is input /PWAIT * - GPIO57 is output /PIOS16 * - GPIO79 is output PSKTSEL * - GPIO85 is output /PCE1 * - FFUART is in use * - GPIO34 is input FFRXD * - GPIO35 is input FFCTS * - GPIO36 is input FFDCD * - GPIO37 is input FFDSR * - GPIO38 is input FFRI * - GPIO39 is output FFTXD * - GPIO40 is output FFDTR * - GPIO41 is output FFRTS * - BTUART is in use * - GPIO42 is input BTRXD * - GPIO43 is output BTTXD * - GPIO44 is input BTCTS * - GPIO45 is output BTRTS * - IRUART is in use * - GPIO46 is input STDRXD * - GPIO47 is output STDTXD * - AC97 is in use*) * - GPIO28 is input AC97CLK * - GPIO29 is input AC97DatIn * - GPIO30 is output AC97DatO * - GPIO31 is output AC97SYNC * - GPIO113 is output AC97_RESET * - SSP is in use * - GPIO23 is output SSPSCLK * - GPIO24 is output chip select to Max7301 * - GPIO25 is output SSPTXD * - GPIO26 is input SSPRXD * - GPIO27 is input for Max7301 IRQ * - GPIO53 is input SSPSYSCLK * - SSP3 is in use * - GPIO81 is output SSPTXD3 * - GPIO82 is input SSPRXD3 * - GPIO83 is output SSPSFRM * - GPIO84 is output SSPCLK3 * * Otherwise claimed GPIOs: * GPIO1 -> IRQ from user switch * GPIO9 -> IRQ from power management * GPIO10 -> IRQ from WML9712 AC97 controller * GPIO11 -> IRQ from IDE controller * GPIO12 -> IRQ from CF controller * GPIO13 -> IRQ from CF controller * GPIO14 -> GPIO free * GPIO15 -> /CS1 selects baseboard's Control CPLD (U7, 16 bit wide data path) * GPIO19 -> GPIO free * GPIO20 -> /SDCS2 * GPIO21 -> /CS3 PC 
card socket select * GPIO33 -> /CS5 network controller select * GPIO78 -> /CS2 (16 bit wide data path) * GPIO80 -> /CS4 (16 bit wide data path) * GPIO86 -> GPIO free * GPIO87 -> GPIO free * GPIO90 -> LED0 on CPU module * GPIO91 -> LED1 on CPI module * GPIO117 -> SCL * GPIO118 -> SDA */ static unsigned long pcm990_irq_enabled; static void pcm990_mask_ack_irq(struct irq_data *d) { int pcm990_irq = (d->irq - PCM027_IRQ(0)); PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq)); } static void pcm990_unmask_irq(struct irq_data *d) { int pcm990_irq = (d->irq - PCM027_IRQ(0)); /* the irq can be acknowledged only if deasserted, so it's done here */ PCM990_INTSETCLR |= 1 << pcm990_irq; PCM990_INTMSKENA = (pcm990_irq_enabled |= (1 << pcm990_irq)); } static struct irq_chip pcm990_irq_chip = { .irq_mask_ack = pcm990_mask_ack_irq, .irq_unmask = pcm990_unmask_irq, }; static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; do { /* clear our parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = PCM027_IRQ(0) + __ffs(pending); generic_handle_irq(irq); } pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; } while (pending); } static void __init pcm990_init_irq(void) { int irq; /* setup extra PCM990 irqs */ for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { irq_set_chip_and_handler(irq, &pcm990_irq_chip, handle_level_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ PCM990_INTSETCLR = 0xFF; irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); } static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, void *data) { int err; err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) printk(KERN_ERR "pcm990_mci_init: MMC/SD: can't request MMC " "card detect 
IRQ\n"); return err; } static void pcm990_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data *p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = PCM990_CTRL_MMC2PWR; else __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = ~PCM990_CTRL_MMC2PWR; } static void pcm990_mci_exit(struct device *dev, void *data) { free_irq(PCM027_MMCDET_IRQ, data); } #define MSECS_PER_JIFFY (1000/HZ) static struct pxamci_platform_data pcm990_mci_platform_data = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .init = pcm990_mci_init, .setpower = pcm990_mci_setpower, .exit = pcm990_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static struct pxaohci_platform_data pcm990_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW, .power_on_delay = 10, }; /* * PXA27x Camera specific stuff */ #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) static unsigned long pcm990_camera_pin_config[] = { /* CIF */ GPIO98_CIF_DD_0, GPIO105_CIF_DD_1, GPIO104_CIF_DD_2, GPIO103_CIF_DD_3, GPIO95_CIF_DD_4, GPIO94_CIF_DD_5, GPIO93_CIF_DD_6, GPIO108_CIF_DD_7, GPIO107_CIF_DD_8, GPIO106_CIF_DD_9, GPIO42_CIF_MCLK, GPIO45_CIF_PCLK, GPIO43_CIF_FV, GPIO44_CIF_LV, }; /* * CICR4: PCLK_EN: Pixel clock is supplied by the sensor * MCLK_EN: Master clock is generated by PXA * PCP: Data sampled on the falling edge of pixel clock */ struct pxacamera_platform_data pcm990_pxacamera_platform_data = { .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_10 | PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN/* | PXA_CAMERA_PCP*/, .mclk_10khz = 1000, }; #include <linux/i2c/pca953x.h> static struct pca953x_platform_data pca9536_data = { .gpio_base = NR_BUILTIN_GPIO, }; static int gpio_bus_switch = -EINVAL; static int pcm990_camera_set_bus_param(struct soc_camera_link *link, unsigned long flags) { if 
(gpio_bus_switch < 0) { if (flags == SOCAM_DATAWIDTH_10) return 0; else return -EINVAL; } if (flags & SOCAM_DATAWIDTH_8) gpio_set_value(gpio_bus_switch, 1); else gpio_set_value(gpio_bus_switch, 0); return 0; } static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link) { int ret; if (gpio_bus_switch < 0) { ret = gpio_request(NR_BUILTIN_GPIO, "camera"); if (!ret) { gpio_bus_switch = NR_BUILTIN_GPIO; gpio_direction_output(gpio_bus_switch, 0); } } if (gpio_bus_switch >= 0) return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10; else return SOCAM_DATAWIDTH_10; } static void pcm990_camera_free_bus(struct soc_camera_link *link) { if (gpio_bus_switch < 0) return; gpio_free(gpio_bus_switch); gpio_bus_switch = -EINVAL; } /* Board I2C devices. */ static struct i2c_board_info __initdata pcm990_i2c_devices[] = { { /* Must initialize before the camera(s) */ I2C_BOARD_INFO("pca9536", 0x41), .platform_data = &pca9536_data, }, }; static struct i2c_board_info pcm990_camera_i2c[] = { { I2C_BOARD_INFO("mt9v022", 0x48), }, { I2C_BOARD_INFO("mt9m001", 0x5d), }, }; static struct soc_camera_link iclink[] = { { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[0], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, }, { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[1], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, }, }; static struct platform_device pcm990_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &iclink[0], }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { .platform_data = &iclink[1], }, }, }; #endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */ /* * enable generic access to the base board control CPLDs U6 and U7 */ static struct map_desc pcm990_io_desc[] 
__initdata = { { .virtual = PCM990_CTRL_BASE, .pfn = __phys_to_pfn(PCM990_CTRL_PHYS), .length = PCM990_CTRL_SIZE, .type = MT_DEVICE /* CPLD */ }, { .virtual = PCM990_CF_PLD_BASE, .pfn = __phys_to_pfn(PCM990_CF_PLD_PHYS), .length = PCM990_CF_PLD_SIZE, .type = MT_DEVICE /* CPLD */ } }; /* * system init for baseboard usage. Will be called by pcm027 init. * * Add platform devices present on this baseboard and init * them from CPU side as far as required to use them later on */ void __init pcm990_baseboard_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config)); /* register CPLD access */ iotable_init(ARRAY_AND_SIZE(pcm990_io_desc)); /* register CPLD's IRQ controller */ pcm990_init_irq(); #ifndef CONFIG_PCM990_DISPLAY_NONE pxa_set_fb_info(NULL, &pcm990_fbinfo); #endif platform_device_register(&pcm990_backlight_device); /* MMC */ pxa_set_mci_info(&pcm990_mci_platform_data); /* USB host */ pxa_set_ohci_info(&pcm990_ohci_platform_data); pxa_set_i2c_info(NULL); pxa_set_ac97_info(NULL); #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config)); pxa_set_camera_info(&pcm990_pxacamera_platform_data); i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices)); platform_device_register(&pcm990_camera[0]); platform_device_register(&pcm990_camera[1]); #endif printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n"); }
gpl-2.0
5victor/linux
drivers/staging/line6/pcm.c
3068
12775
/* * Line6 Linux USB driver - 0.9.1beta * * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/slab.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "audio.h" #include "capture.h" #include "driver.h" #include "playback.h" #include "pod.h" #ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE static struct snd_line6_pcm *dev2pcm(struct device *dev) { struct usb_interface *interface = to_usb_interface(dev); struct usb_line6 *line6 = usb_get_intfdata(interface); struct snd_line6_pcm *line6pcm = line6->line6pcm; return line6pcm; } /* "read" request on "impulse_volume" special file. */ static ssize_t pcm_get_impulse_volume(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_volume); } /* "write" request on "impulse_volume" special file. */ static ssize_t pcm_set_impulse_volume(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_line6_pcm *line6pcm = dev2pcm(dev); int value = simple_strtoul(buf, NULL, 10); line6pcm->impulse_volume = value; if (value > 0) line6_pcm_start(line6pcm, MASK_PCM_IMPULSE); else line6_pcm_stop(line6pcm, MASK_PCM_IMPULSE); return count; } /* "read" request on "impulse_period" special file. */ static ssize_t pcm_get_impulse_period(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_period); } /* "write" request on "impulse_period" special file. 
*/ static ssize_t pcm_set_impulse_period(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { dev2pcm(dev)->impulse_period = simple_strtoul(buf, NULL, 10); return count; } static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume, pcm_set_impulse_volume); static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period, pcm_set_impulse_period); #endif int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels) { unsigned long flags_old = __sync_fetch_and_or(&line6pcm->flags, channels); unsigned long flags_new = flags_old | channels; int err = 0; #if LINE6_BACKUP_MONITOR_SIGNAL if (!(line6pcm->line6->properties->capabilities & LINE6_BIT_HWMON)) { line6pcm->prev_fbuf = kmalloc(LINE6_ISO_PACKETS * line6pcm->max_packet_size, GFP_KERNEL); if (!line6pcm->prev_fbuf) { dev_err(line6pcm->line6->ifcdev, "cannot malloc monitor buffer\n"); return -ENOMEM; } } #else line6pcm->prev_fbuf = NULL; #endif if (((flags_old & MASK_CAPTURE) == 0) && ((flags_new & MASK_CAPTURE) != 0)) { /* Waiting for completion of active URBs in the stop handler is a bug, we therefore report an error if capturing is restarted too soon. */ if (line6pcm->active_urb_in | line6pcm->unlink_urb_in) return -EBUSY; line6pcm->buffer_in = kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS * line6pcm->max_packet_size, GFP_KERNEL); if (!line6pcm->buffer_in) { dev_err(line6pcm->line6->ifcdev, "cannot malloc capture buffer\n"); return -ENOMEM; } line6pcm->count_in = 0; line6pcm->prev_fsize = 0; err = line6_submit_audio_in_all_urbs(line6pcm); if (err < 0) { __sync_fetch_and_and(&line6pcm->flags, ~channels); return err; } } if (((flags_old & MASK_PLAYBACK) == 0) && ((flags_new & MASK_PLAYBACK) != 0)) { /* See comment above regarding PCM restart. 
*/ if (line6pcm->active_urb_out | line6pcm->unlink_urb_out) return -EBUSY; line6pcm->buffer_out = kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS * line6pcm->max_packet_size, GFP_KERNEL); if (!line6pcm->buffer_out) { dev_err(line6pcm->line6->ifcdev, "cannot malloc playback buffer\n"); return -ENOMEM; } line6pcm->count_out = 0; err = line6_submit_audio_out_all_urbs(line6pcm); if (err < 0) { __sync_fetch_and_and(&line6pcm->flags, ~channels); return err; } } return 0; } int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels) { unsigned long flags_old = __sync_fetch_and_and(&line6pcm->flags, ~channels); unsigned long flags_new = flags_old & ~channels; if (((flags_old & MASK_CAPTURE) != 0) && ((flags_new & MASK_CAPTURE) == 0)) { line6_unlink_audio_in_urbs(line6pcm); kfree(line6pcm->buffer_in); line6pcm->buffer_in = NULL; } if (((flags_old & MASK_PLAYBACK) != 0) && ((flags_new & MASK_PLAYBACK) == 0)) { line6_unlink_audio_out_urbs(line6pcm); kfree(line6pcm->buffer_out); line6pcm->buffer_out = NULL; } #if LINE6_BACKUP_MONITOR_SIGNAL kfree(line6pcm->prev_fbuf); #endif return 0; } /* trigger callback */ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct snd_pcm_substream *s; int err; unsigned long flags; spin_lock_irqsave(&line6pcm->lock_trigger, flags); clear_bit(BIT_PREPARED, &line6pcm->flags); snd_pcm_group_for_each_entry(s, substream) { switch (s->stream) { case SNDRV_PCM_STREAM_PLAYBACK: err = snd_line6_playback_trigger(line6pcm, cmd); if (err < 0) { spin_unlock_irqrestore(&line6pcm->lock_trigger, flags); return err; } break; case SNDRV_PCM_STREAM_CAPTURE: err = snd_line6_capture_trigger(line6pcm, cmd); if (err < 0) { spin_unlock_irqrestore(&line6pcm->lock_trigger, flags); return err; } break; default: dev_err(line6pcm->line6->ifcdev, "Unknown stream direction %d\n", s->stream); } } spin_unlock_irqrestore(&line6pcm->lock_trigger, flags); return 0; } /* control info 
callback */ static int snd_line6_control_playback_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 256; return 0; } /* control get callback */ static int snd_line6_control_playback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 2; i--;) ucontrol->value.integer.value[i] = line6pcm->volume_playback[i]; return 0; } /* control put callback */ static int snd_line6_control_playback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i, changed = 0; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 2; i--;) if (line6pcm->volume_playback[i] != ucontrol->value.integer.value[i]) { line6pcm->volume_playback[i] = ucontrol->value.integer.value[i]; changed = 1; } return changed; } /* control definition */ static struct snd_kcontrol_new line6_control_playback = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = snd_line6_control_playback_info, .get = snd_line6_control_playback_get, .put = snd_line6_control_playback_put }; /* Cleanup the PCM device. 
*/ static void line6_cleanup_pcm(struct snd_pcm *pcm) { int i; struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm); #ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE device_remove_file(line6pcm->line6->ifcdev, &dev_attr_impulse_volume); device_remove_file(line6pcm->line6->ifcdev, &dev_attr_impulse_period); #endif for (i = LINE6_ISO_BUFFERS; i--;) { if (line6pcm->urb_audio_out[i]) { usb_kill_urb(line6pcm->urb_audio_out[i]); usb_free_urb(line6pcm->urb_audio_out[i]); } if (line6pcm->urb_audio_in[i]) { usb_kill_urb(line6pcm->urb_audio_in[i]); usb_free_urb(line6pcm->urb_audio_in[i]); } } } /* create a PCM device */ static int snd_line6_new_pcm(struct snd_line6_pcm *line6pcm) { struct snd_pcm *pcm; int err; err = snd_pcm_new(line6pcm->line6->card, (char *)line6pcm->line6->properties->name, 0, 1, 1, &pcm); if (err < 0) return err; pcm->private_data = line6pcm; pcm->private_free = line6_cleanup_pcm; line6pcm->pcm = pcm; strcpy(pcm->name, line6pcm->line6->properties->name); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_line6_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_line6_capture_ops); /* pre-allocation of buffers */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data (GFP_KERNEL), 64 * 1024, 128 * 1024); return 0; } /* PCM device destructor */ static int snd_line6_pcm_free(struct snd_device *device) { return 0; } /* Stop substream if still running. */ static void pcm_disconnect_substream(struct snd_pcm_substream *substream) { if (substream->runtime && snd_pcm_running(substream)) snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); } /* Stop PCM stream. 
*/ void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm) { pcm_disconnect_substream(get_substream (line6pcm, SNDRV_PCM_STREAM_CAPTURE)); pcm_disconnect_substream(get_substream (line6pcm, SNDRV_PCM_STREAM_PLAYBACK)); line6_unlink_wait_clear_audio_out_urbs(line6pcm); line6_unlink_wait_clear_audio_in_urbs(line6pcm); } /* Create and register the PCM device and mixer entries. Create URBs for playback and capture. */ int line6_init_pcm(struct usb_line6 *line6, struct line6_pcm_properties *properties) { static struct snd_device_ops pcm_ops = { .dev_free = snd_line6_pcm_free, }; int err; int ep_read = 0, ep_write = 0; struct snd_line6_pcm *line6pcm; if (!(line6->properties->capabilities & LINE6_BIT_PCM)) return 0; /* skip PCM initialization and report success */ /* initialize PCM subsystem based on product id: */ switch (line6->product) { case LINE6_DEVID_BASSPODXT: case LINE6_DEVID_BASSPODXTLIVE: case LINE6_DEVID_BASSPODXTPRO: case LINE6_DEVID_PODXT: case LINE6_DEVID_PODXTLIVE: case LINE6_DEVID_PODXTPRO: ep_read = 0x82; ep_write = 0x01; break; case LINE6_DEVID_PODX3: case LINE6_DEVID_PODX3LIVE: ep_read = 0x86; ep_write = 0x02; break; case LINE6_DEVID_POCKETPOD: ep_read = 0x82; ep_write = 0x02; break; case LINE6_DEVID_GUITARPORT: case LINE6_DEVID_PODSTUDIO_GX: case LINE6_DEVID_PODSTUDIO_UX1: case LINE6_DEVID_PODSTUDIO_UX2: case LINE6_DEVID_TONEPORT_GX: case LINE6_DEVID_TONEPORT_UX1: case LINE6_DEVID_TONEPORT_UX2: ep_read = 0x82; ep_write = 0x01; break; /* this is for interface_number == 1: case LINE6_DEVID_TONEPORT_UX2: case LINE6_DEVID_PODSTUDIO_UX2: ep_read = 0x87; ep_write = 0x00; break; */ default: MISSING_CASE; } line6pcm = kzalloc(sizeof(struct snd_line6_pcm), GFP_KERNEL); if (line6pcm == NULL) return -ENOMEM; line6pcm->volume_playback[0] = line6pcm->volume_playback[1] = 255; line6pcm->volume_monitor = 255; line6pcm->line6 = line6; line6pcm->ep_audio_read = ep_read; line6pcm->ep_audio_write = ep_write; line6pcm->max_packet_size = usb_maxpacket(line6->usbdev, 
usb_rcvintpipe(line6->usbdev, ep_read), 0); line6pcm->properties = properties; line6->line6pcm = line6pcm; /* PCM device: */ err = snd_device_new(line6->card, SNDRV_DEV_PCM, line6, &pcm_ops); if (err < 0) return err; snd_card_set_dev(line6->card, line6->ifcdev); err = snd_line6_new_pcm(line6pcm); if (err < 0) return err; spin_lock_init(&line6pcm->lock_audio_out); spin_lock_init(&line6pcm->lock_audio_in); spin_lock_init(&line6pcm->lock_trigger); err = line6_create_audio_out_urbs(line6pcm); if (err < 0) return err; err = line6_create_audio_in_urbs(line6pcm); if (err < 0) return err; /* mixer: */ err = snd_ctl_add(line6->card, snd_ctl_new1(&line6_control_playback, line6pcm)); if (err < 0) return err; #ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE /* impulse response test: */ err = device_create_file(line6->ifcdev, &dev_attr_impulse_volume); if (err < 0) return err; err = device_create_file(line6->ifcdev, &dev_attr_impulse_period); if (err < 0) return err; line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD; #endif return 0; } /* prepare pcm callback */ int snd_line6_prepare(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) { line6pcm->count_out = 0; line6pcm->pos_out = 0; line6pcm->pos_out_done = 0; line6pcm->bytes_out = 0; line6pcm->count_in = 0; line6pcm->pos_in_done = 0; line6pcm->bytes_in = 0; } return 0; }
gpl-2.0
androthan/android_kernel_samsung_aalto-eu
arch/arm/mm/cache-xsc3l2.c
3324
5062
/* * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support * * Copyright (C) 2007 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/highmem.h> #include <asm/system.h> #include <asm/cputype.h> #include <asm/cacheflush.h> #define CR_L2 (1 << 26) #define CACHE_LINE_SIZE 32 #define CACHE_LINE_SHIFT 5 #define CACHE_WAY_PER_SET 8 #define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf)) #define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT) static inline int xsc3_l2_present(void) { unsigned long l2ctype; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); return !!(l2ctype & 0xf8); } static inline void xsc3_l2_clean_mva(unsigned long addr) { __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr)); } static inline void xsc3_l2_inv_mva(unsigned long addr) { __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr)); } static inline void xsc3_l2_inv_all(void) { unsigned long l2ctype, set_way; int set, way; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { for (way = 0; way < CACHE_WAY_PER_SET; way++) { set_way = (way << 29) | (set << 5); __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way)); } } dsb(); } static inline void l2_unmap_va(unsigned long va) { #ifdef CONFIG_HIGHMEM if (va != -1) kunmap_atomic((void *)va); #endif } static inline unsigned long 
l2_map_va(unsigned long pa, unsigned long prev_va) { #ifdef CONFIG_HIGHMEM unsigned long va = prev_va & PAGE_MASK; unsigned long pa_offset = pa << (32 - PAGE_SHIFT); if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) { /* * Switching to a new page. Because cache ops are * using virtual addresses only, we must put a mapping * in place for it. */ l2_unmap_va(prev_va); va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); } return va + (pa_offset >> (32 - PAGE_SHIFT)); #else return __phys_to_virt(pa); #endif } static void xsc3_l2_inv_range(unsigned long start, unsigned long end) { unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_inv_all(); return; } vaddr = -1; /* to force the first mapping */ /* * Clean and invalidate partial first cache line. */ if (start & (CACHE_LINE_SIZE - 1)) { vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start = (start | (CACHE_LINE_SIZE - 1)) + 1; } /* * Invalidate all full cache lines between 'start' and 'end'. */ while (start < (end & ~(CACHE_LINE_SIZE - 1))) { vaddr = l2_map_va(start, vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } /* * Clean and invalidate partial last cache line. 
*/ if (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); } l2_unmap_va(vaddr); dsb(); } static void xsc3_l2_clean_range(unsigned long start, unsigned long end) { unsigned long vaddr; vaddr = -1; /* to force the first mapping */ start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); start += CACHE_LINE_SIZE; } l2_unmap_va(vaddr); dsb(); } /* * optimize L2 flush all operation by set/way format */ static inline void xsc3_l2_flush_all(void) { unsigned long l2ctype, set_way; int set, way; __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { for (way = 0; way < CACHE_WAY_PER_SET; way++) { set_way = (way << 29) | (set << 5); __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way)); } } dsb(); } static void xsc3_l2_flush_range(unsigned long start, unsigned long end) { unsigned long vaddr; if (start == 0 && end == -1ul) { xsc3_l2_flush_all(); return; } vaddr = -1; /* to force the first mapping */ start &= ~(CACHE_LINE_SIZE - 1); while (start < end) { vaddr = l2_map_va(start, vaddr); xsc3_l2_clean_mva(vaddr); xsc3_l2_inv_mva(vaddr); start += CACHE_LINE_SIZE; } l2_unmap_va(vaddr); dsb(); } static int __init xsc3_l2_init(void) { if (!cpu_is_xsc3() || !xsc3_l2_present()) return 0; if (get_cr() & CR_L2) { pr_info("XScale3 L2 cache enabled.\n"); xsc3_l2_inv_all(); outer_cache.inv_range = xsc3_l2_inv_range; outer_cache.clean_range = xsc3_l2_clean_range; outer_cache.flush_range = xsc3_l2_flush_range; } return 0; } core_initcall(xsc3_l2_init);
gpl-2.0
surdupetru/android_kernel_huawei_msm8916-caf
arch/mips/pci/fixup-sni.c
4348
4981
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SNI specific PCI support for RM200/RM300. * * Copyright (C) 1997 - 2000, 2003, 04 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/mipsregs.h> #include <asm/sni.h> #include <irq.h> /* * PCIMT Shortcuts ... */ #define SCSI PCIMT_IRQ_SCSI #define ETH PCIMT_IRQ_ETHERNET #define INTA PCIMT_IRQ_INTA #define INTB PCIMT_IRQ_INTB #define INTC PCIMT_IRQ_INTC #define INTD PCIMT_IRQ_INTD /* * Device 0: PCI EISA Bridge (directly routed) * Device 1: NCR53c810 SCSI (directly routed) * Device 2: PCnet32 Ethernet (directly routed) * Device 3: VGA (routed to INTB) * Device 4: Unused * Device 5: Slot 2 * Device 6: Slot 3 * Device 7: Slot 4 * * Documentation says the VGA is device 5 and device 3 is unused but that * seem to be a documentation error. At least on my RM200C the Cirrus * Logic CL-GD5434 VGA is device 3. */ static char irq_tab_rm200[8][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ { INTB, INTB, INTB, INTB, INTB }, /* VGA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; /* * In Revision D of the RM300 Device 2 has become a normal purpose Slot 1 * * The VGA card is optional for RM300 systems. 
*/ static char irq_tab_rm300d[8][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { 0, INTC, INTD, INTA, INTB }, /* Slot 1 */ { INTB, INTB, INTB, INTB, INTB }, /* VGA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; static char irq_tab_rm300e[5][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 1 */ { 0, INTA, INTB, INTC, INTD }, /* Slot 2 */ }; #undef SCSI #undef ETH #undef INTA #undef INTB #undef INTC #undef INTD /* * PCIT Shortcuts ... */ #define SCSI0 PCIT_IRQ_SCSI0 #define SCSI1 PCIT_IRQ_SCSI1 #define ETH PCIT_IRQ_ETHERNET #define INTA PCIT_IRQ_INTA #define INTB PCIT_IRQ_INTB #define INTC PCIT_IRQ_INTC #define INTD PCIT_IRQ_INTD static char irq_tab_pcit[13][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ { SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ }; static char irq_tab_pcit_cplus[13][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ { 0, 0, 0, 0, 0 }, /* PCI-EISA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ { 0, INTB, INTC, INTD, INTA }, /* fixup */ }; static inline int is_rm300_revd(void) { 
unsigned char csmsr = *(volatile unsigned char *)PCIMT_CSMSR; return (csmsr & 0xa0) == 0x20; } int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (sni_brd_type) { case SNI_BRD_PCI_TOWER_CPLUS: if (slot == 4) { /* * SNI messed up interrupt wiring for onboard * PCI bus 1; we need to fix this up here */ while (dev && dev->bus->number != 1) dev = dev->bus->self; if (dev && dev->devfn >= PCI_DEVFN(4, 0)) slot = 5; } return irq_tab_pcit_cplus[slot][pin]; case SNI_BRD_PCI_TOWER: return irq_tab_pcit[slot][pin]; case SNI_BRD_PCI_MTOWER: if (is_rm300_revd()) return irq_tab_rm300d[slot][pin]; /* fall through */ case SNI_BRD_PCI_DESKTOP: return irq_tab_rm200[slot][pin]; case SNI_BRD_PCI_MTOWER_CPLUS: return irq_tab_rm300e[slot][pin]; } return 0; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
Snuzzo/funky_jewel
net/rxrpc/ar-recvmsg.c
4604
11038
/* RxRPC recvmsg() implementation * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/net.h> #include <linux/skbuff.h> #include <linux/export.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" /* * removal a call's user ID from the socket tree to make the user ID available * again and so that it won't be seen again in association with that call */ void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) { _debug("RELEASE CALL %d", call->debug_id); if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { write_lock_bh(&rx->call_lock); rb_erase(&call->sock_node, &call->socket->calls); clear_bit(RXRPC_CALL_HAS_USERID, &call->flags); write_unlock_bh(&rx->call_lock); } read_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } /* * receive a message from an RxRPC socket * - we need to be careful about two or more threads calling recvmsg * simultaneously */ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct rxrpc_skb_priv *sp; struct rxrpc_call *call = NULL, *continue_call = NULL; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct sk_buff *skb; long timeo; int copy, ret, ullen, offset, copied = 0; u32 abort_code; DEFINE_WAIT(wait); _enter(",,,%zu,%d", len, flags); if (flags & (MSG_OOB | MSG_TRUNC)) return -EOPNOTSUPP; ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 
4 : sizeof(unsigned long); timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); msg->msg_flags |= MSG_MORE; lock_sock(&rx->sk); for (;;) { /* return immediately if a client socket has no outstanding * calls */ if (RB_EMPTY_ROOT(&rx->calls)) { if (copied) goto out; if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); if (continue_call) rxrpc_put_call(continue_call); return -ENODATA; } } /* get the next message on the Rx queue */ skb = skb_peek(&rx->sk.sk_receive_queue); if (!skb) { /* nothing remains on the queue */ if (copied && (msg->msg_flags & MSG_PEEK || timeo == 0)) goto out; /* wait for a message to turn up */ release_sock(&rx->sk); prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, TASK_INTERRUPTIBLE); ret = sock_error(&rx->sk); if (ret) goto wait_error; if (skb_queue_empty(&rx->sk.sk_receive_queue)) { if (signal_pending(current)) goto wait_interrupted; timeo = schedule_timeout(timeo); } finish_wait(sk_sleep(&rx->sk), &wait); lock_sock(&rx->sk); continue; } peek_next_packet: sp = rxrpc_skb(skb); call = sp->call; ASSERT(call != NULL); _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]); /* make sure we wait for the state to be updated in this call */ spin_lock_bh(&call->lock); spin_unlock_bh(&call->lock); if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { _debug("packet from released call"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); continue; } /* determine whether to continue last data receive */ if (continue_call) { _debug("maybe cont"); if (call != continue_call || skb->mark != RXRPC_SKB_MARK_DATA) { release_sock(&rx->sk); rxrpc_put_call(continue_call); _leave(" = %d [noncont]", copied); return copied; } } rxrpc_get_call(call); /* copy the peer address and timestamp */ if (!continue_call) { if (msg->msg_name && msg->msg_namelen > 0) memcpy(msg->msg_name, &call->conn->trans->peer->srx, sizeof(call->conn->trans->peer->srx)); sock_recv_ts_and_drops(msg, &rx->sk, skb); } /* receive the message */ if 
(skb->mark != RXRPC_SKB_MARK_DATA) goto receive_non_data_message; _debug("recvmsg DATA #%u { %d, %d }", ntohl(sp->hdr.seq), skb->len, sp->offset); if (!continue_call) { /* only set the control data once per recvmsg() */ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, ullen, &call->user_call_ID); if (ret < 0) goto copy_error; ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); } ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); call->rx_data_recv = ntohl(sp->hdr.seq); ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); offset = sp->offset; copy = skb->len - offset; if (copy > len - copied) copy = len - copied; if (skb->ip_summed == CHECKSUM_UNNECESSARY) { ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy); } else { ret = skb_copy_and_csum_datagram_iovec(skb, offset, msg->msg_iov); if (ret == -EINVAL) goto csum_copy_error; } if (ret < 0) goto copy_error; /* handle piecemeal consumption of data packets */ _debug("copied %d+%d", copy, copied); offset += copy; copied += copy; if (!(flags & MSG_PEEK)) sp->offset = offset; if (sp->offset < skb->len) { _debug("buffer full"); ASSERTCMP(copied, ==, len); break; } /* we transferred the whole data packet */ if (sp->hdr.flags & RXRPC_LAST_PACKET) { _debug("last"); if (call->conn->out_clientflag) { /* last byte of reply received */ ret = copied; goto terminal_message; } /* last bit of request received */ if (!(flags & MSG_PEEK)) { _debug("eat packet"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); } msg->msg_flags &= ~MSG_MORE; break; } /* move on to the next data message */ _debug("next"); if (!continue_call) continue_call = sp->call; else rxrpc_put_call(call); call = NULL; if (flags & MSG_PEEK) { _debug("peek next"); skb = skb->next; if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue) break; goto peek_next_packet; } _debug("eat packet"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); 
rxrpc_free_skb(skb); } /* end of non-terminal data packet reception for the moment */ _debug("end rcv data"); out: release_sock(&rx->sk); if (call) rxrpc_put_call(call); if (continue_call) rxrpc_put_call(continue_call); _leave(" = %d [data]", copied); return copied; /* handle non-DATA messages such as aborts, incoming connections and * final ACKs */ receive_non_data_message: _debug("non-data"); if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) { _debug("RECV NEW CALL"); ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code); if (ret < 0) goto copy_error; if (!(flags & MSG_PEEK)) { if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); } goto out; } ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, ullen, &call->user_call_ID); if (ret < 0) goto copy_error; ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); switch (skb->mark) { case RXRPC_SKB_MARK_DATA: BUG(); case RXRPC_SKB_MARK_FINAL_ACK: ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code); break; case RXRPC_SKB_MARK_BUSY: ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code); break; case RXRPC_SKB_MARK_REMOTE_ABORT: abort_code = call->abort_code; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); break; case RXRPC_SKB_MARK_NET_ERROR: _debug("RECV NET ERROR %d", sp->error); abort_code = sp->error; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code); break; case RXRPC_SKB_MARK_LOCAL_ERROR: _debug("RECV LOCAL ERROR %d", sp->error); abort_code = sp->error; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &abort_code); break; default: BUG(); break; } if (ret < 0) goto copy_error; terminal_message: _debug("terminal"); msg->msg_flags &= ~MSG_MORE; msg->msg_flags |= MSG_EOR; if (!(flags & MSG_PEEK)) { _net("free terminal skb %p", skb); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); rxrpc_remove_user_ID(rx, call); } release_sock(&rx->sk); rxrpc_put_call(call); if (continue_call) rxrpc_put_call(continue_call); _leave(" = %d", 
ret); return ret; copy_error: _debug("copy error"); release_sock(&rx->sk); rxrpc_put_call(call); if (continue_call) rxrpc_put_call(continue_call); _leave(" = %d", ret); return ret; csum_copy_error: _debug("csum error"); release_sock(&rx->sk); if (continue_call) rxrpc_put_call(continue_call); rxrpc_kill_skb(skb); skb_kill_datagram(&rx->sk, skb, flags); rxrpc_put_call(call); return -EAGAIN; wait_interrupted: ret = sock_intr_errno(timeo); wait_error: finish_wait(sk_sleep(&rx->sk), &wait); if (continue_call) rxrpc_put_call(continue_call); if (copied) copied = ret; _leave(" = %d [waitfail %d]", copied, ret); return copied; } /** * rxrpc_kernel_data_delivered - Record delivery of data message * @skb: Message holding data * * Record the delivery of a data message. This permits RxRPC to keep its * tracking correct. The socket buffer will be deleted. */ void rxrpc_kernel_data_delivered(struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_call *call = sp->call; ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); call->rx_data_recv = ntohl(sp->hdr.seq); ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); rxrpc_free_skb(skb); } EXPORT_SYMBOL(rxrpc_kernel_data_delivered); /** * rxrpc_kernel_is_data_last - Determine if data message is last one * @skb: Message holding data * * Determine if data message is last one for the parent call. */ bool rxrpc_kernel_is_data_last(struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA); return sp->hdr.flags & RXRPC_LAST_PACKET; } EXPORT_SYMBOL(rxrpc_kernel_is_data_last); /** * rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message * @skb: Message indicating an abort * * Get the abort code from an RxRPC abort message. 
 */
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	/* only valid on a remote-abort message; trap misuse early */
	ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);

	/* the abort code was stashed on the call when the abort arrived */
	return sp->call->abort_code;
}
EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);

/**
 * rxrpc_kernel_get_error_number - Get the error number from an RxRPC error message
 * @skb: Message indicating an error
 *
 * Get the error number from an RxRPC error message.
 */
int rxrpc_kernel_get_error_number(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	return sp->error;
}
EXPORT_SYMBOL(rxrpc_kernel_get_error_number);
gpl-2.0
clumsy1991/M8_GPE_Kernel
tools/perf/builtin-bench.c
4860
5028
/*
 *
 * builtin-bench.c
 *
 * General benchmarking subsystem provided by perf
 *
 * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
 *
 */

/*
 *
 * Available subsystem list:
 *  sched ... scheduler and IPC mechanism
 *  mem   ... memory access performance
 *
 */

#include "perf.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "builtin.h"
#include "bench/bench.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One runnable benchmark: its CLI name, a one-line summary, and the
 * entry point invoked with (argc, argv, prefix) perf-subcommand style. */
struct bench_suite {
	const char *name;
	const char *summary;
	int (*fn)(int, const char **, const char *);
};
\
/* sentinel: easy for help */
#define suite_all { "all", "test all suite (pseudo suite)", NULL }

/* Suites of the "sched" subsystem; NULL-terminated for iteration. */
static struct bench_suite sched_suites[] = {
	{ "messaging",
	  "Benchmark for scheduler and IPC mechanisms",
	  bench_sched_messaging },
	{ "pipe",
	  "Flood of communication over pipe() between two processes",
	  bench_sched_pipe },
	suite_all,
	{ NULL,
	  NULL,
	  NULL }
};

/* Suites of the "mem" subsystem; NULL-terminated for iteration. */
static struct bench_suite mem_suites[] = {
	{ "memcpy",
	  "Simple memory copy in various ways",
	  bench_mem_memcpy },
	{ "memset",
	  "Simple memory set in various ways",
	  bench_mem_memset },
	suite_all,
	{ NULL,
	  NULL,
	  NULL }
};

/* A benchmark subsystem: a named group of suites. */
struct bench_subsys {
	const char *name;
	const char *summary;
	struct bench_suite *suites;
};

/* Registry of all subsystems; the "all" entry is a help-only sentinel
 * (suites == NULL), and the final entry terminates iteration. */
static struct bench_subsys subsystems[] = {
	{ "sched",
	  "scheduler and IPC mechanism",
	  sched_suites },
	{ "mem",
	  "memory access performance",
	  mem_suites },
	{ "all",		/* sentinel: easy for help */
	  "test all subsystem (pseudo subsystem)",
	  NULL },
	{ NULL,
	  NULL,
	  NULL }
};

/* Print the name/summary of every suite in subsystems[subsys_index]. */
static void dump_suites(int subsys_index)
{
	int i;

	printf("# List of available suites for %s...\n\n",
	       subsystems[subsys_index].name);

	for (i = 0; subsystems[subsys_index].suites[i].name; i++)
		printf("%14s: %s\n",
		       subsystems[subsys_index].suites[i].name,
		       subsystems[subsys_index].suites[i].summary);

	printf("\n");
	return;
}

/* Raw value of --format; translated by bench_str2int() below. */
static const char *bench_format_str;
/* Output style consulted by the individual bench_* implementations. */
int bench_format = BENCH_FORMAT_DEFAULT;

static const struct option bench_options[] = {
	OPT_STRING('f', "format", &bench_format_str, "default",
		   "Specify format style"),
	OPT_END()
};

static const char * const bench_usage[] = {
	"perf bench [<common options>] <subsystem> <suite> [<options>]",
	NULL
};

/* Show usage plus the list of available subsystems. */
static void print_usage(void)
{
	int i;

	printf("Usage: \n");
	for (i = 0; bench_usage[i]; i++)
		printf("\t%s\n", bench_usage[i]);
	printf("\n");

	printf("# List of available subsystems...\n\n");

	for (i = 0; subsystems[i].name; i++)
		printf("%14s: %s\n",
		       subsystems[i].name, subsystems[i].summary);
	printf("\n");
}

/*
 * Map a --format string to a BENCH_FORMAT_* constant.
 * NULL (option not given) means the default; an unrecognized
 * string yields BENCH_FORMAT_UNKNOWN so the caller can reject it.
 */
static int bench_str2int(const char *str)
{
	if (!str)
		return BENCH_FORMAT_DEFAULT;

	if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR))
		return BENCH_FORMAT_DEFAULT;
	else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR))
		return BENCH_FORMAT_SIMPLE;

	return BENCH_FORMAT_UNKNOWN;
}

/* Run every suite of one subsystem with default (no extra) arguments. */
static void all_suite(struct bench_subsys *subsys)	  /* FROM HERE */
{
	int i;
	const char *argv[2];
	struct bench_suite *suites = subsys->suites;

	argv[1] = NULL;
	/*
	 * TODO:
	 * preparing preset parameters for
	 * embedded, ordinary PC, HPC, etc...
	 * will be helpful
	 */
	for (i = 0; suites[i].fn; i++) {
		printf("# Running %s/%s benchmark...\n",
		       subsys->name,
		       suites[i].name);

		/* argv[0] slot is unused by the suites; pass argc == 1 */
		argv[1] = suites[i].name;
		suites[i].fn(1, argv, NULL);
		printf("\n");
	}
}

/* Run every suite of every real subsystem; the "all" sentinel has
 * suites == NULL and therefore terminates the loop. */
static void all_subsystem(void)
{
	int i;
	for (i = 0; subsystems[i].suites; i++)
		all_suite(&subsystems[i]);
}

/*
 * Entry point for "perf bench": parse common options, then dispatch
 * argv[0]/argv[1] as <subsystem>/<suite>, with "all" accepted at
 * either level.  Returns the suite's status, or 1 on unknown names.
 */
int cmd_bench(int argc, const char **argv, const char *prefix __used)
{
	int i, j, status = 0;

	if (argc < 2) {
		/* No subsystem specified. */
		print_usage();
		goto end;
	}

	argc = parse_options(argc, argv, bench_options, bench_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	bench_format = bench_str2int(bench_format_str);
	if (bench_format == BENCH_FORMAT_UNKNOWN) {
		printf("Unknown format descriptor:%s\n", bench_format_str);
		goto end;
	}

	if (argc < 1) {
		print_usage();
		goto end;
	}

	if (!strcmp(argv[0], "all")) {
		all_subsystem();
		goto end;
	}

	for (i = 0; subsystems[i].name; i++) {
		if (strcmp(subsystems[i].name, argv[0]))
			continue;

		if (argc < 2) {
			/* No suite specified. */
			dump_suites(i);
			goto end;
		}

		if (!strcmp(argv[1], "all")) {
			all_suite(&subsystems[i]);
			goto end;
		}

		for (j = 0; subsystems[i].suites[j].name; j++) {
			if (strcmp(subsystems[i].suites[j].name, argv[1]))
				continue;

			if (bench_format == BENCH_FORMAT_DEFAULT)
				printf("# Running %s/%s benchmark...\n",
				       subsystems[i].name,
				       subsystems[i].suites[j].name);
			/* strip the subsystem name before handing over argv */
			status = subsystems[i].suites[j].fn(argc - 1,
							    argv + 1, prefix);
			goto end;
		}

		if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
			dump_suites(i);
			goto end;
		}

		printf("Unknown suite:%s for %s\n", argv[1], argv[0]);
		status = 1;
		goto end;
	}

	printf("Unknown subsystem:%s\n", argv[0]);
	status = 1;

end:
	return status;
}
gpl-2.0
duydb2/android_kernel_sony_msm8x60
drivers/media/video/hdpvr/hdpvr-video.c
4860
31004
/* * Hauppauge HD PVR USB driver - video 4 linux 2 interface * * Copyright (C) 2008 Janne Grunau (j@jannau.net) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "hdpvr.h" #define BULK_URB_TIMEOUT 90 /* 0.09 seconds */ #define print_buffer_status() { \ v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, \ "%s:%d buffer stat: %d free, %d proc\n", \ __func__, __LINE__, \ list_size(&dev->free_buff_list), \ list_size(&dev->rec_buff_list)); } struct hdpvr_fh { struct hdpvr_device *dev; }; static uint list_size(struct list_head *list) { struct list_head *tmp; uint count = 0; list_for_each(tmp, list) { count++; } return count; } /*=========================================================================*/ /* urb callback */ static void hdpvr_read_bulk_callback(struct urb *urb) { struct hdpvr_buffer *buf = (struct hdpvr_buffer *)urb->context; struct hdpvr_device *dev = buf->dev; /* marking buffer as received and wake waiting */ buf->status = BUFSTAT_READY; wake_up_interruptible(&dev->wait_data); } /*=========================================================================*/ /* bufffer bits */ /* function expects dev->io_mutex to be hold by caller */ int hdpvr_cancel_queue(struct hdpvr_device *dev) { struct hdpvr_buffer *buf; list_for_each_entry(buf, &dev->rec_buff_list, buff_list) { usb_kill_urb(buf->urb); buf->status = BUFSTAT_AVAILABLE; } list_splice_init(&dev->rec_buff_list, dev->free_buff_list.prev); return 0; } static int hdpvr_free_queue(struct list_head *q) { struct list_head 
*tmp; struct list_head *p; struct hdpvr_buffer *buf; struct urb *urb; for (p = q->next; p != q;) { buf = list_entry(p, struct hdpvr_buffer, buff_list); urb = buf->urb; usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); tmp = p->next; list_del(p); kfree(buf); p = tmp; } return 0; } /* function expects dev->io_mutex to be hold by caller */ int hdpvr_free_buffers(struct hdpvr_device *dev) { hdpvr_cancel_queue(dev); hdpvr_free_queue(&dev->free_buff_list); hdpvr_free_queue(&dev->rec_buff_list); return 0; } /* function expects dev->io_mutex to be hold by caller */ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count) { uint i; int retval = -ENOMEM; u8 *mem; struct hdpvr_buffer *buf; struct urb *urb; v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "allocating %u buffers\n", count); for (i = 0; i < count; i++) { buf = kzalloc(sizeof(struct hdpvr_buffer), GFP_KERNEL); if (!buf) { v4l2_err(&dev->v4l2_dev, "cannot allocate buffer\n"); goto exit; } buf->dev = dev; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { v4l2_err(&dev->v4l2_dev, "cannot allocate urb\n"); goto exit_urb; } buf->urb = urb; mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size, GFP_KERNEL, &urb->transfer_dma); if (!mem) { v4l2_err(&dev->v4l2_dev, "cannot allocate usb transfer buffer\n"); goto exit_urb_buffer; } usb_fill_bulk_urb(buf->urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr), mem, dev->bulk_in_size, hdpvr_read_bulk_callback, buf); buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; buf->status = BUFSTAT_AVAILABLE; list_add_tail(&buf->buff_list, &dev->free_buff_list); } return 0; exit_urb_buffer: usb_free_urb(urb); exit_urb: kfree(buf); exit: hdpvr_free_buffers(dev); return retval; } static int hdpvr_submit_buffers(struct hdpvr_device *dev) { struct hdpvr_buffer *buf; struct urb *urb; int ret = 0, err_count = 0; mutex_lock(&dev->io_mutex); while (dev->status == STATUS_STREAMING && 
!list_empty(&dev->free_buff_list)) { buf = list_entry(dev->free_buff_list.next, struct hdpvr_buffer, buff_list); if (buf->status != BUFSTAT_AVAILABLE) { v4l2_err(&dev->v4l2_dev, "buffer not marked as available\n"); ret = -EFAULT; goto err; } urb = buf->urb; urb->status = 0; urb->actual_length = 0; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { v4l2_err(&dev->v4l2_dev, "usb_submit_urb in %s returned %d\n", __func__, ret); if (++err_count > 2) break; continue; } buf->status = BUFSTAT_INPROGRESS; list_move_tail(&buf->buff_list, &dev->rec_buff_list); } err: print_buffer_status(); mutex_unlock(&dev->io_mutex); return ret; } static struct hdpvr_buffer *hdpvr_get_next_buffer(struct hdpvr_device *dev) { struct hdpvr_buffer *buf; mutex_lock(&dev->io_mutex); if (list_empty(&dev->rec_buff_list)) { mutex_unlock(&dev->io_mutex); return NULL; } buf = list_entry(dev->rec_buff_list.next, struct hdpvr_buffer, buff_list); mutex_unlock(&dev->io_mutex); return buf; } static void hdpvr_transmit_buffers(struct work_struct *work) { struct hdpvr_device *dev = container_of(work, struct hdpvr_device, worker); while (dev->status == STATUS_STREAMING) { if (hdpvr_submit_buffers(dev)) { v4l2_err(&dev->v4l2_dev, "couldn't submit buffers\n"); goto error; } if (wait_event_interruptible(dev->wait_buffer, !list_empty(&dev->free_buff_list) || dev->status != STATUS_STREAMING)) goto error; } v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "transmit worker exited\n"); return; error: v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "transmit buffers errored\n"); dev->status = STATUS_ERROR; } /* function expects dev->io_mutex to be hold by caller */ static int hdpvr_start_streaming(struct hdpvr_device *dev) { int ret; struct hdpvr_video_info *vidinf; if (dev->status == STATUS_STREAMING) return 0; else if (dev->status != STATUS_IDLE) return -EAGAIN; vidinf = get_video_info(dev); if (vidinf) { v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "video signal: %dx%d@%dhz\n", vidinf->width, vidinf->height, 
vidinf->fps); kfree(vidinf); /* start streaming 2 request */ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xb8, 0x38, 0x1, 0, NULL, 0, 8000); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "encoder start control request returned %d\n", ret); hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00); dev->status = STATUS_STREAMING; INIT_WORK(&dev->worker, hdpvr_transmit_buffers); queue_work(dev->workqueue, &dev->worker); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "streaming started\n"); return 0; } msleep(250); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "no video signal at input %d\n", dev->options.video_input); return -EAGAIN; } /* function expects dev->io_mutex to be hold by caller */ static int hdpvr_stop_streaming(struct hdpvr_device *dev) { int actual_length; uint c = 0; u8 *buf; if (dev->status == STATUS_IDLE) return 0; else if (dev->status != STATUS_STREAMING) return -EAGAIN; buf = kmalloc(dev->bulk_in_size, GFP_KERNEL); if (!buf) v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer " "for emptying the internal device buffer. 
" "Next capture start will be slow\n"); dev->status = STATUS_SHUTTING_DOWN; hdpvr_config_call(dev, CTRL_STOP_STREAMING_VALUE, 0x00); mutex_unlock(&dev->io_mutex); wake_up_interruptible(&dev->wait_buffer); msleep(50); flush_workqueue(dev->workqueue); mutex_lock(&dev->io_mutex); /* kill the still outstanding urbs */ hdpvr_cancel_queue(dev); /* emptying the device buffer beforeshutting it down */ while (buf && ++c < 500 && !usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr), buf, dev->bulk_in_size, &actual_length, BULK_URB_TIMEOUT)) { v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "%2d: got %d bytes\n", c, actual_length); } kfree(buf); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "used %d urbs to empty device buffers\n", c-1); msleep(10); dev->status = STATUS_IDLE; return 0; } /*=======================================================================*/ /* * video 4 linux 2 file operations */ static int hdpvr_open(struct file *file) { struct hdpvr_device *dev; struct hdpvr_fh *fh; int retval = -ENOMEM; dev = (struct hdpvr_device *)video_get_drvdata(video_devdata(file)); if (!dev) { pr_err("open failing with with ENODEV\n"); retval = -ENODEV; goto err; } fh = kzalloc(sizeof(struct hdpvr_fh), GFP_KERNEL); if (!fh) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); goto err; } /* lock the device to allow correctly handling errors * in resumption */ mutex_lock(&dev->io_mutex); dev->open_count++; mutex_unlock(&dev->io_mutex); fh->dev = dev; /* save our object in the file's private structure */ file->private_data = fh; retval = 0; err: return retval; } static int hdpvr_release(struct file *file) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; if (!dev) return -ENODEV; mutex_lock(&dev->io_mutex); if (!(--dev->open_count) && dev->status == STATUS_STREAMING) hdpvr_stop_streaming(dev); mutex_unlock(&dev->io_mutex); return 0; } /* * hdpvr_v4l2_read() * will allocate buffers when called for the first time */ static 
ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count, loff_t *pos) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; struct hdpvr_buffer *buf = NULL; struct urb *urb; unsigned int ret = 0; int rem, cnt; if (*pos) return -ESPIPE; if (!dev) return -ENODEV; mutex_lock(&dev->io_mutex); if (dev->status == STATUS_IDLE) { if (hdpvr_start_streaming(dev)) { v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "start_streaming failed\n"); ret = -EIO; msleep(200); dev->status = STATUS_IDLE; mutex_unlock(&dev->io_mutex); goto err; } print_buffer_status(); } mutex_unlock(&dev->io_mutex); /* wait for the first buffer */ if (!(file->f_flags & O_NONBLOCK)) { if (wait_event_interruptible(dev->wait_data, hdpvr_get_next_buffer(dev))) return -ERESTARTSYS; } buf = hdpvr_get_next_buffer(dev); while (count > 0 && buf) { if (buf->status != BUFSTAT_READY && dev->status != STATUS_DISCONNECTED) { /* return nonblocking */ if (file->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; goto err; } if (wait_event_interruptible(dev->wait_data, buf->status == BUFSTAT_READY)) { ret = -ERESTARTSYS; goto err; } } if (buf->status != BUFSTAT_READY) break; /* set remaining bytes to copy */ urb = buf->urb; rem = urb->actual_length - buf->pos; cnt = rem > count ? 
count : rem; if (copy_to_user(buffer, urb->transfer_buffer + buf->pos, cnt)) { v4l2_err(&dev->v4l2_dev, "read: copy_to_user failed\n"); if (!ret) ret = -EFAULT; goto err; } buf->pos += cnt; count -= cnt; buffer += cnt; ret += cnt; /* finished, take next buffer */ if (buf->pos == urb->actual_length) { mutex_lock(&dev->io_mutex); buf->pos = 0; buf->status = BUFSTAT_AVAILABLE; list_move_tail(&buf->buff_list, &dev->free_buff_list); print_buffer_status(); mutex_unlock(&dev->io_mutex); wake_up_interruptible(&dev->wait_buffer); buf = hdpvr_get_next_buffer(dev); } } err: if (!ret && !buf) ret = -EAGAIN; return ret; } static unsigned int hdpvr_poll(struct file *filp, poll_table *wait) { struct hdpvr_buffer *buf = NULL; struct hdpvr_fh *fh = filp->private_data; struct hdpvr_device *dev = fh->dev; unsigned int mask = 0; mutex_lock(&dev->io_mutex); if (!video_is_registered(dev->video_dev)) { mutex_unlock(&dev->io_mutex); return -EIO; } if (dev->status == STATUS_IDLE) { if (hdpvr_start_streaming(dev)) { v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "start_streaming failed\n"); dev->status = STATUS_IDLE; } print_buffer_status(); } mutex_unlock(&dev->io_mutex); buf = hdpvr_get_next_buffer(dev); /* only wait if no data is available */ if (!buf || buf->status != BUFSTAT_READY) { poll_wait(filp, &dev->wait_data, wait); buf = hdpvr_get_next_buffer(dev); } if (buf && buf->status == BUFSTAT_READY) mask |= POLLIN | POLLRDNORM; return mask; } static const struct v4l2_file_operations hdpvr_fops = { .owner = THIS_MODULE, .open = hdpvr_open, .release = hdpvr_release, .read = hdpvr_read, .poll = hdpvr_poll, .unlocked_ioctl = video_ioctl2, }; /*=======================================================================*/ /* * V4L2 ioctl handling */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct hdpvr_device *dev = video_drvdata(file); strcpy(cap->driver, "hdpvr"); strcpy(cap->card, "Hauppauge HD PVR"); usb_make_path(dev->udev, cap->bus_info, 
sizeof(cap->bus_info)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE; return 0; } static int vidioc_s_std(struct file *file, void *private_data, v4l2_std_id *std) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; u8 std_type = 1; if (*std & (V4L2_STD_NTSC | V4L2_STD_PAL_60)) std_type = 0; return hdpvr_config_call(dev, CTRL_VIDEO_STD_TYPE, std_type); } static const char *iname[] = { [HDPVR_COMPONENT] = "Component", [HDPVR_SVIDEO] = "S-Video", [HDPVR_COMPOSITE] = "Composite", }; static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; unsigned int n; n = i->index; if (n >= HDPVR_VIDEO_INPUTS) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strncpy(i->name, iname[n], sizeof(i->name) - 1); i->name[sizeof(i->name) - 1] = '\0'; i->audioset = 1<<HDPVR_RCA_FRONT | 1<<HDPVR_RCA_BACK | 1<<HDPVR_SPDIF; i->std = dev->video_dev->tvnorms; return 0; } static int vidioc_s_input(struct file *file, void *private_data, unsigned int index) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int retval; if (index >= HDPVR_VIDEO_INPUTS) return -EINVAL; if (dev->status != STATUS_IDLE) return -EAGAIN; retval = hdpvr_config_call(dev, CTRL_VIDEO_INPUT_VALUE, index+1); if (!retval) dev->options.video_input = index; return retval; } static int vidioc_g_input(struct file *file, void *private_data, unsigned int *index) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; *index = dev->options.video_input; return 0; } static const char *audio_iname[] = { [HDPVR_RCA_FRONT] = "RCA front", [HDPVR_RCA_BACK] = "RCA back", [HDPVR_SPDIF] = "SPDIF", }; static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *audio) { unsigned int n; n = audio->index; if (n >= HDPVR_AUDIO_INPUTS) return -EINVAL; audio->capability = V4L2_AUDCAP_STEREO; strncpy(audio->name, 
audio_iname[n], sizeof(audio->name) - 1); audio->name[sizeof(audio->name) - 1] = '\0'; return 0; } static int vidioc_s_audio(struct file *file, void *private_data, struct v4l2_audio *audio) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int retval; if (audio->index >= HDPVR_AUDIO_INPUTS) return -EINVAL; if (dev->status != STATUS_IDLE) return -EAGAIN; retval = hdpvr_set_audio(dev, audio->index+1, dev->options.audio_codec); if (!retval) dev->options.audio_input = audio->index; return retval; } static int vidioc_g_audio(struct file *file, void *private_data, struct v4l2_audio *audio) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; audio->index = dev->options.audio_input; audio->capability = V4L2_AUDCAP_STEREO; strncpy(audio->name, audio_iname[audio->index], sizeof(audio->name)); audio->name[sizeof(audio->name) - 1] = '\0'; return 0; } static const s32 supported_v4l2_ctrls[] = { V4L2_CID_BRIGHTNESS, V4L2_CID_CONTRAST, V4L2_CID_SATURATION, V4L2_CID_HUE, V4L2_CID_SHARPNESS, V4L2_CID_MPEG_AUDIO_ENCODING, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_CID_MPEG_VIDEO_BITRATE, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, }; static int fill_queryctrl(struct hdpvr_options *opt, struct v4l2_queryctrl *qc, int ac3, int fw_ver) { int err; if (fw_ver > 0x15) { switch (qc->id) { case V4L2_CID_BRIGHTNESS: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x80); case V4L2_CID_CONTRAST: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x40); case V4L2_CID_SATURATION: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x40); case V4L2_CID_HUE: return v4l2_ctrl_query_fill(qc, 0x0, 0x1e, 1, 0xf); case V4L2_CID_SHARPNESS: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x80); } } else { switch (qc->id) { case V4L2_CID_BRIGHTNESS: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x86); case V4L2_CID_CONTRAST: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x80); case V4L2_CID_SATURATION: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 
0x80); case V4L2_CID_HUE: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x80); case V4L2_CID_SHARPNESS: return v4l2_ctrl_query_fill(qc, 0x0, 0xff, 1, 0x80); } } switch (qc->id) { case V4L2_CID_MPEG_AUDIO_ENCODING: return v4l2_ctrl_query_fill( qc, V4L2_MPEG_AUDIO_ENCODING_AAC, ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1, V4L2_MPEG_AUDIO_ENCODING_AAC); case V4L2_CID_MPEG_VIDEO_ENCODING: return v4l2_ctrl_query_fill( qc, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC); /* case V4L2_CID_MPEG_VIDEO_? maybe keyframe interval: */ /* return v4l2_ctrl_query_fill(qc, 0, 128, 128, 0); */ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return v4l2_ctrl_query_fill( qc, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 1, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); case V4L2_CID_MPEG_VIDEO_BITRATE: return v4l2_ctrl_query_fill(qc, 1000000, 13500000, 100000, 6500000); case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: err = v4l2_ctrl_query_fill(qc, 1100000, 20200000, 100000, 9000000); if (!err && opt->bitrate_mode == HDPVR_CONSTANT) qc->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; default: return -EINVAL; } } static int vidioc_queryctrl(struct file *file, void *private_data, struct v4l2_queryctrl *qc) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int i, next; u32 id = qc->id; memset(qc, 0, sizeof(*qc)); next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL); qc->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL; for (i = 0; i < ARRAY_SIZE(supported_v4l2_ctrls); i++) { if (next) { if (qc->id < supported_v4l2_ctrls[i]) qc->id = supported_v4l2_ctrls[i]; else continue; } if (qc->id == supported_v4l2_ctrls[i]) return fill_queryctrl(&dev->options, qc, dev->flags & HDPVR_FLAG_AC3_CAP, dev->fw_ver); if (qc->id < supported_v4l2_ctrls[i]) break; } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *private_data, struct v4l2_control *ctrl) { struct hdpvr_fh *fh = file->private_data; struct 
hdpvr_device *dev = fh->dev; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ctrl->value = dev->options.brightness; break; case V4L2_CID_CONTRAST: ctrl->value = dev->options.contrast; break; case V4L2_CID_SATURATION: ctrl->value = dev->options.saturation; break; case V4L2_CID_HUE: ctrl->value = dev->options.hue; break; case V4L2_CID_SHARPNESS: ctrl->value = dev->options.sharpness; break; default: return -EINVAL; } return 0; } static int vidioc_s_ctrl(struct file *file, void *private_data, struct v4l2_control *ctrl) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int retval; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: retval = hdpvr_config_call(dev, CTRL_BRIGHTNESS, ctrl->value); if (!retval) dev->options.brightness = ctrl->value; break; case V4L2_CID_CONTRAST: retval = hdpvr_config_call(dev, CTRL_CONTRAST, ctrl->value); if (!retval) dev->options.contrast = ctrl->value; break; case V4L2_CID_SATURATION: retval = hdpvr_config_call(dev, CTRL_SATURATION, ctrl->value); if (!retval) dev->options.saturation = ctrl->value; break; case V4L2_CID_HUE: retval = hdpvr_config_call(dev, CTRL_HUE, ctrl->value); if (!retval) dev->options.hue = ctrl->value; break; case V4L2_CID_SHARPNESS: retval = hdpvr_config_call(dev, CTRL_SHARPNESS, ctrl->value); if (!retval) dev->options.sharpness = ctrl->value; break; default: return -EINVAL; } return retval; } static int hdpvr_get_ctrl(struct hdpvr_options *opt, struct v4l2_ext_control *ctrl) { switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_ENCODING: ctrl->value = opt->audio_codec; break; case V4L2_CID_MPEG_VIDEO_ENCODING: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC; break; /* case V4L2_CID_MPEG_VIDEO_B_FRAMES: */ /* ctrl->value = (opt->gop_mode & 0x2) ? 0 : 128; */ /* break; */ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: ctrl->value = opt->bitrate_mode == HDPVR_CONSTANT ? 
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR : V4L2_MPEG_VIDEO_BITRATE_MODE_VBR; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctrl->value = opt->bitrate * 100000; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: ctrl->value = opt->peak_bitrate * 100000; break; case V4L2_CID_MPEG_STREAM_TYPE: ctrl->value = V4L2_MPEG_STREAM_TYPE_MPEG2_TS; break; default: return -EINVAL; } return 0; } static int vidioc_g_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *ctrls) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int i, err = 0; if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) { for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; err = hdpvr_get_ctrl(&dev->options, ctrl); if (err) { ctrls->error_idx = i; break; } } return err; } return -EINVAL; } static int hdpvr_try_ctrl(struct v4l2_ext_control *ctrl, int ac3) { int ret = -EINVAL; switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_ENCODING: if (ctrl->value == V4L2_MPEG_AUDIO_ENCODING_AAC || (ac3 && ctrl->value == V4L2_MPEG_AUDIO_ENCODING_AC3)) ret = 0; break; case V4L2_CID_MPEG_VIDEO_ENCODING: if (ctrl->value == V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC) ret = 0; break; /* case V4L2_CID_MPEG_VIDEO_B_FRAMES: */ /* if (ctrl->value == 0 || ctrl->value == 128) */ /* ret = 0; */ /* break; */ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: if (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR || ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) ret = 0; break; case V4L2_CID_MPEG_VIDEO_BITRATE: { uint bitrate = ctrl->value / 100000; if (bitrate >= 10 && bitrate <= 135) ret = 0; break; } case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: { uint peak_bitrate = ctrl->value / 100000; if (peak_bitrate >= 10 && peak_bitrate <= 202) ret = 0; break; } case V4L2_CID_MPEG_STREAM_TYPE: if (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS) ret = 0; break; default: return -EINVAL; } return 0; } static int vidioc_try_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *ctrls) { struct hdpvr_fh *fh = 
file->private_data; struct hdpvr_device *dev = fh->dev; int i, err = 0; if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) { for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; err = hdpvr_try_ctrl(ctrl, dev->flags & HDPVR_FLAG_AC3_CAP); if (err) { ctrls->error_idx = i; break; } } return err; } return -EINVAL; } static int hdpvr_set_ctrl(struct hdpvr_device *dev, struct v4l2_ext_control *ctrl) { struct hdpvr_options *opt = &dev->options; int ret = 0; switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_ENCODING: if (dev->flags & HDPVR_FLAG_AC3_CAP) { opt->audio_codec = ctrl->value; ret = hdpvr_set_audio(dev, opt->audio_input, opt->audio_codec); } break; case V4L2_CID_MPEG_VIDEO_ENCODING: break; /* case V4L2_CID_MPEG_VIDEO_B_FRAMES: */ /* if (ctrl->value == 0 && !(opt->gop_mode & 0x2)) { */ /* opt->gop_mode |= 0x2; */ /* hdpvr_config_call(dev, CTRL_GOP_MODE_VALUE, */ /* opt->gop_mode); */ /* } */ /* if (ctrl->value == 128 && opt->gop_mode & 0x2) { */ /* opt->gop_mode &= ~0x2; */ /* hdpvr_config_call(dev, CTRL_GOP_MODE_VALUE, */ /* opt->gop_mode); */ /* } */ /* break; */ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: if (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && opt->bitrate_mode != HDPVR_CONSTANT) { opt->bitrate_mode = HDPVR_CONSTANT; hdpvr_config_call(dev, CTRL_BITRATE_MODE_VALUE, opt->bitrate_mode); } if (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && opt->bitrate_mode == HDPVR_CONSTANT) { opt->bitrate_mode = HDPVR_VARIABLE_AVERAGE; hdpvr_config_call(dev, CTRL_BITRATE_MODE_VALUE, opt->bitrate_mode); } break; case V4L2_CID_MPEG_VIDEO_BITRATE: { uint bitrate = ctrl->value / 100000; opt->bitrate = bitrate; if (bitrate >= opt->peak_bitrate) opt->peak_bitrate = bitrate+1; hdpvr_set_bitrate(dev); break; } case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: { uint peak_bitrate = ctrl->value / 100000; if (opt->bitrate_mode == HDPVR_CONSTANT) break; if (opt->bitrate < peak_bitrate) { opt->peak_bitrate = peak_bitrate; hdpvr_set_bitrate(dev); } else ret = 
-EINVAL; break; } case V4L2_CID_MPEG_STREAM_TYPE: break; default: return -EINVAL; } return ret; } static int vidioc_s_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *ctrls) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; int i, err = 0; if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) { for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; err = hdpvr_try_ctrl(ctrl, dev->flags & HDPVR_FLAG_AC3_CAP); if (err) { ctrls->error_idx = i; break; } err = hdpvr_set_ctrl(dev, ctrl); if (err) { ctrls->error_idx = i; break; } } return err; } return -EINVAL; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data, struct v4l2_fmtdesc *f) { if (f->index != 0 || f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; f->flags = V4L2_FMT_FLAG_COMPRESSED; strncpy(f->description, "MPEG2-TS with AVC/AAC streams", 32); f->pixelformat = V4L2_PIX_FMT_MPEG; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *private_data, struct v4l2_format *f) { struct hdpvr_fh *fh = file->private_data; struct hdpvr_device *dev = fh->dev; struct hdpvr_video_info *vid_info; if (!dev) return -ENODEV; vid_info = get_video_info(dev); if (!vid_info) return -EFAULT; f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.width = vid_info->width; f->fmt.pix.height = vid_info->height; f->fmt.pix.sizeimage = dev->bulk_in_size; f->fmt.pix.colorspace = 0; f->fmt.pix.bytesperline = 0; f->fmt.pix.field = V4L2_FIELD_ANY; kfree(vid_info); return 0; } static int vidioc_encoder_cmd(struct file *filp, void *priv, struct v4l2_encoder_cmd *a) { struct hdpvr_fh *fh = filp->private_data; struct hdpvr_device *dev = fh->dev; int res; mutex_lock(&dev->io_mutex); memset(&a->raw, 0, sizeof(a->raw)); switch (a->cmd) { case V4L2_ENC_CMD_START: a->flags = 0; res = hdpvr_start_streaming(dev); break; case V4L2_ENC_CMD_STOP: res = hdpvr_stop_streaming(dev); break; default: 
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "Unsupported encoder cmd %d\n", a->cmd); res = -EINVAL; } mutex_unlock(&dev->io_mutex); return res; } static int vidioc_try_encoder_cmd(struct file *filp, void *priv, struct v4l2_encoder_cmd *a) { switch (a->cmd) { case V4L2_ENC_CMD_START: case V4L2_ENC_CMD_STOP: return 0; default: return -EINVAL; } } static const struct v4l2_ioctl_ops hdpvr_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_enumaudio = vidioc_enumaudio, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_encoder_cmd = vidioc_encoder_cmd, .vidioc_try_encoder_cmd = vidioc_try_encoder_cmd, }; static void hdpvr_device_release(struct video_device *vdev) { struct hdpvr_device *dev = video_get_drvdata(vdev); hdpvr_delete(dev); mutex_lock(&dev->io_mutex); destroy_workqueue(dev->workqueue); mutex_unlock(&dev->io_mutex); v4l2_device_unregister(&dev->v4l2_dev); /* deregister I2C adapter */ #if defined(CONFIG_I2C) || (CONFIG_I2C_MODULE) mutex_lock(&dev->i2c_mutex); i2c_del_adapter(&dev->i2c_adapter); mutex_unlock(&dev->i2c_mutex); #endif /* CONFIG_I2C */ kfree(dev->usbc_buf); kfree(dev); } static const struct video_device hdpvr_video_template = { /* .type = VFL_TYPE_GRABBER, */ /* .type2 = VID_TYPE_CAPTURE | VID_TYPE_MPEG_ENCODER, */ .fops = &hdpvr_fops, .release = hdpvr_device_release, .ioctl_ops = &hdpvr_ioctl_ops, .tvnorms = V4L2_STD_NTSC | V4L2_STD_SECAM | V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | V4L2_STD_PAL_I | V4L2_STD_PAL_D | V4L2_STD_PAL_M | 
V4L2_STD_PAL_N | V4L2_STD_PAL_60, .current_norm = V4L2_STD_NTSC | V4L2_STD_PAL_M | V4L2_STD_PAL_60, }; int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, int devnum) { /* setup and register video device */ dev->video_dev = video_device_alloc(); if (!dev->video_dev) { v4l2_err(&dev->v4l2_dev, "video_device_alloc() failed\n"); goto error; } *(dev->video_dev) = hdpvr_video_template; strcpy(dev->video_dev->name, "Hauppauge HD PVR"); dev->video_dev->parent = parent; video_set_drvdata(dev->video_dev, dev); if (video_register_device(dev->video_dev, VFL_TYPE_GRABBER, devnum)) { v4l2_err(&dev->v4l2_dev, "video_device registration failed\n"); goto error; } return 0; error: return -ENOMEM; }
gpl-2.0
angelbbs/linux-sunxi
drivers/input/mouse/synaptics_usb.c
4860
14807
/* * USB Synaptics device driver * * Copyright (c) 2002 Rob Miller (rob@inpharmatica . co . uk) * Copyright (c) 2003 Ron Lee (ron@debian.org) * cPad driver for kernel 2.4 * * Copyright (c) 2004 Jan Steinhoff (cpad@jan-steinhoff . de) * Copyright (c) 2004 Ron Lee (ron@debian.org) * rewritten for kernel 2.6 * * cPad display character device part is not included. It can be found at * http://jan-steinhoff.de/linux/synaptics-usb.html * * Bases on: usb_skeleton.c v2.2 by Greg Kroah-Hartman * drivers/hid/usbhid/usbmouse.c by Vojtech Pavlik * drivers/input/mouse/synaptics.c by Peter Osterlund * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * Trademarks are the property of their respective owners. */ /* * There are three different types of Synaptics USB devices: Touchpads, * touchsticks (or trackpoints), and touchscreens. Touchpads are well supported * by this driver, touchstick support has not been tested much yet, and * touchscreens have not been tested at all. * * Up to three alternate settings are possible: * setting 0: one int endpoint for relative movement (used by usbhid.ko) * setting 1: one int endpoint for absolute finger position * setting 2 (cPad only): one int endpoint for absolute finger position and * two bulk endpoints for the display (in/out) * This driver uses setting 1. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/usb.h> #include <linux/input.h> #include <linux/usb/input.h> #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 /* Synaptics USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 /* Integrated USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003 /* Synaptics cPad */ #define USB_DEVICE_ID_SYNAPTICS_TS 0x0006 /* Synaptics TouchScreen */ #define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007 /* Synaptics USB Styk */ #define USB_DEVICE_ID_SYNAPTICS_WP 0x0008 /* Synaptics USB WheelPad */ #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 /* Composite USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 /* Wireless TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 /* DisplayPad */ #define SYNUSB_TOUCHPAD (1 << 0) #define SYNUSB_STICK (1 << 1) #define SYNUSB_TOUCHSCREEN (1 << 2) #define SYNUSB_AUXDISPLAY (1 << 3) /* For cPad */ #define SYNUSB_COMBO (1 << 4) /* Composite device (TP + stick) */ #define SYNUSB_IO_ALWAYS (1 << 5) #define USB_DEVICE_SYNAPTICS(prod, kind) \ USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, \ USB_DEVICE_ID_SYNAPTICS_##prod), \ .driver_info = (kind), #define SYNUSB_RECV_SIZE 8 #define XMIN_NOMINAL 1472 #define XMAX_NOMINAL 5472 #define YMIN_NOMINAL 1408 #define YMAX_NOMINAL 4448 struct synusb { struct usb_device *udev; struct usb_interface *intf; struct urb *urb; unsigned char *data; /* input device related data structures */ struct input_dev *input; char name[128]; char phys[64]; /* characteristics of the device */ unsigned long flags; }; static void synusb_report_buttons(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; input_report_key(input_dev, BTN_LEFT, synusb->data[1] & 0x04); input_report_key(input_dev, BTN_RIGHT, synusb->data[1] & 0x01); input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x02); } static void synusb_report_stick(struct 
synusb *synusb) { struct input_dev *input_dev = synusb->input; int x, y; unsigned int pressure; pressure = synusb->data[6]; x = (s16)(be16_to_cpup((__be16 *)&synusb->data[2]) << 3) >> 7; y = (s16)(be16_to_cpup((__be16 *)&synusb->data[4]) << 3) >> 7; if (pressure > 0) { input_report_rel(input_dev, REL_X, x); input_report_rel(input_dev, REL_Y, -y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); synusb_report_buttons(synusb); input_sync(input_dev); } static void synusb_report_touchpad(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; unsigned int num_fingers, tool_width; unsigned int x, y; unsigned int pressure, w; pressure = synusb->data[6]; x = be16_to_cpup((__be16 *)&synusb->data[2]); y = be16_to_cpup((__be16 *)&synusb->data[4]); w = synusb->data[0] & 0x0f; if (pressure > 0) { num_fingers = 1; tool_width = 5; switch (w) { case 0 ... 1: num_fingers = 2 + w; break; case 2: /* pen, pretend its a finger */ break; case 4 ... 15: tool_width = w; break; } } else { num_fingers = 0; tool_width = 0; } /* * Post events * BTN_TOUCH has to be first as mousedev relies on it when doing * absolute -> relative conversion */ if (pressure > 30) input_report_key(input_dev, BTN_TOUCH, 1); if (pressure < 25) input_report_key(input_dev, BTN_TOUCH, 0); if (num_fingers > 0) { input_report_abs(input_dev, ABS_X, x); input_report_abs(input_dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); input_report_abs(input_dev, ABS_TOOL_WIDTH, tool_width); input_report_key(input_dev, BTN_TOOL_FINGER, num_fingers == 1); input_report_key(input_dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); input_report_key(input_dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); synusb_report_buttons(synusb); if (synusb->flags & SYNUSB_AUXDISPLAY) input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x08); input_sync(input_dev); } static void synusb_irq(struct urb *urb) { struct synusb *synusb = urb->context; int error; /* Check our status in case we 
need to bail out early. */ switch (urb->status) { case 0: usb_mark_last_busy(synusb->udev); break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; break; } if (synusb->flags & SYNUSB_STICK) synusb_report_stick(synusb); else synusb_report_touchpad(synusb); resubmit: error = usb_submit_urb(urb, GFP_ATOMIC); if (error && error != -EPERM) dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed with result: %d", __func__, error); } static struct usb_endpoint_descriptor * synusb_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } static int synusb_open(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int retval; retval = usb_autopm_get_interface(synusb->intf); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_autopm_get_interface failed, error: %d\n", __func__, retval); return retval; } retval = usb_submit_urb(synusb->urb, GFP_KERNEL); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed, error: %d\n", __func__, retval); retval = -EIO; goto out; } synusb->intf->needs_remote_wakeup = 1; out: usb_autopm_put_interface(synusb->intf); return retval; } static void synusb_close(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int autopm_error; autopm_error = usb_autopm_get_interface(synusb->intf); usb_kill_urb(synusb->urb); synusb->intf->needs_remote_wakeup = 0; if (!autopm_error) usb_autopm_put_interface(synusb->intf); } static int synusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *ep; struct synusb *synusb; struct input_dev *input_dev; unsigned int intf_num = 
intf->cur_altsetting->desc.bInterfaceNumber; unsigned int altsetting = min(intf->num_altsetting, 1U); int error; error = usb_set_interface(udev, intf_num, altsetting); if (error) { dev_err(&udev->dev, "Can not set alternate setting to %i, error: %i", altsetting, error); return error; } ep = synusb_get_in_endpoint(intf->cur_altsetting); if (!ep) return -ENODEV; synusb = kzalloc(sizeof(*synusb), GFP_KERNEL); input_dev = input_allocate_device(); if (!synusb || !input_dev) { error = -ENOMEM; goto err_free_mem; } synusb->udev = udev; synusb->intf = intf; synusb->input = input_dev; synusb->flags = id->driver_info; if (synusb->flags & SYNUSB_COMBO) { /* * This is a combo device, we need to set proper * capability, depending on the interface. */ synusb->flags |= intf_num == 1 ? SYNUSB_STICK : SYNUSB_TOUCHPAD; } synusb->urb = usb_alloc_urb(0, GFP_KERNEL); if (!synusb->urb) { error = -ENOMEM; goto err_free_mem; } synusb->data = usb_alloc_coherent(udev, SYNUSB_RECV_SIZE, GFP_KERNEL, &synusb->urb->transfer_dma); if (!synusb->data) { error = -ENOMEM; goto err_free_urb; } usb_fill_int_urb(synusb->urb, udev, usb_rcvintpipe(udev, ep->bEndpointAddress), synusb->data, SYNUSB_RECV_SIZE, synusb_irq, synusb, ep->bInterval); synusb->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (udev->manufacturer) strlcpy(synusb->name, udev->manufacturer, sizeof(synusb->name)); if (udev->product) { if (udev->manufacturer) strlcat(synusb->name, " ", sizeof(synusb->name)); strlcat(synusb->name, udev->product, sizeof(synusb->name)); } if (!strlen(synusb->name)) snprintf(synusb->name, sizeof(synusb->name), "USB Synaptics Device %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); if (synusb->flags & SYNUSB_STICK) strlcat(synusb->name, " (Stick) ", sizeof(synusb->name)); usb_make_path(udev, synusb->phys, sizeof(synusb->phys)); strlcat(synusb->phys, "/input0", sizeof(synusb->phys)); input_dev->name = synusb->name; input_dev->phys = synusb->phys; 
usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &synusb->intf->dev; if (!(synusb->flags & SYNUSB_IO_ALWAYS)) { input_dev->open = synusb_open; input_dev->close = synusb_close; } input_set_drvdata(input_dev, synusb); __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); if (synusb->flags & SYNUSB_STICK) { __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); } else { input_set_abs_params(input_dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, 15, 0, 0); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); } __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); usb_set_intfdata(intf, synusb); if (synusb->flags & SYNUSB_IO_ALWAYS) { error = synusb_open(input_dev); if (error) goto err_free_dma; } error = input_register_device(input_dev); if (error) { dev_err(&udev->dev, "Failed to register input device, error %d\n", error); goto err_stop_io; } return 0; err_stop_io: if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); err_free_dma: usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); err_free_urb: usb_free_urb(synusb->urb); err_free_mem: input_free_device(input_dev); kfree(synusb); usb_set_intfdata(intf, NULL); return error; } static void synusb_disconnect(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); input_unregister_device(synusb->input); 
usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); usb_free_urb(synusb->urb); kfree(synusb); usb_set_intfdata(intf, NULL); } static int synusb_suspend(struct usb_interface *intf, pm_message_t message) { struct synusb *synusb = usb_get_intfdata(intf); struct input_dev *input_dev = synusb->input; mutex_lock(&input_dev->mutex); usb_kill_urb(synusb->urb); mutex_unlock(&input_dev->mutex); return 0; } static int synusb_resume(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct input_dev *input_dev = synusb->input; int retval = 0; mutex_lock(&input_dev->mutex); if ((input_dev->users || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&input_dev->mutex); return retval; } static int synusb_pre_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct input_dev *input_dev = synusb->input; mutex_lock(&input_dev->mutex); usb_kill_urb(synusb->urb); return 0; } static int synusb_post_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct input_dev *input_dev = synusb->input; int retval = 0; if ((input_dev->users || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&input_dev->mutex); return retval; } static int synusb_reset_resume(struct usb_interface *intf) { return synusb_resume(intf); } static struct usb_device_id synusb_idtable[] = { { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(CPAD, SYNUSB_TOUCHPAD | SYNUSB_AUXDISPLAY | SYNUSB_IO_ALWAYS) }, { USB_DEVICE_SYNAPTICS(TS, SYNUSB_TOUCHSCREEN) }, { USB_DEVICE_SYNAPTICS(STICK, SYNUSB_STICK) }, { USB_DEVICE_SYNAPTICS(WP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(COMP_TP, SYNUSB_COMBO) }, { USB_DEVICE_SYNAPTICS(WTP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(DPAD, SYNUSB_TOUCHPAD) }, { } }; 
MODULE_DEVICE_TABLE(usb, synusb_idtable); static struct usb_driver synusb_driver = { .name = "synaptics_usb", .probe = synusb_probe, .disconnect = synusb_disconnect, .id_table = synusb_idtable, .suspend = synusb_suspend, .resume = synusb_resume, .pre_reset = synusb_pre_reset, .post_reset = synusb_post_reset, .reset_resume = synusb_reset_resume, .supports_autosuspend = 1, }; module_usb_driver(synusb_driver); MODULE_AUTHOR("Rob Miller <rob@inpharmatica.co.uk>, " "Ron Lee <ron@debian.org>, " "Jan Steinhoff <cpad@jan-steinhoff.de>"); MODULE_DESCRIPTION("Synaptics USB device driver"); MODULE_LICENSE("GPL");
gpl-2.0
MikeC84/mac_kernel_lge_hammerhead
arch/arm/mach-omap2/clock2xxx.c
4860
1567
/* * clock2xxx.c - OMAP2xxx-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/cpu.h> #include <plat/clock.h> #include "clock.h" #include "clock2xxx.h" #include "cm.h" #include "cm-regbits-24xx.h" struct clk *vclk, *sclk, *dclk; /* * Omap24xx specific clock functions */ /* * Set clocks for bypass mode for reboot to work. */ void omap2xxx_clk_prepare_for_reboot(void) { u32 rate; if (vclk == NULL || sclk == NULL) return; rate = clk_get_rate(sclk); clk_set_rate(vclk, rate); } /* * Switch the MPU rate if specified on cmdline. We cannot do this * early until cmdline is parsed. XXX This should be removed from the * clock code and handled by the OPP layer code in the near future. */ static int __init omap2xxx_clk_arch_init(void) { int ret; if (!cpu_is_omap24xx()) return 0; ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set"); if (!ret) omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck"); return ret; } arch_initcall(omap2xxx_clk_arch_init);
gpl-2.0
RobbieL811/kernel_asus_flo
kernel/debug/kdb/kdb_debugger.c
5116
4340
/*
 * Created by: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include "kdb_private.h"
#include "../debug_core.h"

/*
 * KDB interface to KGDB internals
 */
get_char_func kdb_poll_funcs[] = {
	dbg_io_get_char,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};
EXPORT_SYMBOL_GPL(kdb_poll_funcs);

int kdb_poll_idx = 1;
EXPORT_SYMBOL_GPL(kdb_poll_idx);

/* last kgdb state seen; consumed by kdb_gdb_state_pass() below */
static struct kgdb_state *kdb_ks;

/*
 * Entry point from the kgdb debug core into kdb.  Classifies why the
 * debugger was entered (breakpoint, single step, keyboard, oops, cpu
 * switch), runs the kdb main loop, and translates the exit reason back
 * into a debug-core return value.
 */
int kdb_stub(struct kgdb_state *ks)
{
	int error = 0;
	kdb_bp_t *bp;
	unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	kdb_reason_t reason = KDB_REASON_OOPS;
	kdb_dbtrap_t db_result = KDB_DB_NOBPT;
	int i;

	kdb_ks = ks;
	if (KDB_STATE(REENTRY)) {
		reason = KDB_REASON_SWITCH;
		KDB_STATE_CLEAR(REENTRY);
		addr = instruction_pointer(ks->linux_regs);
	}
	ks->pass_exception = 0;
	if (atomic_read(&kgdb_setting_breakpoint))
		reason = KDB_REASON_KEYBOARD;

	/* did we stop on one of kdb's own breakpoints? */
	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
		if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
			reason = KDB_REASON_BREAK;
			db_result = KDB_DB_BPT;
			if (addr != instruction_pointer(ks->linux_regs))
				kgdb_arch_set_pc(ks->linux_regs, addr);
			break;
		}
	}
	if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
		for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
			if (bp->bp_free)
				continue;
			if (bp->bp_addr == addr) {
				bp->bp_delay = 1;
				bp->bp_delayed = 1;
	/*
	 * SSBPT is set when the kernel debugger must single step a
	 * task in order to re-establish an instruction breakpoint
	 * which uses the instruction replacement mechanism.  It is
	 * cleared by any action that removes the need to single-step
	 * the breakpoint.
	 */
				reason = KDB_REASON_BREAK;
				db_result = KDB_DB_BPT;
				KDB_STATE_SET(SSBPT);
				break;
			}
		}
	}

	if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
	    ks->signo == SIGTRAP) {
		reason = KDB_REASON_SSTEP;
		db_result = KDB_DB_BPT;
	}
	/* Set initial kdb state variables */
	KDB_STATE_CLEAR(KGDB_TRANS);
	kdb_initial_cpu = atomic_read(&kgdb_active);
	kdb_current_task = kgdb_info[ks->cpu].task;
	kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
	/* Remove any breakpoints as needed by kdb and clear single step */
	kdb_bp_remove();
	KDB_STATE_CLEAR(DOING_SS);
	KDB_STATE_CLEAR(DOING_SSB);
	KDB_STATE_SET(PAGER);
	/* zero out any offline cpu data */
	for_each_present_cpu(i) {
		if (!cpu_online(i)) {
			kgdb_info[i].debuggerinfo = NULL;
			kgdb_info[i].task = NULL;
		}
	}
	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
		ks->pass_exception = 1;
		KDB_FLAG_SET(CATASTROPHIC);
	}
	if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
		/* single step completed a delayed breakpoint re-install;
		 * no need to enter the interactive loop */
		KDB_STATE_CLEAR(SSBPT);
		KDB_STATE_CLEAR(DOING_SS);
	} else {
		/* Start kdb main loop */
		error = kdb_main_loop(KDB_REASON_ENTER, reason,
				      ks->err_code, db_result, ks->linux_regs);
	}
	/*
	 * Upon exit from the kdb main loop setup break points and restart
	 * the system based on the requested continue state
	 */
	kdb_initial_cpu = -1;
	kdb_current_task = NULL;
	kdb_current_regs = NULL;
	KDB_STATE_CLEAR(PAGER);
	kdbnearsym_cleanup();
	if (error == KDB_CMD_KGDB) {
		if (KDB_STATE(DOING_KGDB))
			KDB_STATE_CLEAR(DOING_KGDB);
		return DBG_PASS_EVENT;
	}
	kdb_bp_install(ks->linux_regs);
	dbg_activate_sw_breakpoints();
	/* Set the exit state to a single step or a continue */
	if (KDB_STATE(DOING_SS))
		gdbstub_state(ks, "s");
	else
		gdbstub_state(ks, "c");

	KDB_FLAG_CLEAR(CATASTROPHIC);

	/* Invoke arch specific exception handling prior to system resume */
	kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
	if (ks->pass_exception)
		kgdb_info[ks->cpu].ret_state = 1;
	if (error == KDB_CMD_CPU) {
		KDB_STATE_SET(REENTRY);
		/*
		 * Force clear the single step bit because kdb emulates this
		 * differently vs the gdbstub
		 */
		kgdb_single_step = 0;
		dbg_deactivate_sw_breakpoints();
		return DBG_SWITCH_CPU_EVENT;
	}
	return kgdb_info[ks->cpu].ret_state;
}

/* Forward a raw gdb-protocol state string to the stub for the saved state. */
void kdb_gdb_state_pass(char *buf)
{
	gdbstub_state(kdb_ks, buf);
}
gpl-2.0
drgogeta86/android_kernel_asus_flo
drivers/scsi/libfc/fc_disc.c
5116
20480
/* * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * Target Discovery * * This block discovers all FC-4 remote ports, including FCP initiators. It * also handles RSCN events and re-discovery if necessary. */ /* * DISC LOCKING * * The disc mutex is can be locked when acquiring rport locks, but may not * be held when acquiring the lport lock. Refer to fc_lport.c for more * details. */ #include <linux/timer.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <asm/unaligned.h> #include <scsi/fc/fc_gs.h> #include <scsi/libfc.h> #include "fc_libfc.h" #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static void fc_disc_done(struct fc_disc *, enum fc_disc_event); static void fc_disc_timeout(struct work_struct *); static int fc_disc_single(struct fc_lport *, struct fc_disc_port *); static void fc_disc_restart(struct fc_disc *); /** * fc_disc_stop_rports() - Delete all the remote ports associated with the lport * @disc: The discovery job to stop remote ports on * * Locking Note: This function expects that the lport mutex is locked before * calling it. 
*/ static void fc_disc_stop_rports(struct fc_disc *disc) { struct fc_lport *lport; struct fc_rport_priv *rdata; lport = fc_disc_lport(disc); mutex_lock(&disc->disc_mutex); list_for_each_entry_rcu(rdata, &disc->rports, peers) lport->tt.rport_logoff(rdata); mutex_unlock(&disc->disc_mutex); } /** * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) * @disc: The discovery object to which the RSCN applies * @fp: The RSCN frame * * Locking Note: This function expects that the disc_mutex is locked * before it is called. */ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) { struct fc_lport *lport; struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; struct fc_seq_els_data rjt_data; unsigned int len; int redisc = 0; enum fc_els_rscn_ev_qual ev_qual; enum fc_els_rscn_addr_fmt fmt; LIST_HEAD(disc_ports); struct fc_disc_port *dp, *next; lport = fc_disc_lport(disc); FC_DISC_DBG(disc, "Received an RSCN event\n"); /* make sure the frame contains an RSCN message */ rp = fc_frame_payload_get(fp, sizeof(*rp)); if (!rp) goto reject; /* make sure the page length is as expected (4 bytes) */ if (rp->rscn_page_len != sizeof(*pp)) goto reject; /* get the RSCN payload length */ len = ntohs(rp->rscn_plen); if (len < sizeof(*rp)) goto reject; /* make sure the frame contains the expected payload */ rp = fc_frame_payload_get(fp, len); if (!rp) goto reject; /* payload must be a multiple of the RSCN page size */ len -= sizeof(*rp); if (len % sizeof(*pp)) goto reject; for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) { ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT; ev_qual &= ELS_RSCN_EV_QUAL_MASK; fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT; fmt &= ELS_RSCN_ADDR_FMT_MASK; /* * if we get an address format other than port * (area, domain, fabric), then do a full discovery */ switch (fmt) { case ELS_ADDR_FMT_PORT: FC_DISC_DBG(disc, "Port address format for port " "(%6.6x)\n", ntoh24(pp->rscn_fid)); dp = 
kzalloc(sizeof(*dp), GFP_KERNEL); if (!dp) { redisc = 1; break; } dp->lp = lport; dp->port_id = ntoh24(pp->rscn_fid); list_add_tail(&dp->peers, &disc_ports); break; case ELS_ADDR_FMT_AREA: case ELS_ADDR_FMT_DOM: case ELS_ADDR_FMT_FAB: default: FC_DISC_DBG(disc, "Address format is (%d)\n", fmt); redisc = 1; break; } } lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); /* * If not doing a complete rediscovery, do GPN_ID on * the individual ports mentioned in the list. * If any of these get an error, do a full rediscovery. * In any case, go through the list and free the entries. */ list_for_each_entry_safe(dp, next, &disc_ports, peers) { list_del(&dp->peers); if (!redisc) redisc = fc_disc_single(lport, dp); kfree(dp); } if (redisc) { FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); fc_disc_restart(disc); } else { FC_DISC_DBG(disc, "RSCN received: not rediscovering. " "redisc %d state %d in_prog %d\n", redisc, lport->state, disc->pending); } fc_frame_free(fp); return; reject: FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } /** * fc_disc_recv_req() - Handle incoming requests * @lport: The local port receiving the request * @fp: The request frame * * Locking Note: This function is called from the EM and will lock * the disc_mutex before calling the handler for the * request. 
 */
static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	u8 op;
	struct fc_disc *disc = &lport->disc;

	op = fc_frame_payload_op(fp);
	switch (op) {
	case ELS_RSCN:
		/* RSCN handling requires the discovery mutex. */
		mutex_lock(&disc->disc_mutex);
		fc_disc_recv_rscn_req(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		break;
	default:
		FC_DISC_DBG(disc, "Received an unsupported request, "
			    "the opcode is (%x)\n", op);
		fc_frame_free(fp);
		break;
	}
}

/**
 * fc_disc_restart() - Restart discovery
 * @disc: The discovery object to be restarted
 *
 * Marks discovery as requested and, unless a pass is already in
 * flight, bumps the discovery generation id and kicks off a GPN_FT.
 *
 * Locking Note: This function expects that the disc mutex
 *		 is already locked.
 */
static void fc_disc_restart(struct fc_disc *disc)
{
	/* No callback registered yet: discovery has never been started. */
	if (!disc->disc_callback)
		return;

	FC_DISC_DBG(disc, "Restarting discovery\n");

	disc->requested = 1;
	if (disc->pending)
		return;

	/*
	 * Advance disc_id.  This is an arbitrary non-zero number that will
	 * match the value in the fc_rport_priv after discovery for all
	 * freshly-discovered remote ports.  Avoid wrapping to zero.
	 */
	disc->disc_id = (disc->disc_id + 2) | 1;
	disc->retry_count = 0;
	fc_disc_gpn_ft_req(disc);
}

/**
 * fc_disc_start() - Start discovery on a local port
 * @lport:	   The local port to have discovery started on
 * @disc_callback: Callback function to be called when discovery is complete
 */
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
						enum fc_disc_event),
			  struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	/*
	 * At this point we may have a new disc job or an existing
	 * one. Either way, let's lock when we make changes to it
	 * and send the GPN_FT request.
	 */
	mutex_lock(&disc->disc_mutex);
	disc->disc_callback = disc_callback;
	fc_disc_restart(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_done() - Discovery has been completed
 * @disc:  The discovery context
 * @event: The discovery completion status
 *
 * Locking Note: This function expects that the disc mutex is locked before
 * it is called. The discovery callback is then made with the lock released,
 * and the lock is re-taken before returning from this function
 */
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
	struct fc_lport *lport = fc_disc_lport(disc);
	struct fc_rport_priv *rdata;

	FC_DISC_DBG(disc, "Discovery complete\n");

	disc->pending = 0;
	/* A restart was requested while this pass ran: start over now. */
	if (disc->requested) {
		fc_disc_restart(disc);
		return;
	}

	/*
	 * Go through all remote ports.	 If they were found in the latest
	 * discovery, reverify or log them in.	Otherwise, log them out.
	 * Skip ports which were never discovered.  These are the dNS port
	 * and ports which were created by PLOGI.
	 */
	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
		if (!rdata->disc_id)
			continue;
		if (rdata->disc_id == disc->disc_id)
			lport->tt.rport_login(rdata);
		else
			lport->tt.rport_logoff(rdata);
	}

	/* Drop disc_mutex across the callback, per the locking note above. */
	mutex_unlock(&disc->disc_mutex);
	disc->disc_callback(lport, event);
	mutex_lock(&disc->disc_mutex);
}

/**
 * fc_disc_error() - Handle error on dNS request
 * @disc: The discovery context
 * @fp:	  The error code encoded as a frame pointer
 *
 * Retries with a delay on allocation failure or exchange timeout,
 * up to FC_DISC_RETRY_LIMIT; fails discovery after that.
 */
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
	struct fc_lport *lport = fc_disc_lport(disc);
	unsigned long delay = 0;

	FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
		    PTR_ERR(fp), disc->retry_count,
		    FC_DISC_RETRY_LIMIT);

	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure, or the exchange timed out,
		 * retry after delay.
		 */
		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
			/* go ahead and retry */
			if (!fp)
				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
			else {
				delay = msecs_to_jiffies(lport->e_d_tov);

				/* timeout faster first time */
				if (!disc->retry_count)
					delay /= 4;
			}
			disc->retry_count++;
			schedule_delayed_work(&disc->disc_work, delay);
		} else
			fc_disc_done(disc, DISC_EV_FAILED);
	} else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
		/*
		 * if discovery fails due to lport reset, clear
		 * pending flag so that subsequent discovery can
		 * continue
		 */
		disc->pending = 0;
	}
}

/**
 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
 * @disc: The discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
	struct fc_frame *fp;
	struct fc_lport *lport = fc_disc_lport(disc);

	WARN_ON(!fc_lport_test_ready(lport));

	disc->pending = 1;
	disc->requested = 0;

	/* Reset the multi-frame reassembly state for this pass. */
	disc->buf_len = 0;
	disc->seq_count = 0;
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_gid_ft));
	if (!fp)
		goto err;

	if (lport->tt.elsct_send(lport, 0, fp,
				 FC_NS_GPN_FT,
				 fc_disc_gpn_ft_resp,
				 disc, 3 * lport->r_a_tov))
		return;
err:
	fc_disc_error(disc, NULL);
}

/**
 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
 * @disc: The discovery context the response belongs to
 * @buf:  The GPN_FT response buffer
 * @len:  The size of response buffer
 *
 * Goes through the list of IDs and names resulting from a request.
 * A name record may be split across two frames; the fragment is kept
 * in disc->partial_buf / disc->buf_len between calls.
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;
	char *bp;
	size_t plen;
	size_t tlen;
	int error = 0;
	struct fc_rport_identifiers ids;
	struct fc_rport_priv *rdata;

	lport = fc_disc_lport(disc);
	disc->seq_count++;

	/*
	 * Handle partial name record left over from previous call.
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	disc->buf_len = 0;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		np = &disc->partial_buf;
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but from the partial case above,
	 * bp, len describe the overall buffer, and np, plen describe the
	 * partial buffer, which if would usually be full now.
	 * After the first time through the loop, things return to "normal".
	 */
	while (plen >= sizeof(*np)) {
		ids.port_id = ntoh24(np->fp_fid);
		ids.port_name = ntohll(np->fp_wwpn);

		/* Skip our own local port's entry. */
		if (ids.port_id != lport->port_id &&
		    ids.port_name != lport->wwpn) {
			rdata = lport->tt.rport_create(lport, ids.port_id);
			if (rdata) {
				rdata->ids.port_name = ids.port_name;
				rdata->disc_id = disc->disc_id;
			} else {
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
				       "(%6.6x)\n", ids.port_id);
				error = -ENOMEM;
			}
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			fc_disc_done(disc, DISC_EV_SUCCESS);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DISC_DBG(disc, "Partial buffer remains "
				    "for discovery\n");
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	}
	return error;
}

/**
 * fc_disc_timeout() - Handler for discovery timeouts
 * @work: Structure holding discovery context that needs to retry discovery
 *
 * Runs from the delayed work scheduled in fc_disc_error() and simply
 * re-issues the GPN_FT request under the disc mutex.
 */
static void fc_disc_timeout(struct work_struct *work)
{
	struct fc_disc *disc = container_of(work,
					    struct fc_disc,
					    disc_work.work);
	mutex_lock(&disc->disc_mutex);
	fc_disc_gpn_ft_req(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
 * @sp:	      The sequence that the GPN_FT response was received on
 * @fp:	      The GPN_FT response frame
 * @disc_arg: The discovery context
 *
 * Locking Note: This function is called without disc mutex held, and
 *		 should do all its processing with the mutex held
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	enum fc_disc_event event = DISC_EV_NONE;
	unsigned int seq_cnt;
	unsigned int len;
	int error = 0;

	mutex_lock(&disc->disc_mutex);
	FC_DISC_DBG(disc, "Received a GPN_FT response\n");

	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
	/* First frame of the response carries the CT header; later frames
	 * (SOF_N3, consecutive seq_cnt) carry only name records. */
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
				    fr_len(fp));
			event = DISC_EV_FAILED;
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/* Accepted, parse the response. */
			len -= sizeof(*cp);
			error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
				    "(check zoning)\n", cp->ct_reason,
				    cp->ct_explan);
			event = DISC_EV_FAILED;
			/* "FC-4 type not registered" just means an empty
			 * fabric for our type: treat as success. */
			if (cp->ct_reason == FC_FS_RJT_UNABL &&
			    cp->ct_explan == FC_FS_EXP_FTNR)
				event = DISC_EV_SUCCESS;
		} else {
			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
				    "%x\n", ntohs(cp->ct_cmd));
			event = DISC_EV_FAILED;
		}
	} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
		error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
	} else {
		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
			    "seq_cnt %x expected %x sof %x eof %x\n",
			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
		event = DISC_EV_FAILED;
	}
	if (error)
		fc_disc_error(disc, fp);
	else if (event != DISC_EV_NONE)
		fc_disc_done(disc, event);
	fc_frame_free(fp);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
 * @sp:	       The sequence the GPN_ID is on
 * @fp:	       The response frame
 * @rdata_arg: The remote port that sent the GPN_ID response
 *
 * Locking Note: This function is called without disc mutex held.
 */
static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport_priv *new_rdata;
	struct fc_lport *lport;
	struct fc_disc *disc;
	struct fc_ct_hdr *cp;
	struct fc_ns_gid_pn *pn;
	u64 port_name;

	lport = rdata->local_port;
	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		goto out;
	if (IS_ERR(fp))
		goto redisc;

	cp = fc_frame_payload_get(fp, sizeof(*cp));
	if (!cp)
		goto redisc;
	if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
		if (fr_len(fp) < sizeof(struct fc_frame_header) +
		    sizeof(*cp) + sizeof(*pn))
			goto redisc;
		pn = (struct fc_ns_gid_pn *)(cp + 1);
		port_name = get_unaligned_be64(&pn->fn_wwpn);
		if (rdata->ids.port_name == -1)
			rdata->ids.port_name = port_name;
		else if (rdata->ids.port_name != port_name) {
			/* The WWPN behind this port id changed: drop the old
			 * rport and log in a freshly-created replacement. */
			FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
				    "Port-id %6.6x wwpn %16.16llx\n",
				    rdata->ids.port_id, port_name);
			lport->tt.rport_logoff(rdata);

			new_rdata = lport->tt.rport_create(lport,
							   rdata->ids.port_id);
			if (new_rdata) {
				new_rdata->disc_id = disc->disc_id;
				lport->tt.rport_login(new_rdata);
			}
			goto out;
		}
		rdata->disc_id = disc->disc_id;
		lport->tt.rport_login(rdata);
	} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
		FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
			    cp->ct_reason, cp->ct_explan);
		lport->tt.rport_logoff(rdata);
	} else {
		FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
			    ntohs(cp->ct_cmd));
redisc:
		fc_disc_restart(disc);
	}
out:
	mutex_unlock(&disc->disc_mutex);
	/* Drop the reference taken in fc_disc_gpn_id_req(). */
	kref_put(&rdata->kref, lport->tt.rport_destroy);
}

/**
 * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
 * @lport: The local port to initiate discovery on
 * @rdata: remote port private data
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 * On failure, an error code is returned.
 */
static int fc_disc_gpn_id_req(struct fc_lport *lport,
			      struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_fid));
	if (!fp)
		return -ENOMEM;
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
				  fc_disc_gpn_id_resp, rdata,
				  3 * lport->r_a_tov))
		return -ENOMEM;
	/* Hold the rport until the response handler runs. */
	kref_get(&rdata->kref);
	return 0;
}

/**
 * fc_disc_single() - Discover the directory information for a single target
 * @lport: The local port the remote port is associated with
 * @dp:	   The port to rediscover
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_create(lport, dp->port_id);
	if (!rdata)
		return -ENOMEM;
	rdata->disc_id = 0;
	return fc_disc_gpn_id_req(lport, rdata);
}

/**
 * fc_disc_stop() - Stop discovery for a given lport
 * @lport: The local port that discovery should stop on
 */
static void fc_disc_stop(struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	if (disc->pending)
		cancel_delayed_work_sync(&disc->disc_work);
	fc_disc_stop_rports(disc);
}

/**
 * fc_disc_stop_final() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop on
 *
 * This function will block until discovery has been
 * completely stopped and all rports have been deleted.
 */
static void fc_disc_stop_final(struct fc_lport *lport)
{
	fc_disc_stop(lport);
	lport->tt.rport_flush_queue();
}

/**
 * fc_disc_init() - Initialize the discovery layer for a local port
 * @lport: The local port that needs the discovery layer to be initialized
 *
 * Fills in any discovery transport-template entries the LLD left NULL
 * with the default implementations above.
 */
int fc_disc_init(struct fc_lport *lport)
{
	struct fc_disc *disc;

	if (!lport->tt.disc_start)
		lport->tt.disc_start = fc_disc_start;

	if (!lport->tt.disc_stop)
		lport->tt.disc_stop = fc_disc_stop;

	if (!lport->tt.disc_stop_final)
		lport->tt.disc_stop_final = fc_disc_stop_final;

	if (!lport->tt.disc_recv_req)
		lport->tt.disc_recv_req = fc_disc_recv_req;

	disc = &lport->disc;
	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
	mutex_init(&disc->disc_mutex);
	INIT_LIST_HEAD(&disc->rports);

	disc->priv = lport;

	return 0;
}
EXPORT_SYMBOL(fc_disc_init);
gpl-2.0
Stuxnet-Kernel/kernel_mako
kernel/time/timecompare.c
7932
4966
/* * Copyright (C) 2009 Intel Corporation. * Author: Patrick Ohly <patrick.ohly@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/timecompare.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/math64.h> #include <linux/kernel.h> /* * fixed point arithmetic scale factor for skew * * Usually one would measure skew in ppb (parts per billion, 1e9), but * using a factor of 2 simplifies the math. 
 */
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)

/*
 * timecompare_transform - map a raw source time stamp onto the target base
 *
 * Applies the last measured offset plus a linear skew correction
 * relative to the time of the last update.
 */
ktime_t timecompare_transform(struct timecompare *sync,
			      u64 source_tstamp)
{
	u64 nsec;

	nsec = source_tstamp + sync->offset;
	nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
		TIMECOMPARE_SKEW_RESOLUTION;

	return ns_to_ktime(nsec);
}
EXPORT_SYMBOL_GPL(timecompare_transform);

/*
 * timecompare_offset - measure offset between source and target clock
 *
 * Takes up to sync->num_samples bracketed readings (target, source,
 * target), keeps them sorted by bracketing duration, averages the best
 * 75% and returns the number of samples used (0 = measurement failed).
 * @offset and @source_tstamp are only written when at least one sample
 * was valid.
 */
int timecompare_offset(struct timecompare *sync,
		       s64 *offset,
		       u64 *source_tstamp)
{
	u64 start_source = 0, end_source = 0;
	struct {
		s64 offset;
		s64 duration_target;
	} buffer[10], sample, *samples;
	int counter = 0, i;
	int used;
	int index;
	int num_samples = sync->num_samples;

	/* Fall back to the on-stack buffer if the atomic kmalloc fails. */
	if (num_samples > ARRAY_SIZE(buffer)) {
		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
		if (!samples) {
			samples = buffer;
			num_samples = ARRAY_SIZE(buffer);
		}
	} else {
		samples = buffer;
	}

	/* run until we have enough valid samples, but do not try forever */
	i = 0;
	counter = 0;
	while (1) {
		u64 ts;
		ktime_t start, end;

		start = sync->target();
		ts = timecounter_read(sync->source);
		end = sync->target();

		if (!i)
			start_source = ts;

		/* ignore negative durations */
		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
		if (sample.duration_target >= 0) {
			/*
			 * assume symetric delay to and from source:
			 * average target time corresponds to measured
			 * source time
			 */
			sample.offset =
				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;

			/* simple insertion sort based on duration */
			index = counter - 1;
			while (index >= 0) {
				if (samples[index].duration_target <
				    sample.duration_target)
					break;
				samples[index + 1] = samples[index];
				index--;
			}
			samples[index + 1] = sample;
			counter++;
		}

		i++;
		if (counter >= num_samples || i >= 100000) {
			end_source = ts;
			break;
		}
	}

	*source_tstamp = (end_source + start_source) / 2;

	/* remove outliers by only using 75% of the samples */
	used = counter * 3 / 4;
	if (!used)
		used = counter;
	if (used) {
		/* calculate average */
		s64 off = 0;
		for (index = 0; index < used; index++)
			off += samples[index].offset;
		*offset = div_s64(off, used);
	}

	if (samples && samples != buffer)
		kfree(samples);

	return used;
}
EXPORT_SYMBOL_GPL(timecompare_offset);

/*
 * __timecompare_update - refresh offset and skew from a new measurement
 *
 * First successful measurement initializes offset with zero skew; later
 * measurements blend a newly computed skew (fixed point, scaled by
 * TIMECOMPARE_SKEW_RESOLUTION) into the running estimate, 12/16 new to
 * 4/16 old.  Deltas below 10us are skipped to avoid noisy division.
 */
void __timecompare_update(struct timecompare *sync,
			  u64 source_tstamp)
{
	s64 offset;
	u64 average_time;

	if (!timecompare_offset(sync, &offset, &average_time))
		return;

	if (!sync->last_update) {
		sync->last_update = average_time;
		sync->offset = offset;
		sync->skew = 0;
	} else {
		s64 delta_nsec = average_time - sync->last_update;

		/* avoid division by negative or small deltas */
		if (delta_nsec >= 10000) {
			s64 delta_offset_nsec = offset - sync->offset;
			s64 skew; /* delta_offset_nsec *
				     TIMECOMPARE_SKEW_RESOLUTION /
				     delta_nsec */
			u64 divisor;

			/* div_s64() is limited to 32 bit divisor */
			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
			divisor = delta_nsec;
			while (unlikely(divisor >= ((s64)1) << 32)) {
				/* divide both by 2; beware, right shift
				   of negative value has undefined
				   behavior and can only be used for
				   the positive divisor */
				skew = div_s64(skew, 2);
				divisor >>= 1;
			}
			skew = div_s64(skew, divisor);

			/*
			 * Calculate new overall skew as 4/16 the
			 * old value and 12/16 the new one. This is
			 * a rather arbitrary tradeoff between
			 * only using the latest measurement (0/16 and
			 * 16/16) and even more weight on past measurements.
			 */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
			sync->skew =
				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
					sync->skew +
					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
					16);
			sync->last_update = average_time;
			sync->offset = offset;
		}
	}
}
EXPORT_SYMBOL_GPL(__timecompare_update);
gpl-2.0
Snuzzo/dlx_kernel
drivers/mtd/maps/mbx860.c
8188
2439
/*
 * Handle mapping of the flash on MBX860 boards
 *
 * Author:	Anton Todorov
 * Copyright:	(C) 2001 Emness Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

#define WINDOW_ADDR 0xfe000000
#define WINDOW_SIZE 0x00200000

/* Flash / Partition sizing */
#define MAX_SIZE_KiB			8192
#define BOOT_PARTITION_SIZE_KiB		512
#define KERNEL_PARTITION_SIZE_KiB	5632
#define APP_PARTITION_SIZE_KiB		2048

#define NUM_PARTITIONS 3

/* partition_info gives details on the logical partitions that split the
 * single flash device into.  If the size is zero we use up to the end of
 * the device. */
static struct mtd_partition partition_info[]={
	{ .name = "MBX flash BOOT partition",
	.offset = 0,
	.size =   BOOT_PARTITION_SIZE_KiB*1024 },
	{ .name = "MBX flash DATA partition",
	.offset = BOOT_PARTITION_SIZE_KiB*1024,
	.size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
	{ .name = "MBX flash APPLICATION partition",
	/* No .size: runs to the end of the device. */
	.offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
};

static struct mtd_info *mymtd;

struct map_info mbx_map = {
	.name = "MBX flash",
	.size = WINDOW_SIZE,
	.phys = WINDOW_ADDR,
	.bankwidth = 4,
};

/*
 * Probe and register the flash window.
 *
 * NOTE(review): the printk and the ioremap length use WINDOW_SIZE*4 while
 * mbx_map.size is WINDOW_SIZE -- one of the two looks wrong; confirm the
 * real window length against the board documentation.
 */
static int __init init_mbx(void)
{
	printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
	mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);

	if (!mbx_map.virt) {
		printk("Failed to ioremap\n");
		return -EIO;
	}
	simple_map_init(&mbx_map);

	mymtd = do_map_probe("jedec_probe", &mbx_map);
	if (mymtd) {
		mymtd->owner = THIS_MODULE;
		/* Expose both the whole chip and the three partitions;
		 * NOTE(review): return values are ignored -- a failed
		 * registration goes unnoticed here. */
		mtd_device_register(mymtd, NULL, 0);
		mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
		return 0;
	}

	/* Probe failed: release the mapping before bailing out. */
	iounmap((void *)mbx_map.virt);
	return -ENXIO;
}

static void __exit cleanup_mbx(void)
{
	if (mymtd) {
		mtd_device_unregister(mymtd);
		map_destroy(mymtd);
	}
	if (mbx_map.virt) {
		iounmap((void *)mbx_map.virt);
		mbx_map.virt = 0;
	}
}

module_init(init_mbx);
module_exit(cleanup_mbx);

MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
MODULE_LICENSE("GPL");
gpl-2.0
billy-wang/smdkc110-Gingerbread-kernel
crypto/cbc.c
11516
7621
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Per-transform context: only the wrapped single-block cipher. */
struct crypto_cbc_ctx {
	struct crypto_cipher *child;
};

/*
 * Forward the key to the underlying cipher, propagating the request
 * flags down and the result flags back up to the parent tfm.
 */
static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
			     CRYPTO_TFM_RES_MASK);
	return err;
}

/*
 * Encrypt one walk segment, src != dst.  C_i = E(P_i ^ C_{i-1}), with
 * walk->iv carrying C_{i-1} across segments.  Returns the number of
 * bytes left over (< one block).
 */
static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(iv, src, bsize);
		fn(crypto_cipher_tfm(tfm), dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

/*
 * Encrypt one walk segment in place (src == dst).  Each freshly
 * produced ciphertext block serves directly as the next IV, so only
 * the final block needs to be copied back into walk->iv.
 */
static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(src, iv, bsize);
		fn(crypto_cipher_tfm(tfm), src, src);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

/* Top-level encrypt: walk the scatterlists and dispatch per segment. */
static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
		else
			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

/*
 * Decrypt one walk segment, src != dst.  P_i = D(C_i) ^ C_{i-1}; the
 * previous ciphertext block is still readable in src, so iv just
 * tracks a pointer into it.
 */
static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		fn(crypto_cipher_tfm(tfm), dst, src);
		crypto_xor(dst, iv, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

/*
 * Decrypt one walk segment in place.  Works back-to-front: decrypt the
 * last block first (saving a copy of its ciphertext as the next IV),
 * then XOR each block with the still-intact ciphertext of its
 * predecessor.
 *
 * NOTE(review): last_iv is a VLA sized by the cipher block size --
 * stack usage depends on the wrapped algorithm; confirm bsize is
 * bounded for all ciphers this template can wrap.
 */
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[bsize];

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(crypto_cipher_tfm(tfm), src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}

/* Top-level decrypt: walk the scatterlists and dispatch per segment. */
static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
		else
			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

/* Instantiate the wrapped single-block cipher for this tfm. */
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
}

/*
 * Template constructor: build a "cbc(<cipher>)" blkcipher instance
 * around any single-block cipher with a power-of-two block size.
 */
static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = ERR_PTR(-EINVAL);
	if (!is_power_of_2(alg->cra_blocksize))
		goto out_put_alg;

	inst = crypto_alloc_instance("cbc", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	/* We access the data as u32s when xoring. */
	inst->alg.cra_alignmask |= __alignof__(u32) - 1;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);

	inst->alg.cra_init = crypto_cbc_init_tfm;
	inst->alg.cra_exit = crypto_cbc_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void crypto_cbc_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.alloc = crypto_cbc_alloc,
	.free = crypto_cbc_free,
	.module = THIS_MODULE,
};

static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}

static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}

module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");
gpl-2.0
daveti/prov-kernel
drivers/staging/rt3090/rt_profile.c
509
2993
/*
 *************************************************************************
 * Ralink Tech Inc.
 * 5F., No.36, Taiyuan St., Jhubei City,
 * Hsinchu County 302,
 * Taiwan, R.O.C.
 *
 * (c) Copyright 2002-2007, Ralink Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify  *
 * it under the terms of the GNU General Public License as published by  *
 * the Free Software Foundation; either version 2 of the License, or     *
 * (at your option) any later version.                                   *
 *                                                                       *
 * This program is distributed in the hope that it will be useful,       *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 * GNU General Public License for more details.                          *
 *                                                                       *
 * You should have received a copy of the GNU General Public License     *
 * along with this program; if not, write to the                         *
 * Free Software Foundation, Inc.,                                       *
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 *                                                                       *
 *************************************************************************

	Module Name:
	rt_profile.c

	Abstract:

	Revision History:
	Who         When          What
	--------    ----------    ----------------------------------------------
*/

#include "rt_config.h"

/*
 * Read the driver profile (.dat) file and apply its parameters.
 *
 * Returns NDIS_STATUS_SUCCESS only when the profile was read and
 * parsed, and the file closed cleanly; NDIS_STATUS_FAILURE otherwise.
 *
 * Bug fix vs. the original: the read status used to be overwritten by
 * RtmpOSFileClose()'s return value, so a failed read followed by a
 * successful close (0, which equals NDIS_STATUS_SUCCESS) reported
 * success.  The close status now lives in its own variable and a read
 * failure is explicitly mapped to NDIS_STATUS_FAILURE.
 */
NDIS_STATUS	RTMPReadParametersHook(
	IN	PRTMP_ADAPTER pAd)
{
	PSTRING					src = NULL;
	RTMP_OS_FD				srcf;
	RTMP_OS_FS_INFO			osFSInfo;
	INT						retval = NDIS_STATUS_FAILURE;
	PSTRING					buffer;

	buffer = kmalloc(MAX_INI_BUFFER_SIZE, MEM_ALLOC_FLAG);
	if (buffer == NULL)
		return NDIS_STATUS_FAILURE;
	memset(buffer, 0x00, MAX_INI_BUFFER_SIZE);

	{
#ifdef CONFIG_STA_SUPPORT
		IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
		{
			src = STA_PROFILE_PATH;
		}
#endif // CONFIG_STA_SUPPORT //
#ifdef MULTIPLE_CARD_SUPPORT
		/* Per-card profile overrides the default path. */
		src = (PSTRING)pAd->MC_FileName;
#endif // MULTIPLE_CARD_SUPPORT //
	}

	if (src && *src)
	{
		RtmpOSFSInfoChange(&osFSInfo, TRUE);
		srcf = RtmpOSFileOpen(src, O_RDONLY, 0);
		if (IS_FILE_OPEN_ERR(srcf))
		{
			DBGPRINT(RT_DEBUG_ERROR, ("Open file \"%s\" failed!\n", src));
		}
		else
		{
			INT closeRet;

			retval = RtmpOSFileRead(srcf, buffer, MAX_INI_BUFFER_SIZE);
			if (retval > 0)
			{
				RTMPSetProfileParameters(pAd, buffer);
				retval = NDIS_STATUS_SUCCESS;
			}
			else
			{
				DBGPRINT(RT_DEBUG_ERROR, ("Read file \"%s\" failed(errCode=%d)!\n", src, retval));
				/* Do not let a later successful close mask this. */
				retval = NDIS_STATUS_FAILURE;
			}

			closeRet = RtmpOSFileClose(srcf);
			if (closeRet != 0)
			{
				retval = NDIS_STATUS_FAILURE;
				DBGPRINT(RT_DEBUG_ERROR, ("Close file \"%s\" failed(errCode=%d)!\n", src, closeRet));
			}
		}
		RtmpOSFSInfoChange(&osFSInfo, FALSE);
	}

	kfree(buffer);

	return (retval);
}
gpl-2.0
thoemy/android_kernel_htc_endeavoru
arch/x86/kernel/tsc.c
509
25930
#include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/acpi_pmtmr.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/clocksource.h> #include <linux/percpu.h> #include <linux/timex.h> #include <asm/hpet.h> #include <asm/timer.h> #include <asm/vgtod.h> #include <asm/time.h> #include <asm/delay.h> #include <asm/hypervisor.h> #include <asm/nmi.h> #include <asm/x86_init.h> unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); unsigned int __read_mostly tsc_khz; EXPORT_SYMBOL(tsc_khz); /* * TSC can be unstable due to cpufreq or due to unsynced TSCs */ static int __read_mostly tsc_unstable; /* native_sched_clock() is called before tsc_init(), so we must start with the TSC soft disabled to prevent erroneous rdtsc usage on !cpu_has_tsc processors */ static int __read_mostly tsc_disabled = -1; static int tsc_clocksource_reliable; /* * Scheduler clock - returns current time in nanosec units. */ u64 native_sched_clock(void) { u64 this_offset; /* * Fall back to jiffies if there's no TSC available: * ( But note that we still use it if the TSC is marked * unstable. We do this because unlike Time Of Day, * the scheduler clock tolerates small errors and it's * very important for it to be as fast as the platform * can achieve it. 
) */ if (unlikely(tsc_disabled)) { /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); } /* read the Time Stamp Counter: */ rdtscll(this_offset); /* return the value in ns */ return __cycles_2_ns(this_offset); } /* We need to define a real function for sched_clock, to override the weak default version */ #ifdef CONFIG_PARAVIRT unsigned long long sched_clock(void) { return paravirt_sched_clock(); } #else unsigned long long sched_clock(void) __attribute__((alias("native_sched_clock"))); #endif int check_tsc_unstable(void) { return tsc_unstable; } EXPORT_SYMBOL_GPL(check_tsc_unstable); #ifdef CONFIG_X86_TSC int __init notsc_setup(char *str) { printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " "cannot disable TSC completely.\n"); tsc_disabled = 1; return 1; } #else /* * disable flag for tsc. Takes effect by clearing the TSC cpu flag * in cpu/common.c */ int __init notsc_setup(char *str) { setup_clear_cpu_cap(X86_FEATURE_TSC); return 1; } #endif __setup("notsc", notsc_setup); static int no_sched_irq_time; static int __init tsc_setup(char *str) { if (!strcmp(str, "reliable")) tsc_clocksource_reliable = 1; if (!strncmp(str, "noirqtime", 9)) no_sched_irq_time = 1; return 1; } __setup("tsc=", tsc_setup); #define MAX_RETRIES 5 #define SMI_TRESHOLD 50000 /* * Read TSC and the reference counters. 
Take care of SMI disturbance */ static u64 tsc_read_refs(u64 *p, int hpet) { u64 t1, t2; int i; for (i = 0; i < MAX_RETRIES; i++) { t1 = get_cycles(); if (hpet) *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; else *p = acpi_pm_read_early(); t2 = get_cycles(); if ((t2 - t1) < SMI_TRESHOLD) return t2; } return ULLONG_MAX; } /* * Calculate the TSC frequency from HPET reference */ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) { u64 tmp; if (hpet2 < hpet1) hpet2 += 0x100000000ULL; hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); do_div(deltatsc, tmp); return (unsigned long) deltatsc; } /* * Calculate the TSC frequency from PMTimer reference */ static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2) { u64 tmp; if (!pm1 && !pm2) return ULONG_MAX; if (pm2 < pm1) pm2 += (u64)ACPI_PM_OVRRUN; pm2 -= pm1; tmp = pm2 * 1000000000LL; do_div(tmp, PMTMR_TICKS_PER_SEC); do_div(deltatsc, tmp); return (unsigned long) deltatsc; } #define CAL_MS 10 #define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS)) #define CAL_PIT_LOOPS 1000 #define CAL2_MS 50 #define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS)) #define CAL2_PIT_LOOPS 5000 /* * Try to calibrate the TSC against the Programmable * Interrupt Timer and return the frequency of the TSC * in kHz. * * Return ULONG_MAX on failure to calibrate. */ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) { u64 tsc, t1, t2, delta; unsigned long tscmin, tscmax; int pitcnt; /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Setup CTC channel 2* for mode 0, (interrupt on terminal * count mode), binary count. Set the latch register to 50ms * (LSB then MSB) to begin countdown. 
*/ outb(0xb0, 0x43); outb(latch & 0xff, 0x42); outb(latch >> 8, 0x42); tsc = t1 = t2 = get_cycles(); pitcnt = 0; tscmax = 0; tscmin = ULONG_MAX; while ((inb(0x61) & 0x20) == 0) { t2 = get_cycles(); delta = t2 - tsc; tsc = t2; if ((unsigned long) delta < tscmin) tscmin = (unsigned int) delta; if ((unsigned long) delta > tscmax) tscmax = (unsigned int) delta; pitcnt++; } /* * Sanity checks: * * If we were not able to read the PIT more than loopmin * times, then we have been hit by a massive SMI * * If the maximum is 10 times larger than the minimum, * then we got hit by an SMI as well. */ if (pitcnt < loopmin || tscmax > 10 * tscmin) return ULONG_MAX; /* Calculate the PIT value */ delta = t2 - t1; do_div(delta, ms); return delta; } /* * This reads the current MSB of the PIT counter, and * checks if we are running on sufficiently fast and * non-virtualized hardware. * * Our expectations are: * * - the PIT is running at roughly 1.19MHz * * - each IO is going to take about 1us on real hardware, * but we allow it to be much faster (by a factor of 10) or * _slightly_ slower (ie we allow up to a 2us read+counter * update - anything else implies a unacceptably slow CPU * or PIT for the fast calibration to work. * * - with 256 PIT ticks to read the value, we have 214us to * see the same MSB (and overhead like doing a single TSC * read per MSB value etc). * * - We're doing 2 reads per loop (LSB, MSB), and we expect * them each to take about a microsecond on real hardware. * So we expect a count value of around 100. But we'll be * generous, and accept anything over 50. * * - if the PIT is stuck, and we see *many* more reads, we * return early (and the next caller of pit_expect_msb() * then consider it a failure when they don't see the * next expected value). * * These expectations mean that we know that we have seen the * transition from one expected value to another with a fairly * high accuracy, and we didn't miss any events. 
We can thus * use the TSC value at the transitions to calculate a pretty * good value for the TSC frequencty. */ static inline int pit_verify_msb(unsigned char val) { /* Ignore LSB */ inb(0x42); return inb(0x42) == val; } static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) { int count; u64 tsc = 0; for (count = 0; count < 50000; count++) { if (!pit_verify_msb(val)) break; tsc = get_cycles(); } *deltap = get_cycles() - tsc; *tscp = tsc; /* * We require _some_ success, but the quality control * will be based on the error terms on the TSC values. */ return count > 5; } /* * How many MSB values do we want to see? We aim for * a maximum error rate of 500ppm (in practice the * real error is much smaller), but refuse to spend * more than 25ms on it. */ #define MAX_QUICK_PIT_MS 25 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) static unsigned long quick_pit_calibrate(void) { int i; u64 tsc, delta; unsigned long d1, d2; /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Counter 2, mode 0 (one-shot), binary count * * NOTE! Mode 2 decrements by two (and then the * output is flipped each time, giving the same * final output frequency as a decrement-by-one), * so mode 0 is much better when looking at the * individual counts. */ outb(0xb0, 0x43); /* Start at 0xffff */ outb(0xff, 0x42); outb(0xff, 0x42); /* * The PIT starts counting at the next edge, so we * need to delay for a microsecond. The easiest way * to do that is to just read back the 16-bit counter * once from the PIT. */ pit_verify_msb(0); if (pit_expect_msb(0xff, &tsc, &d1)) { for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) { if (!pit_expect_msb(0xff-i, &delta, &d2)) break; /* * Iterate until the error is less than 500 ppm */ delta -= tsc; if (d1+d2 >= delta >> 11) continue; /* * Check the PIT one more time to verify that * all TSC reads were stable wrt the PIT. 
* * This also guarantees serialization of the * last cycle read ('d2') in pit_expect_msb. */ if (!pit_verify_msb(0xfe - i)) break; goto success; } } printk("Fast TSC calibration failed\n"); return 0; success: /* * Ok, if we get here, then we've seen the * MSB of the PIT decrement 'i' times, and the * error has shrunk to less than 500 ppm. * * As a result, we can depend on there not being * any odd delays anywhere, and the TSC reads are * reliable (within the error). We also adjust the * delta to the middle of the error bars, just * because it looks nicer. * * kHz = ticks / time-in-seconds / 1000; * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) */ delta += (long)(d2 - d1)/2; delta *= PIT_TICK_RATE; do_div(delta, i*256*1000); printk("Fast TSC calibration using PIT\n"); return delta; } /** * native_calibrate_tsc - calibrate the tsc on boot */ unsigned long native_calibrate_tsc(void) { u64 tsc1, tsc2, delta, ref1, ref2; unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; unsigned long flags, latch, ms, fast_calibrate; int hpet = is_hpet_enabled(), i, loopmin; local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); local_irq_restore(flags); if (fast_calibrate) return fast_calibrate; /* * Run 5 calibration loops to get the lowest frequency value * (the best estimate). We use two different calibration modes * here: * * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and * load a timeout of 50ms. We read the time right after we * started the timer and wait until the PIT count down reaches * zero. In each wait loop iteration we read the TSC and check * the delta to the previous read. We keep track of the min * and max values of that delta. The delta is mostly defined * by the IO time of the PIT access, so we can detect when a * SMI/SMM disturbance happened between the two reads. 
If the * maximum time is significantly larger than the minimum time, * then we discard the result and have another try. * * 2) Reference counter. If available we use the HPET or the * PMTIMER as a reference to check the sanity of that value. * We use separate TSC readouts and check inside of the * reference read for a SMI/SMM disturbance. We dicard * disturbed values here as well. We do that around the PIT * calibration delay loop as we have to wait for a certain * amount of time anyway. */ /* Preset PIT loop values */ latch = CAL_LATCH; ms = CAL_MS; loopmin = CAL_PIT_LOOPS; for (i = 0; i < 3; i++) { unsigned long tsc_pit_khz; /* * Read the start value and the reference count of * hpet/pmtimer when available. Then do the PIT * calibration, which will take at least 50ms, and * read the end value. */ local_irq_save(flags); tsc1 = tsc_read_refs(&ref1, hpet); tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin); tsc2 = tsc_read_refs(&ref2, hpet); local_irq_restore(flags); /* Pick the lowest PIT TSC calibration so far */ tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); /* hpet or pmtimer available ? */ if (ref1 == ref2) continue; /* Check, whether the sampling was disturbed by an SMI */ if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) continue; tsc2 = (tsc2 - tsc1) * 1000000LL; if (hpet) tsc2 = calc_hpet_ref(tsc2, ref1, ref2); else tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2); tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); /* Check the reference deviation */ delta = ((u64) tsc_pit_min) * 100; do_div(delta, tsc_ref_min); /* * If both calibration results are inside a 10% window * then we can be sure, that the calibration * succeeded. We break out of the loop right away. We * use the reference value, as it is more precise. */ if (delta >= 90 && delta <= 110) { printk(KERN_INFO "TSC: PIT calibration matches %s. %d loops\n", hpet ? "HPET" : "PMTIMER", i + 1); return tsc_ref_min; } /* * Check whether PIT failed more than once. This * happens in virtualized environments. 
We need to * give the virtual PC a slightly longer timeframe for * the HPET/PMTIMER to make the result precise. */ if (i == 1 && tsc_pit_min == ULONG_MAX) { latch = CAL2_LATCH; ms = CAL2_MS; loopmin = CAL2_PIT_LOOPS; } } /* * Now check the results. */ if (tsc_pit_min == ULONG_MAX) { /* PIT gave no useful value */ printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); /* We don't have an alternative source, disable TSC */ if (!hpet && !ref1 && !ref2) { printk("TSC: No reference (HPET/PMTIMER) available\n"); return 0; } /* The alternative source failed as well, disable TSC */ if (tsc_ref_min == ULONG_MAX) { printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " "failed.\n"); return 0; } /* Use the alternative source */ printk(KERN_INFO "TSC: using %s reference calibration\n", hpet ? "HPET" : "PMTIMER"); return tsc_ref_min; } /* We don't have an alternative source, use the PIT calibration value */ if (!hpet && !ref1 && !ref2) { printk(KERN_INFO "TSC: Using PIT calibration value\n"); return tsc_pit_min; } /* The alternative source failed, use the PIT calibration value */ if (tsc_ref_min == ULONG_MAX) { printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. " "Using PIT calibration\n"); return tsc_pit_min; } /* * The calibration values differ too much. In doubt, we use * the PIT value as we know that there are PMTIMERs around * running at double speed. At least we let the user know: */ printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n", hpet ? 
"HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); printk(KERN_INFO "TSC: Using PIT calibration value\n"); return tsc_pit_min; } int recalibrate_cpu_khz(void) { #ifndef CONFIG_SMP unsigned long cpu_khz_old = cpu_khz; if (cpu_has_tsc) { tsc_khz = x86_platform.calibrate_tsc(); cpu_khz = tsc_khz; cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy, cpu_khz_old, cpu_khz); return 0; } else return -ENODEV; #else return -ENODEV; #endif } EXPORT_SYMBOL(recalibrate_cpu_khz); /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) * ns = cycles * (ns_per_sec / freq) * ns = cycles * (10^9 / (cpu_khz * 10^3)) * ns = cycles * (10^6 / cpu_khz) * * Then we use scaling math (suggested by george@mvista.com) to get: * ns = cycles * (10^6 * SC / cpu_khz) / SC * ns = cycles * cyc2ns_scale / SC * * And since SC is a constant power of two, we can convert the div * into a shift. * * We can use khz divisor instead of mhz to keep a better precision, since * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. * (mathieu.desnoyers@polymtl.ca) * * -johnstul@us.ibm.com "math is hard, lets go shopping!" 
*/ DEFINE_PER_CPU(unsigned long, cyc2ns); DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) { unsigned long long tsc_now, ns_now, *offset; unsigned long flags, *scale; local_irq_save(flags); sched_clock_idle_sleep_event(); scale = &per_cpu(cyc2ns, cpu); offset = &per_cpu(cyc2ns_offset, cpu); rdtscll(tsc_now); ns_now = __cycles_2_ns(tsc_now); if (cpu_khz) { *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR); } sched_clock_idle_wakeup_event(0); local_irq_restore(flags); } static unsigned long long cyc2ns_suspend; void save_sched_clock_state(void) { if (!sched_clock_stable) return; cyc2ns_suspend = sched_clock(); } /* * Even on processors with invariant TSC, TSC gets reset in some the * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to * arbitrary value (still sync'd across cpu's) during resume from such sleep * states. To cope up with this, recompute the cyc2ns_offset for each cpu so * that sched_clock() continues from the point where it was left off during * suspend. */ void restore_sched_clock_state(void) { unsigned long long offset; unsigned long flags; int cpu; if (!sched_clock_stable) return; local_irq_save(flags); __this_cpu_write(cyc2ns_offset, 0); offset = cyc2ns_suspend - sched_clock(); for_each_possible_cpu(cpu) per_cpu(cyc2ns_offset, cpu) = offset; local_irq_restore(flags); } #ifdef CONFIG_CPU_FREQ /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency * changes. * * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's * not that important because current Opteron setups do not support * scaling on SMP anyroads. * * Should fix up last_tsc too. Currently gettimeofday in the * first tick after the change will be slightly wrong. 
*/ static unsigned int ref_freq; static unsigned long loops_per_jiffy_ref; static unsigned long tsc_khz_ref; static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; unsigned long *lpj; if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) return 0; lpj = &boot_cpu_data.loops_per_jiffy; #ifdef CONFIG_SMP if (!(freq->flags & CPUFREQ_CONST_LOOPS)) lpj = &cpu_data(freq->cpu).loops_per_jiffy; #endif if (!ref_freq) { ref_freq = freq->old; loops_per_jiffy_ref = *lpj; tsc_khz_ref = tsc_khz; } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || (val == CPUFREQ_RESUMECHANGE)) { *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) mark_tsc_unstable("cpufreq changes"); } set_cyc2ns_scale(tsc_khz, freq->cpu); return 0; } static struct notifier_block time_cpufreq_notifier_block = { .notifier_call = time_cpufreq_notifier }; static int __init cpufreq_tsc(void) { if (!cpu_has_tsc) return 0; if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) return 0; cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return 0; } core_initcall(cpufreq_tsc); #endif /* CONFIG_CPU_FREQ */ /* clocksource code */ static struct clocksource clocksource_tsc; /* * We compare the TSC to the cycle_last value in the clocksource * structure to avoid a nasty time-warp. This can be observed in a * very small window right after one CPU updated cycle_last under * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which * is smaller than the cycle_last reference value due to a TSC which * is slighty behind. 
This delta is nowhere else observable, but in * that case it results in a forward time jump in the range of hours * due to the unsigned delta calculation of the time keeping core * code, which is necessary to support wrapping clocksources like pm * timer. */ static cycle_t read_tsc(struct clocksource *cs) { cycle_t ret = (cycle_t)get_cycles(); return ret >= clocksource_tsc.cycle_last ? ret : clocksource_tsc.cycle_last; } static void resume_tsc(struct clocksource *cs) { clocksource_tsc.cycle_last = 0; } static struct clocksource clocksource_tsc = { .name = "tsc", .rating = 300, .read = read_tsc, .resume = resume_tsc, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY, #ifdef CONFIG_X86_64 .archdata = { .vclock_mode = VCLOCK_TSC }, #endif }; void mark_tsc_unstable(char *reason) { if (!tsc_unstable) { tsc_unstable = 1; sched_clock_stable = 0; disable_sched_clock_irqtime(); printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ if (clocksource_tsc.mult) clocksource_mark_unstable(&clocksource_tsc); else { clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; clocksource_tsc.rating = 0; } } } EXPORT_SYMBOL_GPL(mark_tsc_unstable); static void __init check_system_tsc_reliable(void) { #ifdef CONFIG_MGEODE_LX /* RTSC counts during suspend */ #define RTSC_SUSP 0x100 unsigned long res_low, res_high; rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); /* Geode_LX - the OLPC CPU has a very reliable TSC */ if (res_low & RTSC_SUSP) tsc_clocksource_reliable = 1; #endif if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) tsc_clocksource_reliable = 1; } /* * Make an educated guess if the TSC is trustworthy and synchronized * over all CPUs. 
*/ __cpuinit int unsynchronized_tsc(void) { if (!cpu_has_tsc || tsc_unstable) return 1; #ifdef CONFIG_SMP if (apic_is_clustered_box()) return 1; #endif if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) return 0; if (tsc_clocksource_reliable) return 0; /* * Intel systems are normally all synchronized. * Exceptions must mark TSC as unstable: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { /* assume multi socket systems are not synchronized: */ if (num_possible_cpus() > 1) return 1; } return 0; } static void tsc_refine_calibration_work(struct work_struct *work); static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); /** * tsc_refine_calibration_work - Further refine tsc freq calibration * @work - ignored. * * This functions uses delayed work over a period of a * second to further refine the TSC freq value. Since this is * timer based, instead of loop based, we don't block the boot * process while this longer calibration is done. * * If there are any calibration anomalies (too many SMIs, etc), * or the refined calibration is off by 1% of the fast early * calibration, we throw out the new calibration and use the * early calibration. */ static void tsc_refine_calibration_work(struct work_struct *work) { static u64 tsc_start = -1, ref_start; static int hpet; u64 tsc_stop, ref_stop, delta; unsigned long freq; /* Don't bother refining TSC on unstable systems */ if (check_tsc_unstable()) goto out; /* * Since the work is started early in boot, we may be * delayed the first time we expire. So set the workqueue * again once we know timers are working. */ if (tsc_start == -1) { /* * Only set hpet once, to avoid mixing hardware * if the hpet becomes enabled later. */ hpet = is_hpet_enabled(); schedule_delayed_work(&tsc_irqwork, HZ); tsc_start = tsc_read_refs(&ref_start, hpet); return; } tsc_stop = tsc_read_refs(&ref_stop, hpet); /* hpet or pmtimer available ? 
*/ if (ref_start == ref_stop) goto out; /* Check, whether the sampling was disturbed by an SMI */ if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) goto out; delta = tsc_stop - tsc_start; delta *= 1000000LL; if (hpet) freq = calc_hpet_ref(delta, ref_start, ref_stop); else freq = calc_pmtimer_ref(delta, ref_start, ref_stop); /* Make sure we're within 1% */ if (abs(tsc_khz - freq) > tsc_khz/100) goto out; tsc_khz = freq; printk(KERN_INFO "Refined TSC clocksource calibration: " "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000, (unsigned long)tsc_khz % 1000); out: clocksource_register_khz(&clocksource_tsc, tsc_khz); } static int __init init_tsc_clocksource(void) { if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz) return 0; if (tsc_clocksource_reliable) clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; /* lower the rating if we already know its unstable: */ if (check_tsc_unstable()) { clocksource_tsc.rating = 0; clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; } schedule_delayed_work(&tsc_irqwork, 0); return 0; } /* * We use device_initcall here, to ensure we run after the hpet * is fully initialized, which may occur at fs_initcall time. */ device_initcall(init_tsc_clocksource); void __init tsc_init(void) { u64 lpj; int cpu; x86_init.timers.tsc_pre_init(); if (!cpu_has_tsc) return; tsc_khz = x86_platform.calibrate_tsc(); cpu_khz = tsc_khz; if (!tsc_khz) { mark_tsc_unstable("could not calculate TSC khz"); return; } printk("Detected %lu.%03lu MHz processor.\n", (unsigned long)cpu_khz / 1000, (unsigned long)cpu_khz % 1000); /* * Secondary CPUs do not run through tsc_init(), so set up * all the scale factors for all CPUs, assuming the same * speed as the bootup CPU. 
(cpufreq notifiers will fix this * up if their speed diverges) */ for_each_possible_cpu(cpu) set_cyc2ns_scale(cpu_khz, cpu); if (tsc_disabled > 0) return; /* now allow native_sched_clock() to use rdtsc */ tsc_disabled = 0; if (!no_sched_irq_time) enable_sched_clock_irqtime(); lpj = ((u64)tsc_khz * 1000); do_div(lpj, HZ); lpj_fine = lpj; use_tsc_delay(); if (unsynchronized_tsc()) mark_tsc_unstable("TSCs unsynchronized"); check_system_tsc_reliable(); }
gpl-2.0
EpicCM/SPH-D700-Kernel
drivers/gpu_atlas/drm/nouveau/nv50_gpio.c
765
2203
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_hw.h" static int nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) { const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; if (gpio->line >= 32) return -EINVAL; *reg = nv50_gpio_reg[gpio->line >> 3]; *shift = (gpio->line & 7) << 2; return 0; } int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) { struct dcb_gpio_entry *gpio; uint32_t r, s, v; gpio = nouveau_bios_gpio_entry(dev, tag); if (!gpio) return -ENOENT; if (nv50_gpio_location(gpio, &r, &s)) return -EINVAL; v = nv_rd32(dev, r) >> (s + 2); return ((v & 1) == (gpio->state[1] & 1)); } int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) { struct dcb_gpio_entry *gpio; uint32_t r, s, v; gpio = nouveau_bios_gpio_entry(dev, tag); if (!gpio) return -ENOENT; if (nv50_gpio_location(gpio, &r, &s)) return -EINVAL; v = nv_rd32(dev, r) & ~(0x3 << s); v |= (gpio->state[state] ^ 2) << s; nv_wr32(dev, r, v); return 0; }
gpl-2.0
Bdaman80/BD-Ace
drivers/gpu/drm/nouveau/nouveau_i2c.c
765
6909
/* * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_i2c.h" #include "nouveau_hw.h" static void nv04_i2c_setscl(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; uint8_t val; val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0); NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); } static void nv04_i2c_setsda(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; uint8_t val; val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 
0x10 : 0); NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); } static int nv04_i2c_getscl(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4); } static int nv04_i2c_getsda(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8); } static void nv4e_i2c_setscl(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; uint8_t val; val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0); nv_wr32(dev, i2c->wr, val | 0x01); } static void nv4e_i2c_setsda(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; uint8_t val; val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0); nv_wr32(dev, i2c->wr, val | 0x01); } static int nv4e_i2c_getscl(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!((nv_rd32(dev, i2c->rd) >> 16) & 4); } static int nv4e_i2c_getsda(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); } static int nv50_i2c_getscl(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!(nv_rd32(dev, i2c->rd) & 1); } static int nv50_i2c_getsda(void *data) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; return !!(nv_rd32(dev, i2c->rd) & 2); } static void nv50_i2c_setscl(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); } static void nv50_i2c_setsda(void *data, int state) { struct nouveau_i2c_chan *i2c = data; struct drm_device *dev = i2c->dev; nv_wr32(dev, i2c->wr, (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 
2 : 0)); i2c->data = state; } static const uint32_t nv50_i2c_port[] = { 0x00e138, 0x00e150, 0x00e168, 0x00e180, 0x00e254, 0x00e274, 0x00e764, 0x00e780, 0x00e79c, 0x00e7b8 }; #define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) int nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_i2c_chan *i2c; int ret; if (entry->chan) return -EEXIST; if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) { NV_ERROR(dev, "unknown i2c port %d\n", entry->read); return -EINVAL; } i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); if (i2c == NULL) return -ENOMEM; switch (entry->port_type) { case 0: i2c->algo.bit.setsda = nv04_i2c_setsda; i2c->algo.bit.setscl = nv04_i2c_setscl; i2c->algo.bit.getsda = nv04_i2c_getsda; i2c->algo.bit.getscl = nv04_i2c_getscl; i2c->rd = entry->read; i2c->wr = entry->write; break; case 4: i2c->algo.bit.setsda = nv4e_i2c_setsda; i2c->algo.bit.setscl = nv4e_i2c_setscl; i2c->algo.bit.getsda = nv4e_i2c_getsda; i2c->algo.bit.getscl = nv4e_i2c_getscl; i2c->rd = 0x600800 + entry->read; i2c->wr = 0x600800 + entry->write; break; case 5: i2c->algo.bit.setsda = nv50_i2c_setsda; i2c->algo.bit.setscl = nv50_i2c_setscl; i2c->algo.bit.getsda = nv50_i2c_getsda; i2c->algo.bit.getscl = nv50_i2c_getscl; i2c->rd = nv50_i2c_port[entry->read]; i2c->wr = i2c->rd; break; case 6: i2c->rd = entry->read; i2c->wr = entry->write; break; default: NV_ERROR(dev, "DCB I2C port type %d unknown\n", entry->port_type); kfree(i2c); return -EINVAL; } snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "nouveau-%s-%d", pci_name(dev->pdev), index); i2c->adapter.owner = THIS_MODULE; i2c->adapter.dev.parent = &dev->pdev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); if (entry->port_type < 6) { i2c->adapter.algo_data = &i2c->algo.bit; i2c->algo.bit.udelay = 40; i2c->algo.bit.timeout = usecs_to_jiffies(5000); i2c->algo.bit.data = i2c; ret = i2c_bit_add_bus(&i2c->adapter); } else { 
i2c->adapter.algo_data = &i2c->algo.dp; i2c->algo.dp.running = false; i2c->algo.dp.address = 0; i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch; ret = i2c_dp_aux_add_bus(&i2c->adapter); } if (ret) { NV_ERROR(dev, "Failed to register i2c %d\n", index); kfree(i2c); return ret; } entry->chan = i2c; return 0; } void nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry) { if (!entry->chan) return; i2c_del_adapter(&entry->chan->adapter); kfree(entry->chan); entry->chan = NULL; } struct nouveau_i2c_chan * nouveau_i2c_find(struct drm_device *dev, int index) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; if (index >= DCB_MAX_NUM_I2C_ENTRIES) return NULL; if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { uint32_t reg = 0xe500, val; if (i2c->port_type == 6) { reg += i2c->read * 0x50; val = 0x2002; } else { reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; val = 0xe001; } nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); } if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) return NULL; return i2c->chan; }
gpl-2.0
cattleprod/GT-I9100_Kernel
drivers/net/ne-h8300.c
765
19859
/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */ /* original ne.c Written 1992-94 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 H8/300 modified Yoshinori Sato <ysato@users.sourceforge.jp> */ static const char version1[] = "ne-h8300.c:v1.00 2004/04/11 ysato\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/jiffies.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #define EI_SHIFT(x) (ei_local->reg_offset[x]) #include "8390.h" #define DRV_NAME "ne-h8300" /* Some defines that people can play with if so inclined. */ /* Do we perform extra sanity checks on stuff ? */ /* #define NE_SANITY_CHECK */ /* Do we implement the read before write bugfix ? */ /* #define NE_RW_BUGFIX */ /* Do we have a non std. amount of memory? (in units of 256 byte pages) */ /* #define PACKETBUF_MEMSIZE 0x40 */ /* A zero-terminated list of I/O addresses to be probed at boot. */ /* ---- No user-serviceable parts below ---- */ static const char version[] = "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; #include "lib8390.c" #define NE_BASE (dev->base_addr) #define NE_CMD 0x00 #define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ #define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. 
*/ #define NE_IO_EXTENT (ei_status.word16?0x40:0x20) #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ static int ne_probe1(struct net_device *dev, int ioaddr); static int ne_open(struct net_device *dev); static int ne_close(struct net_device *dev); static void ne_reset_8390(struct net_device *dev); static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ne_block_output(struct net_device *dev, const int count, const unsigned char *buf, const int start_page); static u32 reg_offset[16]; static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) { struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); int i; unsigned char bus_width; bus_width = *(volatile unsigned char *)ABWCR; bus_width &= 1 << ((base_addr >> 21) & 7); for (i = 0; i < ARRAY_SIZE(reg_offset); i++) if (bus_width == 0) reg_offset[i] = i * 2 + 1; else reg_offset[i] = i; ei_local->reg_offset = reg_offset; return 0; } static int __initdata h8300_ne_count = 0; #ifdef CONFIG_H8300H_H8MAX static unsigned long __initdata h8300_ne_base[] = { 0x800600 }; static int h8300_ne_irq[] = {EXT_IRQ4}; #endif #ifdef CONFIG_H8300H_AKI3068NET static unsigned long __initdata h8300_ne_base[] = { 0x200000 }; static int h8300_ne_irq[] = {EXT_IRQ5}; #endif static inline int init_dev(struct net_device *dev) { if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) { dev->base_addr = h8300_ne_base[h8300_ne_count]; dev->irq = h8300_ne_irq[h8300_ne_count]; h8300_ne_count++; return 0; } else return -ENODEV; } /* Probe for various non-shared-memory ethercards. NEx000-clone boards have a Station Address PROM (SAPROM) in the packet buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be detected by their SA prefix. 
Reading the SAPROM from a word-wide card with the 8390 set in byte-wide mode results in doubled values, which can be detected and compensated for. The probe is also responsible for initializing the card and filling in the 'dev' and 'ei_status' structures. We use the minimum memory size for some ethercard product lines, iff we can't distinguish models. You can increase the packet buffer size by setting PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: E1010 starts at 0x100 and ends at 0x2000. E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") E2010 starts at 0x100 and ends at 0x4000. E2010-x starts at 0x100 and ends at 0xffff. */ static int __init do_ne_probe(struct net_device *dev) { unsigned int base_addr = dev->base_addr; /* First check any supplied i/o locations. User knows best. <cough> */ if (base_addr > 0x1ff) /* Check a single specified location. */ return ne_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ return -ENXIO; return -ENODEV; } static void cleanup_card(struct net_device *dev) { free_irq(dev->irq, dev); release_region(dev->base_addr, NE_IO_EXTENT); } #ifndef MODULE struct net_device * __init ne_probe(int unit) { struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) return ERR_PTR(-ENOMEM); if (init_dev(dev)) return ERR_PTR(-ENODEV); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = init_reg_offset(dev, dev->base_addr); if (err) goto out; err = do_ne_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static const struct net_device_ops ne_netdev_ops = { .ndo_open = ne_open, .ndo_stop = ne_close, .ndo_start_xmit = ei_start_xmit, .ndo_tx_timeout = ei_tx_timeout, .ndo_get_stats = ei_get_stats, .ndo_set_multicast_list = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ei_poll, 
#endif }; static int __init ne_probe1(struct net_device *dev, int ioaddr) { int i; unsigned char SA_prom[16]; int wordlength = 2; const char *name = NULL; int start_page, stop_page; int reg0, ret; static unsigned version_printed; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned char bus_width; if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; reg0 = inb_p(ioaddr); if (reg0 == 0xFF) { ret = -ENODEV; goto err_out; } /* Do a preliminary verification that we have a 8390. */ { int regd; outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); regd = inb_p(ioaddr + EI_SHIFT(0x0d)); outb_p(0xff, ioaddr + EI_SHIFT(0x0d)); outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ if (inb_p(ioaddr + EN0_COUNTER0) != 0) { outb_p(reg0, ioaddr + EI_SHIFT(0)); outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */ ret = -ENODEV; goto err_out; } } if (ei_debug && version_printed++ == 0) printk(KERN_INFO "%s", version1); printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr); /* Read the 16 bytes of station address PROM. We must first initialize registers, similar to NS8390_init(eifdev, 0). We can't reliably read the SAPROM address without this. (I learned the hard way!). */ { struct {unsigned char value, offset; } program_seq[] = { {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ {0x00, EN0_RCNTLO}, /* Clear the count regs. */ {0x00, EN0_RCNTHI}, {0x00, EN0_IMR}, /* Mask completion irq. */ {0xFF, EN0_ISR}, {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ {32, EN0_RCNTLO}, {0x00, EN0_RCNTHI}, {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. 
*/ {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); } bus_width = *(volatile unsigned char *)ABWCR; bus_width &= 1 << ((ioaddr >> 21) & 7); ei_status.word16 = (bus_width == 0); /* temporary setting */ for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) { SA_prom[i] = inb_p(ioaddr + NE_DATAPORT); inb_p(ioaddr + NE_DATAPORT); /* dummy read */ } start_page = NESM_START_PG; stop_page = NESM_STOP_PG; if (bus_width) wordlength = 1; else outb_p(0x49, ioaddr + EN0_DCFG); /* Set up the rest of the parameters. */ name = (wordlength == 2) ? "NE2000" : "NE1000"; if (! dev->irq) { printk(" failed to detect IRQ line.\n"); ret = -EAGAIN; goto err_out; } /* Snarf the interrupt now. There's no point in waiting since we cannot share and the board will usually be enabled. */ ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev); if (ret) { printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); goto err_out; } dev->base_addr = ioaddr; for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, ioaddr, dev->irq); ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; ei_status.word16 = (wordlength == 2); ei_status.rx_start_page = start_page + TX_PAGES; #ifdef PACKETBUF_MEMSIZE /* Allow the packet buffer size to be overridden by know-it-alls. 
*/ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; #endif ei_status.reset_8390 = &ne_reset_8390; ei_status.block_input = &ne_block_input; ei_status.block_output = &ne_block_output; ei_status.get_8390_hdr = &ne_get_8390_hdr; ei_status.priv = 0; dev->netdev_ops = &ne_netdev_ops; __NS8390_init(dev, 0); ret = register_netdev(dev); if (ret) goto out_irq; return 0; out_irq: free_irq(dev->irq, dev); err_out: release_region(ioaddr, NE_IO_EXTENT); return ret; } static int ne_open(struct net_device *dev) { __ei_open(dev); return 0; } static int ne_close(struct net_device *dev) { if (ei_debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); __ei_close(dev); return 0; } /* Hard reset the card. This used to pause for the same period that a 8390 reset command required, but that shouldn't be necessary. */ static void ne_reset_8390(struct net_device *dev) { unsigned long reset_start_time = jiffies; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); if (ei_debug > 1) printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); ei_status.txing = 0; ei_status.dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2*HZ/100)) { printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); break; } outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ } /* Grab the 8390 specific header. Similar to the block_input routine, but we don't need to be concerned with ring wrap as the header will be at the start of a page, so we optimize accordingly. */ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO); outb_p(0, NE_BASE + EN0_RCNTHI); outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */ outb_p(ring_page, NE_BASE + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); if (ei_status.word16) { int len; unsigned short *p = (unsigned short *)hdr; for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--) *p++ = inw(NE_BASE + NE_DATAPORT); } else insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; le16_to_cpus(&hdr->count); } /* Block input and output, similar to the Crynwr packet driver. If you are porting to a new ethercard, look at the packet driver source for hints. The NEx000 doesn't share the on-board packet memory -- you have to put the packet out through the "remote DMA" dataport using outb. */ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); #ifdef NE_SANITY_CHECK int xfer_count = count; #endif char *buf = skb->data; /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); outb_p(count >> 8, NE_BASE + EN0_RCNTHI); outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO); outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); if (ei_status.word16) { int len; unsigned short *p = (unsigned short *)buf; for (len = count>>1; len > 0; len--) *p++ = inw(NE_BASE + NE_DATAPORT); if (count & 0x01) { buf[count-1] = inb(NE_BASE + NE_DATAPORT); #ifdef NE_SANITY_CHECK xfer_count++; #endif } } else { insb(NE_BASE + NE_DATAPORT, buf, count); } #ifdef NE_SANITY_CHECK /* This was for the ALPHA version only, but enough people have been encountering problems so it is still here. If you see this message you either 1) have a slightly incompatible clone or 2) have noise/speed problems with your bus. */ if (ei_debug > 1) { /* DMA termination address check... */ int addr, tries = 20; do { /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken for Rx on some cards! */ int high = inb_p(NE_BASE + EN0_RSARHI); int low = inb_p(NE_BASE + EN0_RSARLO); addr = (high << 8) + low; if (((ring_offset + xfer_count) & 0xff) == low) break; } while (--tries > 0); if (tries <= 0) printk(KERN_WARNING "%s: RX transfer address mismatch," "%#4.4x (expected) vs. %#4.4x (actual).\n", dev->name, ring_offset + xfer_count, addr); } #endif outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
*/ ei_status.dmaing &= ~0x01; } static void ne_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned long dma_start; #ifdef NE_SANITY_CHECK int retries = 0; #endif /* Round the count up for word writes. Do we need to do this? What effect will an odd byte count have on the 8390? I should check someday. */ if (ei_status.word16 && (count & 0x01)) count++; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." "[DMAstat:%d][irqlock:%d]\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD); #ifdef NE_SANITY_CHECK retry: #endif #ifdef NE8390_RW_BUGFIX /* Handle the read-before-write bug the same way as the Crynwr packet driver -- the NatSemi method doesn't work. Actually this doesn't always work either, but if you have problems with your NEx000 this is better than nothing! */ outb_p(0x42, NE_BASE + EN0_RCNTLO); outb_p(0x00, NE_BASE + EN0_RCNTHI); outb_p(0x42, NE_BASE + EN0_RSARLO); outb_p(0x00, NE_BASE + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); /* Make certain that the dummy read has occurred. */ udelay(6); #endif outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Now the normal output. 
*/ outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); outb_p(count >> 8, NE_BASE + EN0_RCNTHI); outb_p(0x00, NE_BASE + EN0_RSARLO); outb_p(start_page, NE_BASE + EN0_RSARHI); outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD); if (ei_status.word16) { int len; unsigned short *p = (unsigned short *)buf; for (len = count>>1; len > 0; len--) outw(*p++, NE_BASE + NE_DATAPORT); } else { outsb(NE_BASE + NE_DATAPORT, buf, count); } dma_start = jiffies; #ifdef NE_SANITY_CHECK /* This was for the ALPHA version only, but enough people have been encountering problems so it is still here. */ if (ei_debug > 1) { /* DMA termination address check... */ int addr, tries = 20; do { int high = inb_p(NE_BASE + EN0_RSARHI); int low = inb_p(NE_BASE + EN0_RSARLO); addr = (high << 8) + low; if ((start_page << 8) + count == addr) break; } while (--tries > 0); if (tries <= 0) { printk(KERN_WARNING "%s: Tx packet transfer address mismatch," "%#4.4x (expected) vs. %#4.4x (actual).\n", dev->name, (start_page << 8) + count, addr); if (retries++ == 0) goto retry; } } #endif while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); ne_reset_8390(dev); __NS8390_init(dev,1); break; } outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; } #ifdef MODULE #define MAX_NE_CARDS 1 /* Max number of NE cards per module */ static struct net_device *dev_ne[MAX_NE_CARDS]; static int io[MAX_NE_CARDS]; static int irq[MAX_NE_CARDS]; static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(bad, int, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s)"); MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver"); MODULE_LICENSE("GPL"); /* This is set up so that no ISA autoprobe takes place. 
We can't guarantee that the ne2k probe is the last 8390 based probe to take place (as it is at boot) and so the probe will get confused by any other 8390 cards. ISA device autoprobes on a running machine are not recommended anyway. */ int init_module(void) { int this_dev, found = 0; int err; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { struct net_device *dev = alloc_ei_netdev(); if (!dev) break; if (io[this_dev]) { dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; dev->base_addr = io[this_dev]; } else { dev->base_addr = h8300_ne_base[this_dev]; dev->irq = h8300_ne_irq[this_dev]; } err = init_reg_offset(dev, dev->base_addr); if (!err) { if (do_ne_probe(dev) == 0) { dev_ne[found++] = dev; continue; } } free_netdev(dev); if (found) break; if (io[this_dev] != 0) printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr); else printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n"); return -ENXIO; } if (found) return 0; return -ENODEV; } void cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { struct net_device *dev = dev_ne[this_dev]; if (dev) { unregister_netdev(dev); cleanup_card(dev); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
Canonical-kernel/Ubuntu-kernel
drivers/net/ethernet/cisco/enic/vnic_wq.c
1021
4783
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/slab.h> #include "vnic_dev.h" #include "vnic_wq.h" static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { struct vnic_wq_buf *buf; unsigned int i, j, count = wq->ring.desc_count; unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); for (i = 0; i < blks; i++) { wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!wq->bufs[i]) return -ENOMEM; } for (i = 0; i < blks; i++) { buf = wq->bufs[i]; for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; buf->desc = (u8 *)wq->ring.descs + wq->ring.desc_size * buf->index; if (buf->index + 1 == count) { buf->next = wq->bufs[0]; break; } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { buf->next = wq->bufs[i + 1]; } else { buf->next = buf + 1; buf++; } } } wq->to_use = wq->to_clean = wq->bufs[0]; return 0; } void vnic_wq_free(struct vnic_wq *wq) { struct vnic_dev *vdev; unsigned int i; vdev = wq->vdev; vnic_dev_free_desc_ring(vdev, &wq->ring); for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { if (wq->bufs[i]) { kfree(wq->bufs[i]); 
wq->bufs[i] = NULL; } } wq->ctrl = NULL; } int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { int err; wq->index = index; wq->vdev = vdev; wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { pr_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } vnic_wq_disable(wq); err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); if (err) return err; err = vnic_wq_alloc_bufs(wq); if (err) { vnic_wq_free(wq); return err; } return 0; } static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &wq->ctrl->ring_base); iowrite32(count, &wq->ctrl->ring_size); iowrite32(fetch_index, &wq->ctrl->fetch_index); iowrite32(posted_index, &wq->ctrl->posted_index); iowrite32(cq_index, &wq->ctrl->cq_index); iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); iowrite32(0, &wq->ctrl->error_status); wq->to_use = wq->to_clean = &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); } unsigned int vnic_wq_error_status(struct vnic_wq *wq) { return ioread32(&wq->ctrl->error_status); } void vnic_wq_enable(struct vnic_wq *wq) { iowrite32(1, &wq->ctrl->enable); } int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; iowrite32(0, &wq->ctrl->enable); /* Wait for HW to ACK disable request */ for (wait = 0; wait < 1000; wait++) { if 
(!(ioread32(&wq->ctrl->running))) return 0; udelay(10); } pr_err("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } void vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) { struct vnic_wq_buf *buf; buf = wq->to_clean; while (vnic_wq_desc_used(wq) > 0) { (*buf_clean)(wq, buf); buf = wq->to_clean = buf->next; wq->ring.desc_avail++; } wq->to_use = wq->to_clean = wq->bufs[0]; iowrite32(0, &wq->ctrl->fetch_index); iowrite32(0, &wq->ctrl->posted_index); iowrite32(0, &wq->ctrl->error_status); vnic_dev_clear_desc_ring(&wq->ring); }
gpl-2.0
elephone-dev/P8000-Kernel
drivers/mfd/arizona-spi.c
1533
2285
/*
 * arizona-spi.c -- Arizona SPI bus interface
 *
 * Copyright 2012 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include <linux/mfd/arizona/core.h>

#include "arizona.h"

/*
 * SPI probe: pick the chip-specific regmap config from the matched SPI
 * device id, allocate and fill the shared struct arizona, then hand off
 * to the bus-independent arizona_dev_init() for the rest of the setup.
 * All allocations are devm-managed, so error paths need no explicit
 * cleanup.
 */
static int arizona_spi_probe(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);
	struct arizona *arizona;
	const struct regmap_config *regmap_config;
	int ret;

	/*
	 * driver_data carries the chip type; only configs for chips
	 * compiled into this kernel are available.
	 */
	switch (id->driver_data) {
#ifdef CONFIG_MFD_WM5102
	case WM5102:
		regmap_config = &wm5102_spi_regmap;
		break;
#endif
#ifdef CONFIG_MFD_WM5110
	case WM5110:
		regmap_config = &wm5110_spi_regmap;
		break;
#endif
	default:
		dev_err(&spi->dev, "Unknown device type %ld\n",
			id->driver_data);
		return -EINVAL;
	}

	arizona = devm_kzalloc(&spi->dev, sizeof(*arizona), GFP_KERNEL);
	if (arizona == NULL)
		return -ENOMEM;

	arizona->regmap = devm_regmap_init_spi(spi, regmap_config);
	if (IS_ERR(arizona->regmap)) {
		ret = PTR_ERR(arizona->regmap);
		dev_err(&spi->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	arizona->type = id->driver_data;
	arizona->dev = &spi->dev;
	arizona->irq = spi->irq;

	return arizona_dev_init(arizona);
}

/*
 * SPI remove: tear down via the bus-independent exit path.
 * NOTE(review): assumes arizona_dev_init() stored the arizona pointer as
 * spi drvdata -- confirm against arizona-core.
 */
static int arizona_spi_remove(struct spi_device *spi)
{
	struct arizona *arizona = spi_get_drvdata(spi);
	arizona_dev_exit(arizona);
	return 0;
}

/* Supported chips; driver_data is the arizona chip type enum. */
static const struct spi_device_id arizona_spi_ids[] = {
	{ "wm5102", WM5102 },
	{ "wm5110", WM5110 },
	{ },
};
MODULE_DEVICE_TABLE(spi, arizona_spi_ids);

static struct spi_driver arizona_spi_driver = {
	.driver = {
		.name	= "arizona",
		.owner	= THIS_MODULE,
		.pm	= &arizona_pm_ops,
	},
	.probe		= arizona_spi_probe,
	.remove		= arizona_spi_remove,
	.id_table	= arizona_spi_ids,
};

module_spi_driver(arizona_spi_driver);

MODULE_DESCRIPTION("Arizona SPI bus interface");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
gpl-2.0
abyssxsy/linux-tk1
arch/alpha/kernel/osf_sys.c
1533
35012
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... 
* * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). */ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct fd arg = fdget(fd); struct osf_dirent_callback buf; if (!arg.file) return -EBADF; buf.dirent = dirent; buf.basep = basep; buf.count = count; buf.error = 0; error = vfs_readdir(arg.file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fdput(arg); return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned 
long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } struct osf_stat { int st_dev; int st_pad1; unsigned st_mode; unsigned short st_nlink; short st_nlink_reserved; unsigned st_uid; unsigned st_gid; int st_rdev; int st_ldev; long st_size; int st_pad2; int st_uatime; int st_pad3; int st_umtime; int st_pad4; int st_uctime; int st_pad5; int st_pad6; unsigned st_flags; unsigned st_gen; long st_spare[4]; unsigned st_ino; int st_ino_reserved; int st_atime; int st_atime_reserved; int st_mtime; int st_mtime_reserved; int st_ctime; int st_ctime_reserved; long st_blksize; long st_blocks; }; /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. 
 */

/* OSF/1 view of struct statfs; only the leading fields are meaningful. */
struct osf_statfs {
	short f_type;
	short f_flags;
	int f_fsize;
	int f_bsize;
	int f_blocks;
	int f_bfree;
	int f_bavail;
	int f_files;
	int f_ffree;
	__kernel_fsid_t f_fsid;
};

/* OSF/1 64-bit statfs layout; pad/spare fields are never filled in. */
struct osf_statfs64 {
	short f_type;
	short f_flags;
	int f_pad1;
	int f_pad2;
	int f_pad3;
	int f_pad4;
	int f_pad5;
	int f_pad6;
	int f_pad7;
	__kernel_fsid_t f_fsid;
	u_short f_namemax;
	short f_reserved1;
	int f_spare[8];
	char f_pad8[90];
	char f_pad9[90];
	long mount_info[10];
	u_long f_flags2;
	long f_spare2[14];
	long f_fsize;
	long f_bsize;
	long f_blocks;
	long f_bfree;
	long f_bavail;
	long f_files;
	long f_ffree;
};

/*
 * Convert a kernel struct kstat into the OSF/1 struct osf_stat layout
 * and copy it to user space.  Times are split into whole seconds and
 * microseconds (OSF/1 has no nanosecond fields).  Returns 0 or -EFAULT.
 */
static int
linux_to_osf_stat(struct kstat *lstat, struct osf_stat __user *osf_stat)
{
	struct osf_stat tmp = { 0 };

	tmp.st_dev = lstat->dev;
	tmp.st_mode = lstat->mode;
	tmp.st_nlink = lstat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), lstat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), lstat->gid);
	tmp.st_rdev = lstat->rdev;
	tmp.st_ldev = lstat->rdev;
	tmp.st_size = lstat->size;
	/* tv_nsec -> usec for the OSF/1 "u" time fields. */
	tmp.st_uatime = lstat->atime.tv_nsec / 1000;
	tmp.st_umtime = lstat->mtime.tv_nsec / 1000;
	tmp.st_uctime = lstat->ctime.tv_nsec / 1000;
	tmp.st_ino = lstat->ino;
	tmp.st_atime = lstat->atime.tv_sec;
	tmp.st_mtime = lstat->mtime.tv_sec;
	tmp.st_ctime = lstat->ctime.tv_sec;
	tmp.st_blksize = lstat->blksize;
	tmp.st_blocks = lstat->blocks;

	return copy_to_user(osf_stat, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

/*
 * Convert a kernel struct kstatfs into struct osf_statfs and copy at
 * most bufsiz bytes to user space (OSF/1 lets callers ask for a prefix).
 * NOTE(review): tmp_stat is not zero-initialized; every named field is
 * assigned, but any padding bytes would be copied out as-is — presumed
 * no padding in this layout, confirm for the target ABI.
 */
static int
linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
		    unsigned long bufsiz)
{
	struct osf_statfs tmp_stat;

	tmp_stat.f_type = linux_stat->f_type;
	tmp_stat.f_flags = 0;	/* mount flags */
	tmp_stat.f_fsize = linux_stat->f_frsize;
	tmp_stat.f_bsize = linux_stat->f_bsize;
	tmp_stat.f_blocks = linux_stat->f_blocks;
	tmp_stat.f_bfree = linux_stat->f_bfree;
	tmp_stat.f_bavail = linux_stat->f_bavail;
	tmp_stat.f_files = linux_stat->f_files;
	tmp_stat.f_ffree = linux_stat->f_ffree;
	tmp_stat.f_fsid = linux_stat->f_fsid;
	if (bufsiz > sizeof(tmp_stat))
		bufsiz = sizeof(tmp_stat);
	return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}

/*
 * Same as above for the 64-bit layout; unfilled pad/spare fields are
 * cleared by the { 0 } initializer so no kernel stack data leaks out.
 */
static int
linux_to_osf_statfs64(struct kstatfs *linux_stat, struct osf_statfs64 __user *osf_stat,
		      unsigned long bufsiz)
{
	struct osf_statfs64 tmp_stat = { 0 };

	tmp_stat.f_type = linux_stat->f_type;
	tmp_stat.f_fsize = linux_stat->f_frsize;
	tmp_stat.f_bsize = linux_stat->f_bsize;
	tmp_stat.f_blocks = linux_stat->f_blocks;
	tmp_stat.f_bfree = linux_stat->f_bfree;
	tmp_stat.f_bavail = linux_stat->f_bavail;
	tmp_stat.f_files = linux_stat->f_files;
	tmp_stat.f_ffree = linux_stat->f_ffree;
	tmp_stat.f_fsid = linux_stat->f_fsid;
	if (bufsiz > sizeof(tmp_stat))
		bufsiz = sizeof(tmp_stat);
	return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}

/* statfs(2) by path, OSF/1 flavor. */
SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
		struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
	struct kstatfs linux_stat;
	int error = user_statfs(pathname, &linux_stat);
	if (!error)
		error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
	return error;
}

/* stat(2), following symlinks. */
SYSCALL_DEFINE2(osf_stat, char __user *, name, struct osf_stat __user *, buf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(name, &stat);
	if (error)
		return error;

	return linux_to_osf_stat(&stat, buf);
}

/* lstat(2), not following symlinks. */
SYSCALL_DEFINE2(osf_lstat, char __user *, name, struct osf_stat __user *, buf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(name, &stat);
	if (error)
		return error;

	return linux_to_osf_stat(&stat, buf);
}

/* fstat(2) on an open descriptor. */
SYSCALL_DEFINE2(osf_fstat, int, fd, struct osf_stat __user *, buf)
{
	struct kstat stat;
	int error;

	error = vfs_fstat(fd, &stat);
	if (error)
		return error;

	return linux_to_osf_stat(&stat, buf);
}

/* fstatfs(2) on an open descriptor. */
SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
		struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
	struct kstatfs linux_stat;
	int error = fd_statfs(fd, &linux_stat);
	if (!error)
		error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
	return error;
}

/* 64-bit statfs by path. */
SYSCALL_DEFINE3(osf_statfs64, char __user *, pathname,
		struct osf_statfs64 __user *, buffer, unsigned long, bufsiz)
{
	struct kstatfs linux_stat;
	int error = user_statfs(pathname, &linux_stat);
	if (!error)
		error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz);
	return error;
}

/* 64-bit fstatfs on an open descriptor. */
SYSCALL_DEFINE3(osf_fstatfs64, unsigned long, fd,
		struct osf_statfs64 __user *, buffer, unsigned long, bufsiz)
{
	struct kstatfs linux_stat;
	int error = fd_statfs(fd, &linux_stat);
	if (!error)
		error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz);
	return error;
}

/*
 * Uhh.. OSF/1 mount parameters aren't exactly obvious..
 *
 * Although to be frank, neither are the native Linux/i386 ones..
*/ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; struct filename *name; name = getname(path); retval = PTR_ERR(name); if (IS_ERR(name)) goto out; switch (typenr) { case 1: retval = osf_ufs_mount(name->name, data, flag); 
break; case 6: retval = osf_cdfs_mount(name->name, data, flag); break; case 9: retval = osf_procfs_mount(name->name, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } putname(name); out: return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (len > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. 
*/ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user 
*ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. 
*/ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = current_thread_info()->status & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update softare trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. 
*/ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update softare trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. */ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case SSI_NVPAIRS: { unsigned __user *p = buffer; unsigned i; for (i = 0, p = buffer; i < nbytes; ++i, p += 2) { unsigned v, w, status; if (get_user(v, p) || get_user(w, p + 1)) return -EFAULT; switch (v) { case SSIN_UACPROC: w &= UAC_BITMASK; status = current_thread_info()->status; status = (status & ~UAC_BITMASK) | w; current_thread_info()->status = status; break; default: return -EOPNOTSUPP; } } return 0; } case SSI_LMF: return 0; default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. 
Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval 
*)&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } kts.tv_nsec *= 1000; return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. 
*/ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; cputime_t utime, stime; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: task_cputime(current, &utime, &stime); jiffies_to_timeval32(utime, &r.ru_utime); jiffies_to_timeval32(stime, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; unsigned int status = 0; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (unsigned int __user *) &status, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_unmapped_area_info info; info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = limit; info.align_mask = 0; info.align_offset = 0; return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) { int prio = sys_getpriority(which, who); if (prio >= 0) { /* Return value is the unbiased priority, i.e. 20 - prio. This does result in negative return values, so signal no error */ force_successful_syscall_return(); prio = 20 - prio; } return prio; } SYSCALL_DEFINE0(getxuid) { current_pt_regs()->r20 = sys_geteuid(); return sys_getuid(); } SYSCALL_DEFINE0(getxgid) { current_pt_regs()->r20 = sys_getegid(); return sys_getgid(); } SYSCALL_DEFINE0(getxpid) { current_pt_regs()->r20 = sys_getppid(); return sys_getpid(); } SYSCALL_DEFINE0(alpha_pipe) { int fd[2]; int res = do_pipe_flags(fd, 0); if (!res) { /* The return values are in $0 and $20. 
*/ current_pt_regs()->r20 = fd[1]; res = fd[0]; } return res; } SYSCALL_DEFINE1(sethae, unsigned long, val) { current_pt_regs()->hae = val; return 0; }
gpl-2.0
mythos234/OnePlus2testing
drivers/hwmon/ina209.c
2301
18678
/*
 * Driver for the Texas Instruments / Burr Brown INA209
 * Bidirectional Current/Power Monitor
 *
 * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
 *
 * Derived from Ira W. Snyder's original driver submission
 * Copyright (C) 2008 Paul Hays <Paul.Hays@cattail.ca>
 * Copyright (C) 2008-2009 Ira W. Snyder <iws@ovro.caltech.edu>
 *
 * Aligned with ina2xx driver
 * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
 * Thanks to Jan Volkering
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * Datasheet:
 *	http://www.ti.com/lit/gpn/ina209
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <linux/platform_data/ina2xx.h>

/* register definitions */
#define INA209_CONFIGURATION		0x00
#define INA209_STATUS			0x01
#define INA209_STATUS_MASK		0x02
#define INA209_SHUNT_VOLTAGE		0x03
#define INA209_BUS_VOLTAGE		0x04
#define INA209_POWER			0x05
#define INA209_CURRENT			0x06
#define INA209_SHUNT_VOLTAGE_POS_PEAK	0x07
#define INA209_SHUNT_VOLTAGE_NEG_PEAK	0x08
#define INA209_BUS_VOLTAGE_MAX_PEAK	0x09
#define INA209_BUS_VOLTAGE_MIN_PEAK	0x0a
#define INA209_POWER_PEAK		0x0b
#define INA209_SHUNT_VOLTAGE_POS_WARN	0x0c
#define INA209_SHUNT_VOLTAGE_NEG_WARN	0x0d
#define INA209_POWER_WARN		0x0e
#define INA209_BUS_VOLTAGE_OVER_WARN	0x0f
#define INA209_BUS_VOLTAGE_UNDER_WARN	0x10
#define INA209_POWER_OVER_LIMIT		0x11
#define INA209_BUS_VOLTAGE_OVER_LIMIT	0x12
#define INA209_BUS_VOLTAGE_UNDER_LIMIT	0x13
#define INA209_CRITICAL_DAC_POS		0x14
#define INA209_CRITICAL_DAC_NEG		0x15
#define INA209_CALIBRATION		0x16

/* number of chip registers cached below */
#define INA209_REGISTERS		0x17

#define INA209_CONFIG_DEFAULT		0x3c47	/* PGA=8, full range */
#define INA209_SHUNT_DEFAULT		10000	/* uOhm */

/* Per-client driver state; update_lock guards regs[] and update_interval. */
struct ina209_data {
	struct device *hwmon_dev;

	struct mutex update_lock;
	bool valid;
	unsigned long last_updated;	/* in jiffies */

	u16 regs[INA209_REGISTERS];	/* All chip registers */

	u16 config_orig;		/* Original configuration */
	u16 calibration_orig;		/* Original calibration */
	u16 update_interval;
};

/*
 * Refresh the cached register set from the chip if it is stale
 * (older than update_interval) or invalid.  Returns the data pointer
 * on success or an ERR_PTR on I2C read failure.
 */
static struct ina209_data *ina209_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ina209_data *data = i2c_get_clientdata(client);
	struct ina209_data *ret = data;
	s32 val;
	int i;

	mutex_lock(&data->update_lock);
	if (!data->valid ||
	    time_after(jiffies, data->last_updated + data->update_interval)) {
		for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
			/* Registers are big-endian on the wire. */
			val = i2c_smbus_read_word_swapped(client, i);
			if (val < 0) {
				ret = ERR_PTR(val);
				goto abort;
			}
			data->regs[i] = val;
		}
		data->last_updated = jiffies;
		data->valid = true;
	}
abort:
	mutex_unlock(&data->update_lock);
	return ret;
}

/*
 * Read a value from a device register and convert it to the
 * appropriate sysfs units
 */
static long ina209_from_reg(const u8 reg, const u16 val)
{
	switch (reg) {
	case INA209_SHUNT_VOLTAGE:
	case INA209_SHUNT_VOLTAGE_POS_PEAK:
	case INA209_SHUNT_VOLTAGE_NEG_PEAK:
	case INA209_SHUNT_VOLTAGE_POS_WARN:
	case INA209_SHUNT_VOLTAGE_NEG_WARN:
		/* LSB=10 uV. Convert to mV. */
		return DIV_ROUND_CLOSEST(val, 100);

	case INA209_BUS_VOLTAGE:
	case INA209_BUS_VOLTAGE_MAX_PEAK:
	case INA209_BUS_VOLTAGE_MIN_PEAK:
	case INA209_BUS_VOLTAGE_OVER_WARN:
	case INA209_BUS_VOLTAGE_UNDER_WARN:
	case INA209_BUS_VOLTAGE_OVER_LIMIT:
	case INA209_BUS_VOLTAGE_UNDER_LIMIT:
		/* LSB=4 mV, last 3 bits unused */
		return (val >> 3) * 4;

	case INA209_CRITICAL_DAC_POS:
		/* LSB=1 mV, in the upper 8 bits */
		return val >> 8;

	case INA209_CRITICAL_DAC_NEG:
		/* LSB=1 mV, in the upper 8 bits */
		return -1 * (val >> 8);

	case INA209_POWER:
	case INA209_POWER_PEAK:
	case INA209_POWER_WARN:
	case INA209_POWER_OVER_LIMIT:
		/* LSB=20 mW. Convert to uW */
		return val * 20 * 1000L;

	case INA209_CURRENT:
		/* LSB=1 mA (selected). Is in mA */
		return val;
	}

	/* programmer goofed */
	WARN_ON_ONCE(1);
	return 0;
}

/*
 * Take a value and convert it to register format, clamping the value
 * to the appropriate range.
 */
static int ina209_to_reg(u8 reg, u16 old, long val)
{
	switch (reg) {
	case INA209_SHUNT_VOLTAGE_POS_WARN:
	case INA209_SHUNT_VOLTAGE_NEG_WARN:
		/* Limit to +- 320 mV, 10 uV LSB */
		return clamp_val(val, -320, 320) * 100;

	case INA209_BUS_VOLTAGE_OVER_WARN:
	case INA209_BUS_VOLTAGE_UNDER_WARN:
	case INA209_BUS_VOLTAGE_OVER_LIMIT:
	case INA209_BUS_VOLTAGE_UNDER_LIMIT:
		/*
		 * Limit to 0-32000 mV, 4 mV LSB
		 *
		 * The last three bits aren't part of the value, but we'll
		 * preserve them in their original state.
		 */
		return (DIV_ROUND_CLOSEST(clamp_val(val, 0, 32000), 4) << 3)
		  | (old & 0x7);

	case INA209_CRITICAL_DAC_NEG:
		/*
		 * Limit to -255-0 mV, 1 mV LSB
		 * Convert the value to a positive value for the register
		 *
		 * The value lives in the top 8 bits only, be careful
		 * and keep original value of other bits.
		 */
		return (clamp_val(-val, 0, 255) << 8) | (old & 0xff);

	case INA209_CRITICAL_DAC_POS:
		/*
		 * Limit to 0-255 mV, 1 mV LSB
		 *
		 * The value lives in the top 8 bits only, be careful
		 * and keep original value of other bits.
		 */
		return (clamp_val(val, 0, 255) << 8) | (old & 0xff);

	case INA209_POWER_WARN:
	case INA209_POWER_OVER_LIMIT:
		/* 20 mW LSB */
		return DIV_ROUND_CLOSEST(val, 20 * 1000);
	}

	/* Other registers are read-only, return access error */
	return -EACCES;
}

/*
 * Derive the polling interval from the ADC conversion-time field in
 * the configuration register (bits 3..6).
 * NOTE(review): result unit is presumably milliseconds — confirm
 * against the INA209 datasheet conversion-time table.
 */
static int ina209_interval_from_reg(u16 reg)
{
	return 68 >> (15 - ((reg >> 3) & 0x0f));
}

/*
 * Inverse of the above: pick the ADC setting whose conversion time
 * best matches the requested interval and merge it into both ADC
 * fields of the configuration register (mask 0xf807 keeps the rest).
 */
static u16 ina209_reg_from_interval(u16 config, long interval)
{
	int i, adc;

	if (interval <= 0) {
		adc = 8;	/* fastest conversion */
	} else {
		adc = 15;	/* slowest; step down while interval is shorter */
		for (i = 34 + 34 / 2; i; i >>= 1) {
			if (i < interval)
				break;
			adc--;
		}
	}
	return (config & 0xf807) | (adc << 3) | (adc << 7);
}

/*
 * sysfs store: set the update interval by reprogramming the ADC
 * conversion time in the configuration register.
 */
static ssize_t ina209_set_interval(struct device *dev,
				   struct device_attribute *da,
				   const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ina209_data *data = ina209_update_device(dev);
	long val;
	u16 regval;
	int ret;

	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&data->update_lock);
	regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
					  val);
	i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
	data->regs[INA209_CONFIGURATION] = regval;
	data->update_interval = ina209_interval_from_reg(regval);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs show: report the currently cached update interval. */
static ssize_t ina209_show_interval(struct device *dev,
				    struct device_attribute *da, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ina209_data *data = i2c_get_clientdata(client);

	return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
}

/*
 * History is reset by writing 1 into bit 0 of the respective peak register.
 * Since more than one peak register may be affected by the scope of a
 * reset_history attribute write, use a bit mask in attr->index to identify
 * which registers are affected.
*/ static u16 ina209_reset_history_regs[] = { INA209_SHUNT_VOLTAGE_POS_PEAK, INA209_SHUNT_VOLTAGE_NEG_PEAK, INA209_BUS_VOLTAGE_MAX_PEAK, INA209_BUS_VOLTAGE_MIN_PEAK, INA209_POWER_PEAK }; static ssize_t ina209_reset_history(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct ina209_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); u32 mask = attr->index; long val; int i, ret; ret = kstrtol(buf, 10, &val); if (ret < 0) return ret; mutex_lock(&data->update_lock); for (i = 0; i < ARRAY_SIZE(ina209_reset_history_regs); i++) { if (mask & (1 << i)) i2c_smbus_write_word_swapped(client, ina209_reset_history_regs[i], 1); } data->valid = false; mutex_unlock(&data->update_lock); return count; } static ssize_t ina209_set_value(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct ina209_data *data = ina209_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int reg = attr->index; long val; int ret; if (IS_ERR(data)) return PTR_ERR(data); ret = kstrtol(buf, 10, &val); if (ret < 0) return ret; mutex_lock(&data->update_lock); ret = ina209_to_reg(reg, data->regs[reg], val); if (ret < 0) { count = ret; goto abort; } i2c_smbus_write_word_swapped(client, reg, ret); data->regs[reg] = ret; abort: mutex_unlock(&data->update_lock); return count; } static ssize_t ina209_show_value(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ina209_data *data = ina209_update_device(dev); long val; if (IS_ERR(data)) return PTR_ERR(data); val = ina209_from_reg(attr->index, data->regs[attr->index]); return snprintf(buf, PAGE_SIZE, "%ld\n", val); } static ssize_t ina209_show_alarm(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute 
*attr = to_sensor_dev_attr(da); struct ina209_data *data = ina209_update_device(dev); const unsigned int mask = attr->index; u16 status; if (IS_ERR(data)) return PTR_ERR(data); status = data->regs[INA209_STATUS]; /* * All alarms are in the INA209_STATUS register. To avoid a long * switch statement, the mask is passed in attr->index */ return snprintf(buf, PAGE_SIZE, "%u\n", !!(status & mask)); } /* Shunt voltage, history, limits, alarms */ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL, INA209_SHUNT_VOLTAGE); static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL, INA209_SHUNT_VOLTAGE_POS_PEAK); static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL, INA209_SHUNT_VOLTAGE_NEG_PEAK); static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL, ina209_reset_history, (1 << 0) | (1 << 1)); static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN); static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN); static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_CRITICAL_DAC_POS); static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_CRITICAL_DAC_NEG); static SENSOR_DEVICE_ATTR(in0_min_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 11); static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 12); static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 6); static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 7); /* Bus voltage, history, limits, alarms */ static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL, INA209_BUS_VOLTAGE); static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL, INA209_BUS_VOLTAGE_MAX_PEAK); static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, 
ina209_show_value, NULL, INA209_BUS_VOLTAGE_MIN_PEAK); static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL, ina209_reset_history, (1 << 2) | (1 << 3)); static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN); static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN); static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT); static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT); static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 14); static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 15); static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 9); static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 10); /* Power */ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL, INA209_POWER); static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value, NULL, INA209_POWER_PEAK); static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL, ina209_reset_history, 1 << 4); static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_POWER_WARN); static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value, ina209_set_value, INA209_POWER_OVER_LIMIT); static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 13); static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL, 1 << 8); /* Current */ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL, INA209_CURRENT); static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, ina209_show_interval, ina209_set_interval, 0); /* * Finally, construct an array of pointers to members of the above 
objects, * as required for sysfs_create_group() */ static struct attribute *ina209_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_input_highest.dev_attr.attr, &sensor_dev_attr_in0_input_lowest.dev_attr.attr, &sensor_dev_attr_in0_reset_history.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_crit_max.dev_attr.attr, &sensor_dev_attr_in0_crit_min.dev_attr.attr, &sensor_dev_attr_in0_max_alarm.dev_attr.attr, &sensor_dev_attr_in0_min_alarm.dev_attr.attr, &sensor_dev_attr_in0_crit_max_alarm.dev_attr.attr, &sensor_dev_attr_in0_crit_min_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_input_highest.dev_attr.attr, &sensor_dev_attr_in1_input_lowest.dev_attr.attr, &sensor_dev_attr_in1_reset_history.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_crit_max.dev_attr.attr, &sensor_dev_attr_in1_crit_min.dev_attr.attr, &sensor_dev_attr_in1_max_alarm.dev_attr.attr, &sensor_dev_attr_in1_min_alarm.dev_attr.attr, &sensor_dev_attr_in1_crit_max_alarm.dev_attr.attr, &sensor_dev_attr_in1_crit_min_alarm.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power1_input_highest.dev_attr.attr, &sensor_dev_attr_power1_reset_history.dev_attr.attr, &sensor_dev_attr_power1_max.dev_attr.attr, &sensor_dev_attr_power1_crit.dev_attr.attr, &sensor_dev_attr_power1_max_alarm.dev_attr.attr, &sensor_dev_attr_power1_crit_alarm.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_update_interval.dev_attr.attr, NULL, }; static const struct attribute_group ina209_group = { .attrs = ina209_attributes, }; static void ina209_restore_conf(struct i2c_client *client, struct ina209_data *data) { /* Restore initial configuration */ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, data->config_orig); i2c_smbus_write_word_swapped(client, INA209_CALIBRATION, 
data->calibration_orig); } static int ina209_init_client(struct i2c_client *client, struct ina209_data *data) { struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev); u32 shunt; int reg; reg = i2c_smbus_read_word_swapped(client, INA209_CALIBRATION); if (reg < 0) return reg; data->calibration_orig = reg; reg = i2c_smbus_read_word_swapped(client, INA209_CONFIGURATION); if (reg < 0) return reg; data->config_orig = reg; if (pdata) { if (pdata->shunt_uohms <= 0) return -EINVAL; shunt = pdata->shunt_uohms; } else if (!of_property_read_u32(client->dev.of_node, "shunt-resistor", &shunt)) { if (shunt == 0) return -EINVAL; } else { shunt = data->calibration_orig ? 40960000 / data->calibration_orig : INA209_SHUNT_DEFAULT; } i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, INA209_CONFIG_DEFAULT); data->update_interval = ina209_interval_from_reg(INA209_CONFIG_DEFAULT); /* * Calibrate current LSB to 1mA. Shunt is in uOhms. * See equation 13 in datasheet. */ i2c_smbus_write_word_swapped(client, INA209_CALIBRATION, clamp_val(40960000 / shunt, 1, 65535)); /* Clear status register */ i2c_smbus_read_word_swapped(client, INA209_STATUS); return 0; } static int ina209_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct ina209_data *data; int ret; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); ret = ina209_init_client(client, data); if (ret) return ret; /* Register sysfs hooks */ ret = sysfs_create_group(&client->dev.kobj, &ina209_group); if (ret) goto out_restore_conf; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto out_hwmon_device_register; } return 0; out_hwmon_device_register: sysfs_remove_group(&client->dev.kobj, &ina209_group); 
out_restore_conf: ina209_restore_conf(client, data); return ret; } static int ina209_remove(struct i2c_client *client) { struct ina209_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ina209_group); ina209_restore_conf(client, data); return 0; } static const struct i2c_device_id ina209_id[] = { { "ina209", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ina209_id); /* This is the driver that will be inserted */ static struct i2c_driver ina209_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ina209", }, .probe = ina209_probe, .remove = ina209_remove, .id_table = ina209_id, }; module_i2c_driver(ina209_driver); MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>, Paul Hays <Paul.Hays@cattail.ca>, Guenter Roeck <linux@roeck-us.net>"); MODULE_DESCRIPTION("INA209 driver"); MODULE_LICENSE("GPL");
gpl-2.0
rutvik95/speedx_kernel_i9082
drivers/pci/hotplug/shpchp_core.c
2301
10514
/* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include "shpchp.h" /* Global variables */ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; struct workqueue_struct *shpchp_wq; struct workqueue_struct *shpchp_ordered_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" #define DRIVER_DESC "Standard Hot Plug PCI Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(shpchp_debug, bool, 0644); module_param(shpchp_poll_mode, bool, 0644); module_param(shpchp_poll_time, int, 0644); MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); 
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); #define SHPC_MODULE_NAME "shpchp" static int set_attention_status (struct hotplug_slot *slot, u8 value); static int enable_slot (struct hotplug_slot *slot); static int disable_slot (struct hotplug_slot *slot); static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = disable_slot, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, }; /** * release_slot - free up the memory used by a slot * @hotplug_slot: slot to free */ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); kfree(slot->hotplug_slot->info); kfree(slot->hotplug_slot); kfree(slot); } static int init_slots(struct controller *ctrl) { struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; int i; for (i = 0; i < ctrl->num_slots; i++) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); if (!hotplug_slot) goto error_slot; slot->hotplug_slot = hotplug_slot; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto error_hpslot; hotplug_slot->info = info; slot->hp_slot = i; slot->ctrl = ctrl; slot->bus = ctrl->pci_dev->subordinate->number; slot->device = ctrl->slot_device_offset + i; slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); 
mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); /* register this slot with the hotplug pci core */ hotplug_slot->private = slot; hotplug_slot->release = &release_slot; snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " "hp_slot=%x sun=%x slot_device_offset=%x\n", pci_domain_nr(ctrl->pci_dev->subordinate), slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); retval = pci_hp_register(slot->hotplug_slot, ctrl->pci_dev->subordinate, slot->device, name); if (retval) { ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval); goto error_info; } get_power_status(hotplug_slot, &info->power_status); get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); list_add(&slot->slot_list, &ctrl->slot_list); } return 0; error_info: kfree(info); error_hpslot: kfree(hotplug_slot); error_slot: kfree(slot); error: return retval; } void cleanup_slots(struct controller *ctrl) { struct list_head *tmp; struct list_head *next; struct slot *slot; list_for_each_safe(tmp, next, &ctrl->slot_list) { slot = list_entry(tmp, struct slot, slot_list); list_del(&slot->slot_list); cancel_delayed_work(&slot->work); flush_workqueue(shpchp_wq); flush_workqueue(shpchp_ordered_wq); pci_hp_deregister(slot->hotplug_slot); } } /* * set_attention_status - Turns the Amber LED for a slot on, off or blink */ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); hotplug_slot->info->attention_status = status; slot->hpc_ops->set_attention_status(slot, status); return 0; } static int enable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); 
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return shpchp_sysfs_enable_slot(slot); } static int disable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return shpchp_sysfs_disable_slot(slot); } static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) *value = hotplug_slot->info->power_status; return 0; } static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) *value = hotplug_slot->info->attention_status; return 0; } static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) *value = hotplug_slot->info->latch_status; return 0; } static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) *value = hotplug_slot->info->adapter_status; return 0; } static int is_shpc_capable(struct pci_dev *dev) { if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) return 1; if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) return 0; if (get_hp_hw_control_from_firmware(dev)) return 0; return 1; } static int 
shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; struct controller *ctrl; if (!is_shpc_capable(pdev)) return -ENODEV; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { dev_err(&pdev->dev, "%s: Out of memory\n", __func__); goto err_out_none; } INIT_LIST_HEAD(&ctrl->slot_list); rc = shpc_init(ctrl, pdev); if (rc) { ctrl_dbg(ctrl, "Controller initialization failed\n"); goto err_out_free_ctrl; } pci_set_drvdata(pdev, ctrl); /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } rc = shpchp_create_ctrl_files(ctrl); if (rc) goto err_cleanup_slots; return 0; err_cleanup_slots: cleanup_slots(ctrl); err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); err_out_free_ctrl: kfree(ctrl); err_out_none: return -ENODEV; } static void shpc_remove(struct pci_dev *dev) { struct controller *ctrl = pci_get_drvdata(dev); shpchp_remove_ctrl_files(ctrl); ctrl->hpc_ops->release_ctlr(ctrl); kfree(ctrl); } static struct pci_device_id shpcd_pci_tbl[] = { {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)}, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl); static struct pci_driver shpc_driver = { .name = SHPC_MODULE_NAME, .id_table = shpcd_pci_tbl, .probe = shpc_probe, .remove = shpc_remove, }; static int __init shpcd_init(void) { int retval = 0; shpchp_wq = alloc_ordered_workqueue("shpchp", 0); if (!shpchp_wq) return -ENOMEM; shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); if (!shpchp_ordered_wq) { destroy_workqueue(shpchp_wq); return -ENOMEM; } retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __func__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) { destroy_workqueue(shpchp_ordered_wq); destroy_workqueue(shpchp_wq); } return retval; } static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); pci_unregister_driver(&shpc_driver); 
destroy_workqueue(shpchp_ordered_wq); destroy_workqueue(shpchp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } module_init(shpcd_init); module_exit(shpcd_cleanup);
gpl-2.0
iamroot11c/kernel_source
arch/arm/mach-omap2/clockdomains54xx_data.c
2557
14627
/* * OMAP54XX Clock domains framework * * Copyright (C) 2013 Texas Instruments, Inc. * * Abhijit Pagare (abhijitpagare@ti.com) * Benoit Cousson (b-cousson@ti.com) * Paul Walmsley (paul@pwsan.com) * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public linux-omap@vger.kernel.org mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "cm1_54xx.h" #include "cm2_54xx.h" #include "cm-regbits-54xx.h" #include "prm54xx.h" #include "prcm44xx.h" #include "prcm_mpu54xx.h" /* Static Dependencies for OMAP4 Clock Domains */ static struct clkdm_dep c2c_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { NULL }, }; static struct clkdm_dep cam_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { NULL }, }; static struct clkdm_dep dma_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dsp_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "emif_clkdm" }, { 
.clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep dss_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { NULL }, }; static struct clkdm_dep gpu_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { NULL }, }; static struct clkdm_dep ipu_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "dsp_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep iva_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { NULL }, }; static struct clkdm_dep l3init_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clkdm_dep l4sec_wkup_sleep_deps[] = { { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { NULL }, }; static struct clkdm_dep mipiext_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { NULL }, }; static struct 
clkdm_dep mpu_wkup_sleep_deps[] = { { .clkdm_name = "abe_clkdm" }, { .clkdm_name = "dsp_clkdm" }, { .clkdm_name = "dss_clkdm" }, { .clkdm_name = "emif_clkdm" }, { .clkdm_name = "gpu_clkdm" }, { .clkdm_name = "ipu_clkdm" }, { .clkdm_name = "iva_clkdm" }, { .clkdm_name = "l3init_clkdm" }, { .clkdm_name = "l3main1_clkdm" }, { .clkdm_name = "l3main2_clkdm" }, { .clkdm_name = "l4cfg_clkdm" }, { .clkdm_name = "l4per_clkdm" }, { .clkdm_name = "l4sec_clkdm" }, { .clkdm_name = "wkupaon_clkdm" }, { NULL }, }; static struct clockdomain l4sec_54xx_clkdm = { .name = "l4sec_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L4SEC_CDOFFS, .dep_bit = OMAP54XX_L4SEC_STATDEP_SHIFT, .wkdep_srcs = l4sec_wkup_sleep_deps, .sleepdep_srcs = l4sec_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain iva_54xx_clkdm = { .name = "iva_clkdm", .pwrdm = { .name = "iva_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_IVA_INST, .clkdm_offs = OMAP54XX_CM_CORE_IVA_IVA_CDOFFS, .dep_bit = OMAP54XX_IVA_STATDEP_SHIFT, .wkdep_srcs = iva_wkup_sleep_deps, .sleepdep_srcs = iva_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain mipiext_54xx_clkdm = { .name = "mipiext_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_MIPIEXT_CDOFFS, .wkdep_srcs = mipiext_wkup_sleep_deps, .sleepdep_srcs = mipiext_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain l3main2_54xx_clkdm = { .name = "l3main2_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L3MAIN2_CDOFFS, .dep_bit = OMAP54XX_L3MAIN2_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct 
clockdomain l3main1_54xx_clkdm = { .name = "l3main1_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L3MAIN1_CDOFFS, .dep_bit = OMAP54XX_L3MAIN1_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain custefuse_54xx_clkdm = { .name = "custefuse_clkdm", .pwrdm = { .name = "custefuse_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CUSTEFUSE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CUSTEFUSE_CUSTEFUSE_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain ipu_54xx_clkdm = { .name = "ipu_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_IPU_CDOFFS, .dep_bit = OMAP54XX_IPU_STATDEP_SHIFT, .wkdep_srcs = ipu_wkup_sleep_deps, .sleepdep_srcs = ipu_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain l4cfg_54xx_clkdm = { .name = "l4cfg_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L4CFG_CDOFFS, .dep_bit = OMAP54XX_L4CFG_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP, }; static struct clockdomain abe_54xx_clkdm = { .name = "abe_clkdm", .pwrdm = { .name = "abe_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_AON_PARTITION, .cm_inst = OMAP54XX_CM_CORE_AON_ABE_INST, .clkdm_offs = OMAP54XX_CM_CORE_AON_ABE_ABE_CDOFFS, .dep_bit = OMAP54XX_ABE_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain dss_54xx_clkdm = { .name = "dss_clkdm", .pwrdm = { .name = "dss_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_DSS_INST, .clkdm_offs = OMAP54XX_CM_CORE_DSS_DSS_CDOFFS, .dep_bit = OMAP54XX_DSS_STATDEP_SHIFT, .wkdep_srcs = dss_wkup_sleep_deps, .sleepdep_srcs = dss_wkup_sleep_deps, .flags = 
CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain dsp_54xx_clkdm = { .name = "dsp_clkdm", .pwrdm = { .name = "dsp_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_AON_PARTITION, .cm_inst = OMAP54XX_CM_CORE_AON_DSP_INST, .clkdm_offs = OMAP54XX_CM_CORE_AON_DSP_DSP_CDOFFS, .dep_bit = OMAP54XX_DSP_STATDEP_SHIFT, .wkdep_srcs = dsp_wkup_sleep_deps, .sleepdep_srcs = dsp_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain c2c_54xx_clkdm = { .name = "c2c_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_C2C_CDOFFS, .wkdep_srcs = c2c_wkup_sleep_deps, .sleepdep_srcs = c2c_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain l4per_54xx_clkdm = { .name = "l4per_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L4PER_CDOFFS, .dep_bit = OMAP54XX_L4PER_STATDEP_SHIFT, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain gpu_54xx_clkdm = { .name = "gpu_clkdm", .pwrdm = { .name = "gpu_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_GPU_INST, .clkdm_offs = OMAP54XX_CM_CORE_GPU_GPU_CDOFFS, .dep_bit = OMAP54XX_GPU_STATDEP_SHIFT, .wkdep_srcs = gpu_wkup_sleep_deps, .sleepdep_srcs = gpu_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain wkupaon_54xx_clkdm = { .name = "wkupaon_clkdm", .pwrdm = { .name = "wkupaon_pwrdm" }, .prcm_partition = OMAP54XX_PRM_PARTITION, .cm_inst = OMAP54XX_PRM_WKUPAON_CM_INST, .clkdm_offs = OMAP54XX_PRM_WKUPAON_CM_WKUPAON_CDOFFS, .dep_bit = OMAP54XX_WKUPAON_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain mpu0_54xx_clkdm = { .name = "mpu0_clkdm", .pwrdm = { .name = "cpu0_pwrdm" }, .prcm_partition = OMAP54XX_PRCM_MPU_PARTITION, .cm_inst = 
OMAP54XX_PRCM_MPU_CM_C0_INST, .clkdm_offs = OMAP54XX_PRCM_MPU_CM_C0_CPU0_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain mpu1_54xx_clkdm = { .name = "mpu1_clkdm", .pwrdm = { .name = "cpu1_pwrdm" }, .prcm_partition = OMAP54XX_PRCM_MPU_PARTITION, .cm_inst = OMAP54XX_PRCM_MPU_CM_C1_INST, .clkdm_offs = OMAP54XX_PRCM_MPU_CM_C1_CPU1_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain coreaon_54xx_clkdm = { .name = "coreaon_clkdm", .pwrdm = { .name = "coreaon_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_COREAON_INST, .clkdm_offs = OMAP54XX_CM_CORE_COREAON_COREAON_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain mpu_54xx_clkdm = { .name = "mpu_clkdm", .pwrdm = { .name = "mpu_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_AON_PARTITION, .cm_inst = OMAP54XX_CM_CORE_AON_MPU_INST, .clkdm_offs = OMAP54XX_CM_CORE_AON_MPU_MPU_CDOFFS, .wkdep_srcs = mpu_wkup_sleep_deps, .sleepdep_srcs = mpu_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain l3init_54xx_clkdm = { .name = "l3init_clkdm", .pwrdm = { .name = "l3init_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_L3INIT_INST, .clkdm_offs = OMAP54XX_CM_CORE_L3INIT_L3INIT_CDOFFS, .dep_bit = OMAP54XX_L3INIT_STATDEP_SHIFT, .wkdep_srcs = l3init_wkup_sleep_deps, .sleepdep_srcs = l3init_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; static struct clockdomain dma_54xx_clkdm = { .name = "dma_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_DMA_CDOFFS, .wkdep_srcs = dma_wkup_sleep_deps, .sleepdep_srcs = dma_wkup_sleep_deps, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain l3instr_54xx_clkdm = { .name = "l3instr_clkdm", .pwrdm = { .name = "core_pwrdm" }, 
.prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_L3INSTR_CDOFFS, }; static struct clockdomain emif_54xx_clkdm = { .name = "emif_clkdm", .pwrdm = { .name = "core_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CORE_INST, .clkdm_offs = OMAP54XX_CM_CORE_CORE_EMIF_CDOFFS, .dep_bit = OMAP54XX_EMIF_STATDEP_SHIFT, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain emu_54xx_clkdm = { .name = "emu_clkdm", .pwrdm = { .name = "emu_pwrdm" }, .prcm_partition = OMAP54XX_PRM_PARTITION, .cm_inst = OMAP54XX_PRM_EMU_CM_INST, .clkdm_offs = OMAP54XX_PRM_EMU_CM_EMU_CDOFFS, .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP, }; static struct clockdomain cam_54xx_clkdm = { .name = "cam_clkdm", .pwrdm = { .name = "cam_pwrdm" }, .prcm_partition = OMAP54XX_CM_CORE_PARTITION, .cm_inst = OMAP54XX_CM_CORE_CAM_INST, .clkdm_offs = OMAP54XX_CM_CORE_CAM_CAM_CDOFFS, .wkdep_srcs = cam_wkup_sleep_deps, .sleepdep_srcs = cam_wkup_sleep_deps, .flags = CLKDM_CAN_HWSUP_SWSUP, }; /* As clockdomains are added or removed above, this list must also be changed */ static struct clockdomain *clockdomains_omap54xx[] __initdata = { &l4sec_54xx_clkdm, &iva_54xx_clkdm, &mipiext_54xx_clkdm, &l3main2_54xx_clkdm, &l3main1_54xx_clkdm, &custefuse_54xx_clkdm, &ipu_54xx_clkdm, &l4cfg_54xx_clkdm, &abe_54xx_clkdm, &dss_54xx_clkdm, &dsp_54xx_clkdm, &c2c_54xx_clkdm, &l4per_54xx_clkdm, &gpu_54xx_clkdm, &wkupaon_54xx_clkdm, &mpu0_54xx_clkdm, &mpu1_54xx_clkdm, &coreaon_54xx_clkdm, &mpu_54xx_clkdm, &l3init_54xx_clkdm, &dma_54xx_clkdm, &l3instr_54xx_clkdm, &emif_54xx_clkdm, &emu_54xx_clkdm, &cam_54xx_clkdm, NULL }; void __init omap54xx_clockdomains_init(void) { clkdm_register_platform_funcs(&omap4_clkdm_operations); clkdm_register_clkdms(clockdomains_omap54xx); clkdm_complete_init(); }
gpl-2.0
rodero95/android_kernel_huawei_ascend
arch/ia64/kernel/process.c
3069
21212
/* * Architecture-specific setup. * * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support * * 2005-10-07 Keith Owens <kaos@sgi.com> * Add notify_die() hooks. */ #include <linux/cpu.h> #include <linux/pm.h> #include <linux/elf.h> #include <linux/errno.h> #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/personality.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/unistd.h> #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kdebug.h> #include <linux/utsname.h> #include <linux/tracehook.h> #include <asm/cpu.h> #include <asm/delay.h> #include <asm/elf.h> #include <asm/irq.h> #include <asm/kexec.h> #include <asm/pgalloc.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> #include <asm/unwind.h> #include <asm/user.h> #include "entry.h" #ifdef CONFIG_PERFMON # include <asm/perfmon.h> #endif #include "sigframe.h" void (*ia64_mark_idle)(int); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); void (*pm_idle) (void); EXPORT_SYMBOL(pm_idle); void (*pm_power_off) (void); EXPORT_SYMBOL(pm_power_off); void ia64_do_show_stack (struct unw_frame_info *info, void *arg) { unsigned long ip, sp, bsp; char buf[128]; /* don't make it so big that it overflows the stack! 
*/ printk("\nCall Trace:\n"); do { unw_get_ip(info, &ip); if (ip == 0) break; unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); snprintf(buf, sizeof(buf), " [<%016lx>] %%s\n" " sp=%016lx bsp=%016lx\n", ip, sp, bsp); print_symbol(buf, ip); } while (unw_unwind(info) >= 0); } void show_stack (struct task_struct *task, unsigned long *sp) { if (!task) unw_init_running(ia64_do_show_stack, NULL); else { struct unw_frame_info info; unw_init_from_blocked_task(&info, task); ia64_do_show_stack(&info, NULL); } } void dump_stack (void) { show_stack(NULL, NULL); } EXPORT_SYMBOL(dump_stack); void show_regs (struct pt_regs *regs) { unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), smp_processor_id(), current->comm); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), init_utsname()->release); print_symbol("ip is at %s\n", ip); printk("unat: %016lx pfs : %016lx rsc : %016lx\n", regs->ar_unat, regs->ar_pfs, regs->ar_rsc); printk("rnat: %016lx bsps: %016lx pr : %016lx\n", regs->ar_rnat, regs->ar_bspstore, regs->pr); printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", regs->loadrs, regs->ar_ccv, regs->ar_fpsr); printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7); printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", regs->f6.u.bits[1], regs->f6.u.bits[0], regs->f7.u.bits[1], regs->f7.u.bits[0]); printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", regs->f8.u.bits[1], regs->f8.u.bits[0], regs->f9.u.bits[1], regs->f9.u.bits[0]); printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", regs->f10.u.bits[1], regs->f10.u.bits[0], regs->f11.u.bits[1], regs->f11.u.bits[0]); printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3); printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10); printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", 
regs->r11, regs->r12, regs->r13); printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16); printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19); printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22); printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25); printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28); printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31); if (user_mode(regs)) { /* print the stacked registers */ unsigned long val, *bsp, ndirty; int i, sof, is_nat = 0; sof = regs->cr_ifs & 0x7f; /* size of frame */ ndirty = (regs->loadrs >> 19); bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty); for (i = 0; i < sof; ++i) { get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i)); printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val, ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); } } else show_stack(NULL, NULL); } /* local support for deprecated console_print */ void console_print(const char *s) { printk(KERN_EMERG "%s", s); } void do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) { if (fsys_mode(current, &scr->pt)) { /* * defer signal-handling etc. until we return to * privilege-level 0. */ if (!ia64_psr(&scr->pt)->lp) ia64_psr(&scr->pt)->lp = 1; return; } #ifdef CONFIG_PERFMON if (current->thread.pfm_needs_checking) /* * Note: pfm_handle_work() allow us to call it with interrupts * disabled, and may enable interrupts within the function. 
*/ pfm_handle_work(); #endif /* deal with pending signal delivery */ if (test_thread_flag(TIF_SIGPENDING)) { local_irq_enable(); /* force interrupt enable */ ia64_do_signal(scr, in_syscall); } if (test_thread_flag(TIF_NOTIFY_RESUME)) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(&scr->pt); if (current->replacement_session_keyring) key_replace_session_keyring(); } /* copy user rbs to kernel rbs */ if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { local_irq_enable(); /* force interrupt enable */ ia64_sync_krbs(); } local_irq_disable(); /* force interrupt disable */ } static int pal_halt = 1; static int can_do_pal_halt = 1; static int __init nohalt_setup(char * str) { pal_halt = can_do_pal_halt = 0; return 1; } __setup("nohalt", nohalt_setup); void update_pal_halt_status(int status) { can_do_pal_halt = pal_halt && status; } /* * We use this if we don't have any better idle routine.. */ void default_idle (void) { local_irq_enable(); while (!need_resched()) { if (can_do_pal_halt) { local_irq_disable(); if (!need_resched()) { safe_halt(); } local_irq_enable(); } else cpu_relax(); } } #ifdef CONFIG_HOTPLUG_CPU /* We don't actually take CPU down, just spin without interrupts. */ static inline void play_dead(void) { unsigned int this_cpu = smp_processor_id(); /* Ack it */ __get_cpu_var(cpu_state) = CPU_DEAD; max_xtp(); local_irq_disable(); idle_task_exit(); ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); /* * The above is a point of no-return, the processor is * expected to be in SAL loop now. */ BUG(); } #else static inline void play_dead(void) { BUG(); } #endif /* CONFIG_HOTPLUG_CPU */ static void do_nothing(void *unused) { } /* * cpu_idle_wait - Used to ensure that all the CPUs discard old value of * pm_idle and update to new pm_idle value. Required while changing pm_idle * handler on SMP systems. * * Caller must have changed pm_idle to the new value before the call. Old * pm_idle value will not be used by any CPU after the return of this function. 
*/ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); void __attribute__((noreturn)) cpu_idle (void) { void (*mark_idle)(int) = ia64_mark_idle; int cpu = smp_processor_id(); /* endless idle loop with no priority at all */ while (1) { if (can_do_pal_halt) { current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we * test NEED_RESCHED: */ smp_mb(); } else { current_thread_info()->status |= TS_POLLING; } if (!need_resched()) { void (*idle)(void); #ifdef CONFIG_SMP min_xtp(); #endif rmb(); if (mark_idle) (*mark_idle)(1); idle = pm_idle; if (!idle) idle = default_idle; (*idle)(); if (mark_idle) (*mark_idle)(0); #ifdef CONFIG_SMP normal_xtp(); #endif } preempt_enable_no_resched(); schedule(); preempt_disable(); check_pgt_cache(); if (cpu_is_offline(cpu)) play_dead(); } } void ia64_save_extra (struct task_struct *task) { #ifdef CONFIG_PERFMON unsigned long info; #endif if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_save_debug_regs(&task->thread.dbr[0]); #ifdef CONFIG_PERFMON if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) pfm_save_regs(task); info = __get_cpu_var(pfm_syst_info); if (info & PFM_CPUINFO_SYST_WIDE) pfm_syst_wide_update_task(task, info, 0); #endif } void ia64_load_extra (struct task_struct *task) { #ifdef CONFIG_PERFMON unsigned long info; #endif if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_load_debug_regs(&task->thread.dbr[0]); #ifdef CONFIG_PERFMON if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) pfm_load_regs(task); info = __get_cpu_var(pfm_syst_info); if (info & PFM_CPUINFO_SYST_WIDE) pfm_syst_wide_update_task(task, info, 1); #endif } /* * Copy the state of an ia-64 thread. 
* * We get here through the following call chain: * * from user-level: from kernel: * * <clone syscall> <some kernel call frames> * sys_clone : * do_fork do_fork * copy_thread copy_thread * * This means that the stack layout is as follows: * * +---------------------+ (highest addr) * | struct pt_regs | * +---------------------+ * | struct switch_stack | * +---------------------+ * | | * | memory stack | * | | <-- sp (lowest addr) * +---------------------+ * * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register, * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since * the stack is page aligned and the page size is at least 4KB, this is always the case, * so there is nothing to worry about. */ int copy_thread(unsigned long clone_flags, unsigned long user_stack_base, unsigned long user_stack_size, struct task_struct *p, struct pt_regs *regs) { extern char ia64_ret_from_clone; struct switch_stack *child_stack, *stack; unsigned long rbs, child_rbs, rbs_size; struct pt_regs *child_ptregs; int retval = 0; #ifdef CONFIG_SMP /* * For SMP idle threads, fork_by_hand() calls do_fork with * NULL regs. 
*/ if (!regs) return 0; #endif stack = ((struct switch_stack *) regs) - 1; child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1; child_stack = (struct switch_stack *) child_ptregs - 1; /* copy parent's switch_stack & pt_regs to child: */ memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack)); rbs = (unsigned long) current + IA64_RBS_OFFSET; child_rbs = (unsigned long) p + IA64_RBS_OFFSET; rbs_size = stack->ar_bspstore - rbs; /* copy the parent's register backing store to the child: */ memcpy((void *) child_rbs, (void *) rbs, rbs_size); if (likely(user_mode(child_ptregs))) { if (clone_flags & CLONE_SETTLS) child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ if (user_stack_base) { child_ptregs->r12 = user_stack_base + user_stack_size - 16; child_ptregs->ar_bspstore = user_stack_base; child_ptregs->ar_rnat = 0; child_ptregs->loadrs = 0; } } else { /* * Note: we simply preserve the relative position of * the stack pointer here. There is no need to * allocate a scratch area here, since that will have * been taken care of by the caller of sys_clone() * already. */ child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */ child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ } child_stack->ar_bspstore = child_rbs + rbs_size; child_stack->b0 = (unsigned long) &ia64_ret_from_clone; /* copy parts of thread_struct: */ p->thread.ksp = (unsigned long) child_stack - 16; /* stop some PSR bits from being inherited. * the psr.up/psr.pp bits must be cleared on fork but inherited on execve() * therefore we must specify them explicitly here and not include them in * IA64_PSR_BITS_TO_CLEAR. */ child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); /* * NOTE: The calling convention considers all floating point * registers in the high partition (fph) to be scratch. 
Since * the only way to get to this point is through a system call, * we know that the values in fph are all dead. Hence, there * is no need to inherit the fph state from the parent to the * child and all we have to do is to make sure that * IA64_THREAD_FPH_VALID is cleared in the child. * * XXX We could push this optimization a bit further by * clearing IA64_THREAD_FPH_VALID on ANY system call. * However, it's not clear this is worth doing. Also, it * would be a slight deviation from the normal Linux system * call behavior where scratch registers are preserved across * system calls (unless used by the system call itself). */ # define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \ | IA64_THREAD_PM_VALID) # define THREAD_FLAGS_TO_SET 0 p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | THREAD_FLAGS_TO_SET); ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ #ifdef CONFIG_PERFMON if (current->thread.pfm_context) pfm_inherit(p, child_ptregs); #endif return retval; } static void do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) { unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; unsigned long uninitialized_var(ip); /* GCC be quiet */ elf_greg_t *dst = arg; struct pt_regs *pt; char nat; int i; memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */ if (unw_unwind_to_user(info) < 0) return; unw_get_sp(info, &sp); pt = (struct pt_regs *) (sp + 16); urbs_end = ia64_get_user_rbs_end(task, pt, &cfm); if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0) return; ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end), &ar_rnat); /* * coredump format: * r0-r31 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) * predicate registers (p0-p63) * b0-b7 * ip cfm user-mask * ar.rsc ar.bsp ar.bspstore ar.rnat * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec */ /* r0 is zero */ for (i = 1, mask = (1UL << i); i < 
32; ++i) { unw_get_gr(info, i, &dst[i], &nat); if (nat) nat_bits |= mask; mask <<= 1; } dst[32] = nat_bits; unw_get_pr(info, &dst[33]); for (i = 0; i < 8; ++i) unw_get_br(info, i, &dst[34 + i]); unw_get_rp(info, &ip); dst[42] = ip + ia64_psr(pt)->ri; dst[43] = cfm; dst[44] = pt->cr_ipsr & IA64_PSR_UM; unw_get_ar(info, UNW_AR_RSC, &dst[45]); /* * For bsp and bspstore, unw_get_ar() would return the kernel * addresses, but we need the user-level addresses instead: */ dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */ dst[47] = pt->ar_bspstore; dst[48] = ar_rnat; unw_get_ar(info, UNW_AR_CCV, &dst[49]); unw_get_ar(info, UNW_AR_UNAT, &dst[50]); unw_get_ar(info, UNW_AR_FPSR, &dst[51]); dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */ unw_get_ar(info, UNW_AR_LC, &dst[53]); unw_get_ar(info, UNW_AR_EC, &dst[54]); unw_get_ar(info, UNW_AR_CSD, &dst[55]); unw_get_ar(info, UNW_AR_SSD, &dst[56]); } void do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) { elf_fpreg_t *dst = arg; int i; memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */ if (unw_unwind_to_user(info) < 0) return; /* f0 is 0.0, f1 is 1.0 */ for (i = 2; i < 32; ++i) unw_get_fr(info, i, dst + i); ia64_flush_fph(task); if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) memcpy(dst + 32, task->thread.fph, 96*16); } void do_copy_regs (struct unw_frame_info *info, void *arg) { do_copy_task_regs(current, info, arg); } void do_dump_fpu (struct unw_frame_info *info, void *arg) { do_dump_task_fpu(current, info, arg); } void ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) { unw_init_running(do_copy_regs, dst); } int dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) { unw_init_running(do_dump_fpu, dst); return 1; /* f0-f31 are always valid so we always return 1 */ } long sys_execve (const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp, 
struct pt_regs *regs) { char *fname; int error; fname = getname(filename); error = PTR_ERR(fname); if (IS_ERR(fname)) goto out; error = do_execve(fname, argv, envp, regs); putname(fname); out: return error; } pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags) { extern void start_kernel_thread (void); unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread; struct { struct switch_stack sw; struct pt_regs pt; } regs; memset(&regs, 0, sizeof(regs)); regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */ regs.pt.r1 = helper_fptr[1]; /* set GP */ regs.pt.r9 = (unsigned long) fn; /* 1st argument */ regs.pt.r11 = (unsigned long) arg; /* 2nd argument */ /* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */ regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN; regs.pt.cr_ifs = 1UL << 63; /* mark as valid, empty frame */ regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR); regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET; regs.sw.pr = (1 << PRED_KERNEL_STACK); return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL); } EXPORT_SYMBOL(kernel_thread); /* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */ int kernel_thread_helper (int (*fn)(void *), void *arg) { return (*fn)(arg); } /* * Flush thread state. This is called when a thread does an execve(). */ void flush_thread (void) { /* drop floating-point and debug-register state if it exists: */ current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); ia64_drop_fpu(current); } /* * Clean up state associated with current thread. This is called when * the thread calls exit(). 
*/ void exit_thread (void) { ia64_drop_fpu(current); #ifdef CONFIG_PERFMON /* if needed, stop monitoring and flush state to perfmon context */ if (current->thread.pfm_context) pfm_exit_thread(current); /* free debug register resources */ if (current->thread.flags & IA64_THREAD_DBG_VALID) pfm_release_debug_registers(current); #endif } unsigned long get_wchan (struct task_struct *p) { struct unw_frame_info info; unsigned long ip; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * Note: p may not be a blocked task (it could be current or * another process running on some other CPU. Rather than * trying to determine if p is really blocked, we just assume * it's blocked and rely on the unwind routines to fail * gracefully if the process wasn't really blocked after all. * --davidm 99/12/15 */ unw_init_from_blocked_task(&info, p); do { if (p->state == TASK_RUNNING) return 0; if (unw_unwind(&info) < 0) return 0; unw_get_ip(&info, &ip); if (!in_sched_functions(ip)) return ip; } while (count++ < 16); return 0; } void cpu_halt (void) { pal_power_mgmt_info_u_t power_info[8]; unsigned long min_power; int i, min_power_state; if (ia64_pal_halt_info(power_info) != 0) return; min_power_state = 0; min_power = power_info[0].pal_power_mgmt_info_s.power_consumption; for (i = 1; i < 8; ++i) if (power_info[i].pal_power_mgmt_info_s.im && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) { min_power = power_info[i].pal_power_mgmt_info_s.power_consumption; min_power_state = i; } while (1) ia64_pal_halt(min_power_state); } void machine_shutdown(void) { #ifdef CONFIG_HOTPLUG_CPU int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id()) cpu_down(cpu); } #endif #ifdef CONFIG_KEXEC kexec_disable_iosapic(); #endif } void machine_restart (char *restart_cmd) { (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); } void machine_halt (void) { (void) notify_die(DIE_MACHINE_HALT, "", 
NULL, 0, 0, 0); cpu_halt(); } void machine_power_off (void) { if (pm_power_off) pm_power_off(); machine_halt(); }
gpl-2.0
ShinySide/HispAsian_Kernel_NI3
sound/usb/caiaq/device.c
3325
14721
/* * caiaq.c: ALSA driver for caiaq/NativeInstruments devices * * Copyright (c) 2007 Daniel Mack <daniel@caiaq.de> * Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/usb.h> #include <sound/initval.h> #include <sound/core.h> #include <sound/pcm.h> #include "device.h" #include "audio.h" #include "midi.h" #include "control.h" #include "input.h" MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("caiaq USB audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," "{Native Instruments, RigKontrol3}," "{Native Instruments, Kore Controller}," "{Native Instruments, Kore Controller 2}," "{Native Instruments, Audio Kontrol 1}," "{Native Instruments, Audio 2 DJ}," "{Native Instruments, Audio 4 DJ}," "{Native Instruments, Audio 8 DJ}," "{Native Instruments, Traktor Audio 2}," "{Native Instruments, Session I/O}," "{Native Instruments, GuitarRig mobile}" "{Native Instruments, Traktor Kontrol X1}" "{Native Instruments, Traktor Kontrol S4}" "{Native Instruments, Maschine Controller}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; 
/* Id for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int snd_card_used[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the caiaq sound device"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the caiaq soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable the caiaq soundcard."); enum { SAMPLERATE_44100 = 0, SAMPLERATE_48000 = 1, SAMPLERATE_96000 = 2, SAMPLERATE_192000 = 3, SAMPLERATE_88200 = 4, SAMPLERATE_INVALID = 0xff }; enum { DEPTH_NONE = 0, DEPTH_16 = 1, DEPTH_24 = 2, DEPTH_32 = 3 }; static struct usb_device_id snd_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_RIGKONTROL3 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_KORECONTROLLER2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AK1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO8DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_SESSIONIO }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_GUITARRIGMOBILE }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO4DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_AUDIO2DJ }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 
USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORKONTROLX1 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORKONTROLS4 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_TRAKTORAUDIO2 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = USB_VID_NATIVEINSTRUMENTS, .idProduct = USB_PID_MASCHINECONTROLLER }, { /* terminator */ } }; static void usb_ep1_command_reply_dispatch (struct urb* urb) { int ret; struct snd_usb_caiaqdev *dev = urb->context; unsigned char *buf = urb->transfer_buffer; if (urb->status || !dev) { log("received EP1 urb->status = %i\n", urb->status); return; } switch(buf[0]) { case EP1_CMD_GET_DEVICE_INFO: memcpy(&dev->spec, buf+1, sizeof(struct caiaq_device_spec)); dev->spec.fw_version = le16_to_cpu(dev->spec.fw_version); debug("device spec (firmware %d): audio: %d in, %d out, " "MIDI: %d in, %d out, data alignment %d\n", dev->spec.fw_version, dev->spec.num_analog_audio_in, dev->spec.num_analog_audio_out, dev->spec.num_midi_in, dev->spec.num_midi_out, dev->spec.data_alignment); dev->spec_received++; wake_up(&dev->ep1_wait_queue); break; case EP1_CMD_AUDIO_PARAMS: dev->audio_parm_answer = buf[1]; wake_up(&dev->ep1_wait_queue); break; case EP1_CMD_MIDI_READ: snd_usb_caiaq_midi_handle_input(dev, buf[1], buf + 3, buf[2]); break; case EP1_CMD_READ_IO: if (dev->chip.usb_id == USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ)) { if (urb->actual_length > sizeof(dev->control_state)) urb->actual_length = sizeof(dev->control_state); memcpy(dev->control_state, buf + 1, urb->actual_length); wake_up(&dev->ep1_wait_queue); break; } #ifdef CONFIG_SND_USB_CAIAQ_INPUT case EP1_CMD_READ_ERP: case EP1_CMD_READ_ANALOG: snd_usb_caiaq_input_dispatch(dev, buf, urb->actual_length); #endif break; } dev->ep1_in_urb.actual_length = 0; ret = usb_submit_urb(&dev->ep1_in_urb, GFP_ATOMIC); if (ret < 0) log("unable to submit urb. 
OOM!?\n"); } int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *dev, unsigned char command, const unsigned char *buffer, int len) { int actual_len; struct usb_device *usb_dev = dev->chip.dev; if (!usb_dev) return -EIO; if (len > EP1_BUFSIZE - 1) len = EP1_BUFSIZE - 1; if (buffer && len > 0) memcpy(dev->ep1_out_buf+1, buffer, len); dev->ep1_out_buf[0] = command; return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1), dev->ep1_out_buf, len+1, &actual_len, 200); } int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *dev, int rate, int depth, int bpp) { int ret; char tmp[5]; switch (rate) { case 44100: tmp[0] = SAMPLERATE_44100; break; case 48000: tmp[0] = SAMPLERATE_48000; break; case 88200: tmp[0] = SAMPLERATE_88200; break; case 96000: tmp[0] = SAMPLERATE_96000; break; case 192000: tmp[0] = SAMPLERATE_192000; break; default: return -EINVAL; } switch (depth) { case 16: tmp[1] = DEPTH_16; break; case 24: tmp[1] = DEPTH_24; break; default: return -EINVAL; } tmp[2] = bpp & 0xff; tmp[3] = bpp >> 8; tmp[4] = 1; /* packets per microframe */ debug("setting audio params: %d Hz, %d bits, %d bpp\n", rate, depth, bpp); dev->audio_parm_answer = -1; ret = snd_usb_caiaq_send_command(dev, EP1_CMD_AUDIO_PARAMS, tmp, sizeof(tmp)); if (ret) return ret; if (!wait_event_timeout(dev->ep1_wait_queue, dev->audio_parm_answer >= 0, HZ)) return -EPIPE; if (dev->audio_parm_answer != 1) debug("unable to set the device's audio params\n"); else dev->bpp = bpp; return dev->audio_parm_answer == 1 ? 
0 : -EINVAL; } int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *dev, int digital, int analog, int erp) { char tmp[3] = { digital, analog, erp }; return snd_usb_caiaq_send_command(dev, EP1_CMD_AUTO_MSG, tmp, sizeof(tmp)); } static void __devinit setup_card(struct snd_usb_caiaqdev *dev) { int ret; char val[4]; /* device-specific startup specials */ switch (dev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2): /* RigKontrol2 - display centered dash ('-') */ val[0] = 0x00; val[1] = 0x00; val[2] = 0x01; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 3); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3): /* RigKontrol2 - display two centered dashes ('--') */ val[0] = 0x00; val[1] = 0x40; val[2] = 0x40; val[3] = 0x00; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 4); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1): /* Audio Kontrol 1 - make USB-LED stop blinking */ val[0] = 0x00; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, val, 1); break; case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): /* Audio 8 DJ - trigger read of current settings */ dev->control_state[0] = 0xff; snd_usb_caiaq_set_auto_msg(dev, 1, 0, 0); snd_usb_caiaq_send_command(dev, EP1_CMD_READ_IO, NULL, 0); if (!wait_event_timeout(dev->ep1_wait_queue, dev->control_state[0] != 0xff, HZ)) return; /* fix up some defaults */ if ((dev->control_state[1] != 2) || (dev->control_state[2] != 3) || (dev->control_state[4] != 2)) { dev->control_state[1] = 2; dev->control_state[2] = 3; dev->control_state[4] = 2; snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO, dev->control_state, 6); } break; } if (dev->spec.num_analog_audio_out + dev->spec.num_analog_audio_in + dev->spec.num_digital_audio_out + dev->spec.num_digital_audio_in > 0) { ret = snd_usb_caiaq_audio_init(dev); if (ret < 0) log("Unable to set up audio system (ret=%d)\n", ret); } if (dev->spec.num_midi_in + dev->spec.num_midi_out > 0) { ret = snd_usb_caiaq_midi_init(dev); if 
(ret < 0) log("Unable to set up MIDI system (ret=%d)\n", ret); } #ifdef CONFIG_SND_USB_CAIAQ_INPUT ret = snd_usb_caiaq_input_init(dev); if (ret < 0) log("Unable to set up input system (ret=%d)\n", ret); #endif /* finally, register the card and all its sub-instances */ ret = snd_card_register(dev->chip.card); if (ret < 0) { log("snd_card_register() returned %d\n", ret); snd_card_free(dev->chip.card); } ret = snd_usb_caiaq_control_init(dev); if (ret < 0) log("Unable to set up control system (ret=%d)\n", ret); } static int create_card(struct usb_device *usb_dev, struct usb_interface *intf, struct snd_card **cardp) { int devnum; int err; struct snd_card *card; struct snd_usb_caiaqdev *dev; for (devnum = 0; devnum < SNDRV_CARDS; devnum++) if (enable[devnum] && !snd_card_used[devnum]) break; if (devnum >= SNDRV_CARDS) return -ENODEV; err = snd_card_create(index[devnum], id[devnum], THIS_MODULE, sizeof(struct snd_usb_caiaqdev), &card); if (err < 0) return err; dev = caiaqdev(card); dev->chip.dev = usb_dev; dev->chip.card = card; dev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct)); spin_lock_init(&dev->spinlock); snd_card_set_dev(card, &intf->dev); *cardp = card; return 0; } static int __devinit init_card(struct snd_usb_caiaqdev *dev) { char *c, usbpath[32]; struct usb_device *usb_dev = dev->chip.dev; struct snd_card *card = dev->chip.card; int err, len; if (usb_set_interface(usb_dev, 0, 1) != 0) { log("can't set alt interface.\n"); return -EIO; } usb_init_urb(&dev->ep1_in_urb); usb_init_urb(&dev->midi_out_urb); usb_fill_bulk_urb(&dev->ep1_in_urb, usb_dev, usb_rcvbulkpipe(usb_dev, 0x1), dev->ep1_in_buf, EP1_BUFSIZE, usb_ep1_command_reply_dispatch, dev); usb_fill_bulk_urb(&dev->midi_out_urb, usb_dev, usb_sndbulkpipe(usb_dev, 0x1), dev->midi_out_buf, EP1_BUFSIZE, snd_usb_caiaq_midi_output_done, dev); init_waitqueue_head(&dev->ep1_wait_queue); init_waitqueue_head(&dev->prepare_wait_queue); if 
(usb_submit_urb(&dev->ep1_in_urb, GFP_KERNEL) != 0) return -EIO; err = snd_usb_caiaq_send_command(dev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); if (err) return err; if (!wait_event_timeout(dev->ep1_wait_queue, dev->spec_received, HZ)) return -ENODEV; usb_string(usb_dev, usb_dev->descriptor.iManufacturer, dev->vendor_name, CAIAQ_USB_STR_LEN); usb_string(usb_dev, usb_dev->descriptor.iProduct, dev->product_name, CAIAQ_USB_STR_LEN); strlcpy(card->driver, MODNAME, sizeof(card->driver)); strlcpy(card->shortname, dev->product_name, sizeof(card->shortname)); strlcpy(card->mixername, dev->product_name, sizeof(card->mixername)); /* if the id was not passed as module option, fill it with a shortened * version of the product string which does not contain any * whitespaces */ if (*card->id == '\0') { char id[sizeof(card->id)]; memset(id, 0, sizeof(id)); for (c = card->shortname, len = 0; *c && len < sizeof(card->id); c++) if (*c != ' ') id[len++] = *c; snd_card_set_id(card, id); } usb_make_path(usb_dev, usbpath, sizeof(usbpath)); snprintf(card->longname, sizeof(card->longname), "%s %s (%s)", dev->vendor_name, dev->product_name, usbpath); setup_card(dev); return 0; } static int __devinit snd_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct snd_card *card; struct usb_device *device = interface_to_usbdev(intf); ret = create_card(device, intf, &card); if (ret < 0) return ret; usb_set_intfdata(intf, card); ret = init_card(caiaqdev(card)); if (ret < 0) { log("unable to init card! 
(ret=%d)\n", ret); snd_card_free(card); return ret; } return 0; } static void snd_disconnect(struct usb_interface *intf) { struct snd_usb_caiaqdev *dev; struct snd_card *card = usb_get_intfdata(intf); debug("%s(%p)\n", __func__, intf); if (!card) return; dev = caiaqdev(card); snd_card_disconnect(card); #ifdef CONFIG_SND_USB_CAIAQ_INPUT snd_usb_caiaq_input_free(dev); #endif snd_usb_caiaq_audio_free(dev); usb_kill_urb(&dev->ep1_in_urb); usb_kill_urb(&dev->midi_out_urb); snd_card_free(card); usb_reset_device(interface_to_usbdev(intf)); } MODULE_DEVICE_TABLE(usb, snd_usb_id_table); static struct usb_driver snd_usb_driver = { .name = MODNAME, .probe = snd_probe, .disconnect = snd_disconnect, .id_table = snd_usb_id_table, }; module_usb_driver(snd_usb_driver);
gpl-2.0
AdrianHuang/linux-3.8.13
sound/ppc/burgundy.c
3325
24865
/* * PMac Burgundy lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * code based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <linux/init.h> #include <linux/delay.h> #include <sound/core.h> #include "pmac.h" #include "burgundy.h" /* Waits for busy flag to clear */ static inline void snd_pmac_burgundy_busy_wait(struct snd_pmac *chip) { int timeout = 50; while ((in_le32(&chip->awacs->codec_ctrl) & MASK_NEWECMD) && timeout--) udelay(1); if (timeout < 0) printk(KERN_DEBUG "burgundy_busy_wait: timeout\n"); } static inline void snd_pmac_burgundy_extend_wait(struct snd_pmac *chip) { int timeout; timeout = 50; while (!(in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) udelay(1); if (timeout < 0) printk(KERN_DEBUG "burgundy_extend_wait: timeout #1\n"); timeout = 50; while ((in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) udelay(1); if (timeout < 0) printk(KERN_DEBUG "burgundy_extend_wait: timeout #2\n"); } static void snd_pmac_burgundy_wcw(struct snd_pmac *chip, unsigned addr, unsigned val) { out_le32(&chip->awacs->codec_ctrl, addr + 0x200c00 + (val & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 0x200d00 +((val>>8) & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 
0x200e00 +((val>>16) & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 0x200f00 +((val>>24) & 0xff)); snd_pmac_burgundy_busy_wait(chip); } static unsigned snd_pmac_burgundy_rcw(struct snd_pmac *chip, unsigned addr) { unsigned val = 0; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); out_le32(&chip->awacs->codec_ctrl, addr + 0x100000); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += (in_le32(&chip->awacs->codec_stat) >> 4) & 0xff; out_le32(&chip->awacs->codec_ctrl, addr + 0x100100); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<8; out_le32(&chip->awacs->codec_ctrl, addr + 0x100200); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<16; out_le32(&chip->awacs->codec_ctrl, addr + 0x100300); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<24; spin_unlock_irqrestore(&chip->reg_lock, flags); return val; } static void snd_pmac_burgundy_wcb(struct snd_pmac *chip, unsigned int addr, unsigned int val) { out_le32(&chip->awacs->codec_ctrl, addr + 0x300000 + (val & 0xff)); snd_pmac_burgundy_busy_wait(chip); } static unsigned snd_pmac_burgundy_rcb(struct snd_pmac *chip, unsigned int addr) { unsigned val = 0; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); out_le32(&chip->awacs->codec_ctrl, addr + 0x100000); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += (in_le32(&chip->awacs->codec_stat) >> 4) & 0xff; spin_unlock_irqrestore(&chip->reg_lock, flags); return val; } #define BASE2ADDR(base) ((base) << 12) #define ADDR2BASE(addr) ((addr) >> 12) /* * Burgundy volume: 0 - 100, stereo, word reg */ static void snd_pmac_burgundy_write_volume(struct snd_pmac *chip, unsigned int address, long *volume, int shift) { int 
hardvolume, lvolume, rvolume; if (volume[0] < 0 || volume[0] > 100 || volume[1] < 0 || volume[1] > 100) return; /* -EINVAL */ lvolume = volume[0] ? volume[0] + BURGUNDY_VOLUME_OFFSET : 0; rvolume = volume[1] ? volume[1] + BURGUNDY_VOLUME_OFFSET : 0; hardvolume = lvolume + (rvolume << shift); if (shift == 8) hardvolume |= hardvolume << 16; snd_pmac_burgundy_wcw(chip, address, hardvolume); } static void snd_pmac_burgundy_read_volume(struct snd_pmac *chip, unsigned int address, long *volume, int shift) { int wvolume; wvolume = snd_pmac_burgundy_rcw(chip, address); volume[0] = wvolume & 0xff; if (volume[0] >= BURGUNDY_VOLUME_OFFSET) volume[0] -= BURGUNDY_VOLUME_OFFSET; else volume[0] = 0; volume[1] = (wvolume >> shift) & 0xff; if (volume[1] >= BURGUNDY_VOLUME_OFFSET) volume[1] -= BURGUNDY_VOLUME_OFFSET; else volume[1] = 0; } static int snd_pmac_burgundy_info_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int snd_pmac_burgundy_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int shift = (kcontrol->private_value >> 8) & 0xff; snd_pmac_burgundy_read_volume(chip, addr, ucontrol->value.integer.value, shift); return 0; } static int snd_pmac_burgundy_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int shift = (kcontrol->private_value >> 8) & 0xff; long nvoices[2]; snd_pmac_burgundy_write_volume(chip, addr, ucontrol->value.integer.value, shift); snd_pmac_burgundy_read_volume(chip, addr, nvoices, shift); return (nvoices[0] != ucontrol->value.integer.value[0] || nvoices[1] != ucontrol->value.integer.value[1]); } #define 
BURGUNDY_VOLUME_W(xname, xindex, addr, shift) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_volume,\ .get = snd_pmac_burgundy_get_volume,\ .put = snd_pmac_burgundy_put_volume,\ .private_value = ((ADDR2BASE(addr) & 0xff) | ((shift) << 8)) } /* * Burgundy volume: 0 - 100, stereo, 2-byte reg */ static void snd_pmac_burgundy_write_volume_2b(struct snd_pmac *chip, unsigned int address, long *volume, int off) { int lvolume, rvolume; off |= off << 2; lvolume = volume[0] ? volume[0] + BURGUNDY_VOLUME_OFFSET : 0; rvolume = volume[1] ? volume[1] + BURGUNDY_VOLUME_OFFSET : 0; snd_pmac_burgundy_wcb(chip, address + off, lvolume); snd_pmac_burgundy_wcb(chip, address + off + 0x500, rvolume); } static void snd_pmac_burgundy_read_volume_2b(struct snd_pmac *chip, unsigned int address, long *volume, int off) { volume[0] = snd_pmac_burgundy_rcb(chip, address + off); if (volume[0] >= BURGUNDY_VOLUME_OFFSET) volume[0] -= BURGUNDY_VOLUME_OFFSET; else volume[0] = 0; volume[1] = snd_pmac_burgundy_rcb(chip, address + off + 0x100); if (volume[1] >= BURGUNDY_VOLUME_OFFSET) volume[1] -= BURGUNDY_VOLUME_OFFSET; else volume[1] = 0; } static int snd_pmac_burgundy_info_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int snd_pmac_burgundy_get_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int off = kcontrol->private_value & 0x300; snd_pmac_burgundy_read_volume_2b(chip, addr, ucontrol->value.integer.value, off); return 0; } static int snd_pmac_burgundy_put_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = 
BASE2ADDR(kcontrol->private_value & 0xff); int off = kcontrol->private_value & 0x300; long nvoices[2]; snd_pmac_burgundy_write_volume_2b(chip, addr, ucontrol->value.integer.value, off); snd_pmac_burgundy_read_volume_2b(chip, addr, nvoices, off); return (nvoices[0] != ucontrol->value.integer.value[0] || nvoices[1] != ucontrol->value.integer.value[1]); } #define BURGUNDY_VOLUME_2B(xname, xindex, addr, off) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_volume_2b,\ .get = snd_pmac_burgundy_get_volume_2b,\ .put = snd_pmac_burgundy_put_volume_2b,\ .private_value = ((ADDR2BASE(addr) & 0xff) | ((off) << 8)) } /* * Burgundy gain/attenuation: 0 - 15, mono/stereo, byte reg */ static int snd_pmac_burgundy_info_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 15; return 0; } static int snd_pmac_burgundy_get_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int atten = (kcontrol->private_value >> 25) & 1; int oval; oval = snd_pmac_burgundy_rcb(chip, addr); if (atten) oval = ~oval & 0xff; ucontrol->value.integer.value[0] = oval & 0xf; if (stereo) ucontrol->value.integer.value[1] = (oval >> 4) & 0xf; return 0; } static int snd_pmac_burgundy_put_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int atten = (kcontrol->private_value >> 25) & 1; int oval, val; oval = snd_pmac_burgundy_rcb(chip, addr); if (atten) oval = ~oval & 0xff; val = 
ucontrol->value.integer.value[0]; if (stereo) val |= ucontrol->value.integer.value[1] << 4; else val |= ucontrol->value.integer.value[0] << 4; if (atten) val = ~val & 0xff; snd_pmac_burgundy_wcb(chip, addr, val); return val != oval; } #define BURGUNDY_VOLUME_B(xname, xindex, addr, stereo, atten) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_gain,\ .get = snd_pmac_burgundy_get_gain,\ .put = snd_pmac_burgundy_put_gain,\ .private_value = (ADDR2BASE(addr) | ((stereo) << 24) | ((atten) << 25)) } /* * Burgundy switch: 0/1, mono/stereo, word reg */ static int snd_pmac_burgundy_info_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int snd_pmac_burgundy_get_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = 1 << (kcontrol->private_value & 0xff); int rmask = 1 << ((kcontrol->private_value >> 8) & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int val = snd_pmac_burgundy_rcw(chip, addr); ucontrol->value.integer.value[0] = (val & lmask) ? 1 : 0; if (stereo) ucontrol->value.integer.value[1] = (val & rmask) ? 1 : 0; return 0; } static int snd_pmac_burgundy_put_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = 1 << (kcontrol->private_value & 0xff); int rmask = 1 << ((kcontrol->private_value >> 8) & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int val, oval; oval = snd_pmac_burgundy_rcw(chip, addr); val = oval & ~(lmask | (stereo ? 
rmask : 0)); if (ucontrol->value.integer.value[0]) val |= lmask; if (stereo && ucontrol->value.integer.value[1]) val |= rmask; snd_pmac_burgundy_wcw(chip, addr, val); return val != oval; } #define BURGUNDY_SWITCH_W(xname, xindex, addr, lbit, rbit, stereo) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_switch_w,\ .get = snd_pmac_burgundy_get_switch_w,\ .put = snd_pmac_burgundy_put_switch_w,\ .private_value = ((lbit) | ((rbit) << 8)\ | (ADDR2BASE(addr) << 16) | ((stereo) << 24)) } /* * Burgundy switch: 0/1, mono/stereo, byte reg, bit mask */ static int snd_pmac_burgundy_info_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int snd_pmac_burgundy_get_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = kcontrol->private_value & 0xff; int rmask = (kcontrol->private_value >> 8) & 0xff; int stereo = (kcontrol->private_value >> 24) & 1; int val = snd_pmac_burgundy_rcb(chip, addr); ucontrol->value.integer.value[0] = (val & lmask) ? 1 : 0; if (stereo) ucontrol->value.integer.value[1] = (val & rmask) ? 
1 : 0; return 0; } static int snd_pmac_burgundy_put_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = kcontrol->private_value & 0xff; int rmask = (kcontrol->private_value >> 8) & 0xff; int stereo = (kcontrol->private_value >> 24) & 1; int val, oval; oval = snd_pmac_burgundy_rcb(chip, addr); val = oval & ~(lmask | rmask); if (ucontrol->value.integer.value[0]) val |= lmask; if (stereo && ucontrol->value.integer.value[1]) val |= rmask; snd_pmac_burgundy_wcb(chip, addr, val); return val != oval; } #define BURGUNDY_SWITCH_B(xname, xindex, addr, lmask, rmask, stereo) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_switch_b,\ .get = snd_pmac_burgundy_get_switch_b,\ .put = snd_pmac_burgundy_put_switch_b,\ .private_value = ((lmask) | ((rmask) << 8)\ | (ADDR2BASE(addr) << 16) | ((stereo) << 24)) } /* * Burgundy mixers */ static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] = { BURGUNDY_VOLUME_W("Master Playback Volume", 0, MASK_ADDR_BURGUNDY_MASTER_VOLUME, 8), BURGUNDY_VOLUME_W("CD Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLCD, 16), BURGUNDY_VOLUME_2B("Input Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIX01, 2), BURGUNDY_VOLUME_2B("Mixer Playback Volume", 0, MASK_ADDR_BURGUNDY_VOLMIX23, 0), BURGUNDY_VOLUME_B("CD Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINCD, 1, 0), BURGUNDY_SWITCH_W("Master Capture Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTENABLES, 24, 0, 0), BURGUNDY_SWITCH_W("CD Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 0, 16, 1), BURGUNDY_SWITCH_W("CD Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 0, 16, 1), /* BURGUNDY_SWITCH_W("Loop Capture Switch", 0, * MASK_ADDR_BURGUNDY_CAPTURESELECTS, 8, 24, 1), * BURGUNDY_SWITCH_B("Mixer out Capture Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFAD, 0x02, 0, 0), * BURGUNDY_SWITCH_B("Mixer Capture 
Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFAD, 0x01, 0, 0), * BURGUNDY_SWITCH_B("PCM out Capture Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFEH, 0x02, 0, 0), */ BURGUNDY_SWITCH_B("PCM Capture Switch", 0, MASK_ADDR_BURGUNDY_HOSTIFEH, 0x01, 0, 0) }; static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] = { BURGUNDY_VOLUME_W("Line in Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLLINE, 16), BURGUNDY_VOLUME_W("Mic Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIC, 16), BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINLINE, 1, 0), BURGUNDY_VOLUME_B("Mic Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINMIC, 1, 0), BURGUNDY_VOLUME_B("Speaker Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENSPEAKER, 1, 1), BURGUNDY_VOLUME_B("Line out Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENLINEOUT, 1, 1), BURGUNDY_VOLUME_B("Headphone Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENHP, 1, 1), BURGUNDY_SWITCH_W("Line in Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 1, 17, 1), BURGUNDY_SWITCH_W("Mic Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 2, 18, 1), BURGUNDY_SWITCH_W("Line in Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 1, 17, 1), BURGUNDY_SWITCH_W("Mic Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 2, 18, 1), BURGUNDY_SWITCH_B("Mic Boost Capture Switch", 0, MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) }; static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] = { BURGUNDY_VOLUME_W("Line in Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIC, 16), BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINMIC, 1, 0), BURGUNDY_VOLUME_B("Speaker Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENMONO, 0, 1), BURGUNDY_VOLUME_B("Line out Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENSPEAKER, 1, 1), BURGUNDY_SWITCH_W("Line in Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 2, 18, 1), BURGUNDY_SWITCH_W("Line in Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 2, 18, 1), /* BURGUNDY_SWITCH_B("Line in Boost Capture Switch", 
0, * MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) */ }; static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac = BURGUNDY_SWITCH_B("Master Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT | BURGUNDY_LINEOUT_LEFT | BURGUNDY_HP_LEFT, BURGUNDY_OUTPUT_RIGHT | BURGUNDY_LINEOUT_RIGHT | BURGUNDY_HP_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac = BURGUNDY_SWITCH_B("Master Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_INTERN | BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac = BURGUNDY_SWITCH_B("Speaker Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac = BURGUNDY_SWITCH_B("Speaker Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_INTERN, 0, 0); static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac = BURGUNDY_SWITCH_B("Line out Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_LINEOUT_LEFT, BURGUNDY_LINEOUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac = BURGUNDY_SWITCH_B("Line out Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac = BURGUNDY_SWITCH_B("Headphone Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_HP_LEFT, BURGUNDY_HP_RIGHT, 1); #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute stuffs */ static int snd_pmac_burgundy_detect_headphone(struct snd_pmac *chip) { return (in_le32(&chip->awacs->codec_stat) & chip->hp_stat_mask) ? 
1 : 0; } static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_notify) { if (chip->auto_mute) { int imac = of_machine_is_compatible("iMac"); int reg, oreg; reg = oreg = snd_pmac_burgundy_rcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES); reg &= imac ? ~(BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT | BURGUNDY_HP_LEFT | BURGUNDY_HP_RIGHT) : ~(BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT | BURGUNDY_OUTPUT_INTERN); if (snd_pmac_burgundy_detect_headphone(chip)) reg |= imac ? (BURGUNDY_HP_LEFT | BURGUNDY_HP_RIGHT) : (BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT); else reg |= imac ? (BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT) : (BURGUNDY_OUTPUT_INTERN); if (do_notify && reg == oreg) return; snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, reg); if (do_notify) { snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_sw_ctl->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->speaker_sw_ctl->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hp_detect_ctl->id); } } } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* * initialize burgundy */ int snd_pmac_burgundy_init(struct snd_pmac *chip) { int imac = of_machine_is_compatible("iMac"); int i, err; /* Checks to see the chip is alive and kicking */ if ((in_le32(&chip->awacs->codec_ctrl) & MASK_ERRCODE) == 0xf0000) { printk(KERN_WARNING "pmac burgundy: disabled by MacOS :-(\n"); return 1; } snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_OUTPUTENABLES, DEF_BURGUNDY_OUTPUTENABLES); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, DEF_BURGUNDY_MORE_OUTPUTENABLES); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, DEF_BURGUNDY_OUTPUTSELECTS); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_INPSEL21, DEF_BURGUNDY_INPSEL21); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_INPSEL3, imac ? 
DEF_BURGUNDY_INPSEL3_IMAC : DEF_BURGUNDY_INPSEL3_PMAC); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINCD, DEF_BURGUNDY_GAINCD); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINLINE, DEF_BURGUNDY_GAINLINE); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINMIC, DEF_BURGUNDY_GAINMIC); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINMODEM, DEF_BURGUNDY_GAINMODEM); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENSPEAKER, DEF_BURGUNDY_ATTENSPEAKER); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENLINEOUT, DEF_BURGUNDY_ATTENLINEOUT); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENHP, DEF_BURGUNDY_ATTENHP); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_MASTER_VOLUME, DEF_BURGUNDY_MASTER_VOLUME); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLCD, DEF_BURGUNDY_VOLCD); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLLINE, DEF_BURGUNDY_VOLLINE); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLMIC, DEF_BURGUNDY_VOLMIC); if (chip->hp_stat_mask == 0) { /* set headphone-jack detection bit */ if (imac) chip->hp_stat_mask = BURGUNDY_HPDETECT_IMAC_UPPER | BURGUNDY_HPDETECT_IMAC_LOWER | BURGUNDY_HPDETECT_IMAC_SIDE; else chip->hp_stat_mask = BURGUNDY_HPDETECT_PMAC_BACK; } /* * build burgundy mixers */ strcpy(chip->card->mixername, "PowerMac Burgundy"); for (i = 0; i < ARRAY_SIZE(snd_pmac_burgundy_mixers); i++) { err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_pmac_burgundy_mixers[i], chip)); if (err < 0) return err; } for (i = 0; i < (imac ? ARRAY_SIZE(snd_pmac_burgundy_mixers_imac) : ARRAY_SIZE(snd_pmac_burgundy_mixers_pmac)); i++) { err = snd_ctl_add(chip->card, snd_ctl_new1(imac ? &snd_pmac_burgundy_mixers_imac[i] : &snd_pmac_burgundy_mixers_pmac[i], chip)); if (err < 0) return err; } chip->master_sw_ctl = snd_ctl_new1(imac ? &snd_pmac_burgundy_master_sw_imac : &snd_pmac_burgundy_master_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; chip->master_sw_ctl = snd_ctl_new1(imac ? 
&snd_pmac_burgundy_line_sw_imac : &snd_pmac_burgundy_line_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; if (imac) { chip->master_sw_ctl = snd_ctl_new1( &snd_pmac_burgundy_hp_sw_imac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; } chip->speaker_sw_ctl = snd_ctl_new1(imac ? &snd_pmac_burgundy_speaker_sw_imac : &snd_pmac_burgundy_speaker_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->speaker_sw_ctl); if (err < 0) return err; #ifdef PMAC_SUPPORT_AUTOMUTE err = snd_pmac_add_automute(chip); if (err < 0) return err; chip->detect_headphone = snd_pmac_burgundy_detect_headphone; chip->update_automute = snd_pmac_burgundy_update_automute; snd_pmac_burgundy_update_automute(chip, 0); /* update the status only */ #endif return 0; }
gpl-2.0
adbaby/android_kernel_msm8974
fs/jbd/commit.c
3325
28750
/* * linux/fs/jbd/commit.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * Journal commit routines for the generic filesystem journaling code; * part of the ext2fs journaling system. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <trace/events/jbd.h> /* * Default IO end handler for temporary BJ_IO buffer_heads. */ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { BUFFER_TRACE(bh, ""); if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); } /* * When an ext3-ordered file is truncated, it is possible that many pages are * not successfully freed, because they are attached to a committing transaction. * After the transaction commits, these pages are left on the LRU, with no * ->mapping, and with attached buffers. These pages are trivially reclaimable * by the VM, but their apparent absence upsets the VM accounting, and it makes * the numbers in /proc/meminfo look odd. * * So here, we have a buffer which has just come off the forget list. Look to * see if we can strip all buffers from the backing page. * * Called under journal->j_list_lock. The caller provided us with a ref * against the buffer, and we drop that here. 
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page; if (buffer_dirty(bh)) goto nope; if (atomic_read(&bh->b_count) != 1) goto nope; page = bh->b_page; if (!page) goto nope; if (page->mapping) goto nope; /* OK, it's a truncated page */ if (!trylock_page(page)) goto nope; page_cache_get(page); __brelse(bh); try_to_free_buffers(page); unlock_page(page); page_cache_release(page); return; nope: __brelse(bh); } /* * Decrement reference counter for data buffer. If it has been marked * 'BH_Freed', release it and the page to which it belongs if possible. */ static void release_data_buffer(struct buffer_head *bh) { if (buffer_freed(bh)) { clear_buffer_freed(bh); release_buffer_page(bh); } else put_bh(bh); } /* * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is * held. For ranking reasons we must trylock. If we lose, schedule away and * return 0. j_list_lock is dropped in this case. */ static int inverted_lock(journal_t *journal, struct buffer_head *bh) { if (!jbd_trylock_bh_state(bh)) { spin_unlock(&journal->j_list_lock); schedule(); return 0; } return 1; } /* Done it all: now write the commit record. We should have * cleaned up our previous buffers by now, so if we are in abort * mode we can now just skip the rest of the journal write * entirely. 
* * Returns 1 if the journal needs to be aborted or 0 on success */ static int journal_write_commit_record(journal_t *journal, transaction_t *commit_transaction) { struct journal_head *descriptor; struct buffer_head *bh; journal_header_t *header; int ret; if (is_journal_aborted(journal)) return 0; descriptor = journal_get_descriptor_buffer(journal); if (!descriptor) return 1; bh = jh2bh(descriptor); header = (journal_header_t *)(bh->b_data); header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); JBUFFER_TRACE(descriptor, "write commit block"); set_buffer_dirty(bh); if (journal->j_flags & JFS_BARRIER) ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA); else ret = sync_dirty_buffer(bh); put_bh(bh); /* One for getblk() */ journal_put_journal_head(descriptor); return (ret == -EIO); } static void journal_do_submit_data(struct buffer_head **wbuf, int bufs, int write_op) { int i; for (i = 0; i < bufs; i++) { wbuf[i]->b_end_io = end_buffer_write_sync; /* We use-up our safety reference in submit_bh() */ submit_bh(write_op, wbuf[i]); } } /* * Submit all the data buffers to disk */ static int journal_submit_data_buffers(journal_t *journal, transaction_t *commit_transaction, int write_op) { struct journal_head *jh; struct buffer_head *bh; int locked; int bufs = 0; struct buffer_head **wbuf = journal->j_wbuf; int err = 0; /* * Whenever we unlock the journal and sleep, things can get added * onto ->t_sync_datalist, so we have to keep looping back to * write_out_data until we *know* that the list is empty. * * Cleanup any flushed data buffers from the data list. Even in * abort mode, we want to flush this out as soon as possible. 
*/ write_out_data: cond_resched(); spin_lock(&journal->j_list_lock); while (commit_transaction->t_sync_datalist) { jh = commit_transaction->t_sync_datalist; bh = jh2bh(jh); locked = 0; /* Get reference just to make sure buffer does not disappear * when we are forced to drop various locks */ get_bh(bh); /* If the buffer is dirty, we need to submit IO and hence * we need the buffer lock. We try to lock the buffer without * blocking. If we fail, we need to drop j_list_lock and do * blocking lock_buffer(). */ if (buffer_dirty(bh)) { if (!trylock_buffer(bh)) { BUFFER_TRACE(bh, "needs blocking lock"); spin_unlock(&journal->j_list_lock); trace_jbd_do_submit_data(journal, commit_transaction); /* Write out all data to prevent deadlocks */ journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; lock_buffer(bh); spin_lock(&journal->j_list_lock); } locked = 1; } /* We have to get bh_state lock. Again out of order, sigh. */ if (!inverted_lock(journal, bh)) { jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); } /* Someone already cleaned up the buffer? 
*/ if (!buffer_jbd(bh) || bh2jh(bh) != jh || jh->b_transaction != commit_transaction || jh->b_jlist != BJ_SyncData) { jbd_unlock_bh_state(bh); if (locked) unlock_buffer(bh); BUFFER_TRACE(bh, "already cleaned up"); release_data_buffer(bh); continue; } if (locked && test_clear_buffer_dirty(bh)) { BUFFER_TRACE(bh, "needs writeout, adding to array"); wbuf[bufs++] = bh; __journal_file_buffer(jh, commit_transaction, BJ_Locked); jbd_unlock_bh_state(bh); if (bufs == journal->j_wbufsize) { spin_unlock(&journal->j_list_lock); trace_jbd_do_submit_data(journal, commit_transaction); journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; goto write_out_data; } } else if (!locked && buffer_locked(bh)) { __journal_file_buffer(jh, commit_transaction, BJ_Locked); jbd_unlock_bh_state(bh); put_bh(bh); } else { BUFFER_TRACE(bh, "writeout complete: unfile"); if (unlikely(!buffer_uptodate(bh))) err = -EIO; __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); if (locked) unlock_buffer(bh); release_data_buffer(bh); } if (need_resched() || spin_needbreak(&journal->j_list_lock)) { spin_unlock(&journal->j_list_lock); goto write_out_data; } } spin_unlock(&journal->j_list_lock); trace_jbd_do_submit_data(journal, commit_transaction); journal_do_submit_data(wbuf, bufs, write_op); return err; } /* * journal_commit_transaction * * The primary function for committing a transaction to the log. This * function is called by the journal thread to begin a complete commit. */ void journal_commit_transaction(journal_t *journal) { transaction_t *commit_transaction; struct journal_head *jh, *new_jh, *descriptor; struct buffer_head **wbuf = journal->j_wbuf; int bufs; int flags; int err; unsigned int blocknr; ktime_t start_time; u64 commit_time; char *tagp = NULL; journal_header_t *header; journal_block_tag_t *tag = NULL; int space_left = 0; int first_tag = 0; int tag_flag; int i; struct blk_plug plug; /* * First job: lock down the current transaction and wait for * all outstanding updates to complete. 
*/ /* Do we need to erase the effects of a prior journal_flush? */ if (journal->j_flags & JFS_FLUSHED) { jbd_debug(3, "super block updated\n"); journal_update_superblock(journal, 1); } else { jbd_debug(3, "superblock not updated\n"); } J_ASSERT(journal->j_running_transaction != NULL); J_ASSERT(journal->j_committing_transaction == NULL); commit_transaction = journal->j_running_transaction; J_ASSERT(commit_transaction->t_state == T_RUNNING); trace_jbd_start_commit(journal, commit_transaction); jbd_debug(1, "JBD: starting commit of transaction %d\n", commit_transaction->t_tid); spin_lock(&journal->j_state_lock); commit_transaction->t_state = T_LOCKED; trace_jbd_commit_locking(journal, commit_transaction); spin_lock(&commit_transaction->t_handle_lock); while (commit_transaction->t_updates) { DEFINE_WAIT(wait); prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (commit_transaction->t_updates) { spin_unlock(&commit_transaction->t_handle_lock); spin_unlock(&journal->j_state_lock); schedule(); spin_lock(&journal->j_state_lock); spin_lock(&commit_transaction->t_handle_lock); } finish_wait(&journal->j_wait_updates, &wait); } spin_unlock(&commit_transaction->t_handle_lock); J_ASSERT (commit_transaction->t_outstanding_credits <= journal->j_max_transaction_buffers); /* * First thing we are allowed to do is to discard any remaining * BJ_Reserved buffers. Note, it is _not_ permissible to assume * that there are no such buffers: if a large filesystem * operation like a truncate needs to split itself over multiple * transactions, then it may try to do a journal_restart() while * there are still BJ_Reserved buffers outstanding. These must * be released cleanly from the current transaction. * * In this case, the filesystem must still reserve write access * again before modifying the buffer in the new transaction, but * we do not require it to remember exactly which old buffers it * has reserved. 
This is consistent with the existing behaviour * that multiple journal_get_write_access() calls to the same * buffer are perfectly permissible. */ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; JBUFFER_TRACE(jh, "reserved, unused: refile"); /* * A journal_get_undo_access()+journal_release_buffer() may * leave undo-committed data. */ if (jh->b_committed_data) { struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_state(bh); jbd_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; jbd_unlock_bh_state(bh); } journal_refile_buffer(journal, jh); } /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. We do this *before* commit because it potentially * frees some memory */ spin_lock(&journal->j_list_lock); __journal_clean_checkpoint_list(journal); spin_unlock(&journal->j_list_lock); jbd_debug (3, "JBD: commit phase 1\n"); /* * Clear revoked flag to reflect there is no revoked buffers * in the next transaction which is going to be started. */ journal_clear_buffer_revoked_flags(journal); /* * Switch to a new revoke table. */ journal_switch_revoke_table(journal); trace_jbd_commit_flushing(journal, commit_transaction); commit_transaction->t_state = T_FLUSH; journal->j_committing_transaction = commit_transaction; journal->j_running_transaction = NULL; start_time = ktime_get(); commit_transaction->t_log_start = journal->j_head; wake_up(&journal->j_wait_transaction_locked); spin_unlock(&journal->j_state_lock); jbd_debug (3, "JBD: commit phase 2\n"); /* * Now start flushing things to disk, in the order they appear * on the transaction lists. Data blocks go first. */ blk_start_plug(&plug); err = journal_submit_data_buffers(journal, commit_transaction, WRITE_SYNC); blk_finish_plug(&plug); /* * Wait for all previously submitted IO to complete. 
*/ spin_lock(&journal->j_list_lock); while (commit_transaction->t_locked_list) { struct buffer_head *bh; jh = commit_transaction->t_locked_list->b_tprev; bh = jh2bh(jh); get_bh(bh); if (buffer_locked(bh)) { spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); spin_lock(&journal->j_list_lock); } if (unlikely(!buffer_uptodate(bh))) { if (!trylock_page(bh->b_page)) { spin_unlock(&journal->j_list_lock); lock_page(bh->b_page); spin_lock(&journal->j_list_lock); } if (bh->b_page->mapping) set_bit(AS_EIO, &bh->b_page->mapping->flags); unlock_page(bh->b_page); SetPageError(bh->b_page); err = -EIO; } if (!inverted_lock(journal, bh)) { put_bh(bh); spin_lock(&journal->j_list_lock); continue; } if (buffer_jbd(bh) && bh2jh(bh) == jh && jh->b_transaction == commit_transaction && jh->b_jlist == BJ_Locked) __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); release_data_buffer(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); if (err) { char b[BDEVNAME_SIZE]; printk(KERN_WARNING "JBD: Detected IO errors while flushing file data " "on %s\n", bdevname(journal->j_fs_dev, b)); if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR) journal_abort(journal, err); err = 0; } blk_start_plug(&plug); journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC); /* * If we found any dirty or locked buffers, then we should have * looped back up to the write_out_data label. If there weren't * any then journal_clean_data_list should have wiped the list * clean by now, so check that it is in fact empty. */ J_ASSERT (commit_transaction->t_sync_datalist == NULL); jbd_debug (3, "JBD: commit phase 3\n"); /* * Way to go: we have now written out all of the data for a * transaction! Now comes the tricky part: we need to write out * metadata. 
Loop over the transaction's entire buffer list: */ spin_lock(&journal->j_state_lock); commit_transaction->t_state = T_COMMIT; spin_unlock(&journal->j_state_lock); trace_jbd_commit_logging(journal, commit_transaction); J_ASSERT(commit_transaction->t_nr_buffers <= commit_transaction->t_outstanding_credits); descriptor = NULL; bufs = 0; while (commit_transaction->t_buffers) { /* Find the next buffer to be journaled... */ jh = commit_transaction->t_buffers; /* If we're in abort mode, we just un-journal the buffer and release it. */ if (is_journal_aborted(journal)) { clear_buffer_jbddirty(jh2bh(jh)); JBUFFER_TRACE(jh, "journal is aborting: refile"); journal_refile_buffer(journal, jh); /* If that was the last one, we need to clean up * any descriptor buffers which may have been * already allocated, even if we are now * aborting. */ if (!commit_transaction->t_buffers) goto start_journal_io; continue; } /* Make sure we have a descriptor block in which to record the metadata buffer. */ if (!descriptor) { struct buffer_head *bh; J_ASSERT (bufs == 0); jbd_debug(4, "JBD: get descriptor\n"); descriptor = journal_get_descriptor_buffer(journal); if (!descriptor) { journal_abort(journal, -EIO); continue; } bh = jh2bh(descriptor); jbd_debug(4, "JBD: got buffer %llu (%p)\n", (unsigned long long)bh->b_blocknr, bh->b_data); header = (journal_header_t *)&bh->b_data[0]; header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); tagp = &bh->b_data[sizeof(journal_header_t)]; space_left = bh->b_size - sizeof(journal_header_t); first_tag = 1; set_buffer_jwrite(bh); set_buffer_dirty(bh); wbuf[bufs++] = bh; /* Record it so that we can wait for IO completion later */ BUFFER_TRACE(bh, "ph3: file as descriptor"); journal_file_buffer(descriptor, commit_transaction, BJ_LogCtl); } /* Where is the buffer to be written? 
*/ err = journal_next_log_block(journal, &blocknr); /* If the block mapping failed, just abandon the buffer and repeat this loop: we'll fall into the refile-on-abort condition above. */ if (err) { journal_abort(journal, err); continue; } /* * start_this_handle() uses t_outstanding_credits to determine * the free space in the log, but this counter is changed * by journal_next_log_block() also. */ commit_transaction->t_outstanding_credits--; /* Bump b_count to prevent truncate from stumbling over the shadowed buffer! @@@ This can go if we ever get rid of the BJ_IO/BJ_Shadow pairing of buffers. */ get_bh(jh2bh(jh)); /* Make a temporary IO buffer with which to write it out (this will requeue both the metadata buffer and the temporary IO buffer). new_bh goes on BJ_IO*/ set_buffer_jwrite(jh2bh(jh)); /* * akpm: journal_write_metadata_buffer() sets * new_bh->b_transaction to commit_transaction. * We need to clean this up before we release new_bh * (which is of type BJ_IO) */ JBUFFER_TRACE(jh, "ph3: write metadata"); flags = journal_write_metadata_buffer(commit_transaction, jh, &new_jh, blocknr); set_buffer_jwrite(jh2bh(new_jh)); wbuf[bufs++] = jh2bh(new_jh); /* Record the new block's tag in the current descriptor buffer */ tag_flag = 0; if (flags & 1) tag_flag |= JFS_FLAG_ESCAPE; if (!first_tag) tag_flag |= JFS_FLAG_SAME_UUID; tag = (journal_block_tag_t *) tagp; tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr); tag->t_flags = cpu_to_be32(tag_flag); tagp += sizeof(journal_block_tag_t); space_left -= sizeof(journal_block_tag_t); if (first_tag) { memcpy (tagp, journal->j_uuid, 16); tagp += 16; space_left -= 16; first_tag = 0; } /* If there's no more to do, or if the descriptor is full, let the IO rip! */ if (bufs == journal->j_wbufsize || commit_transaction->t_buffers == NULL || space_left < sizeof(journal_block_tag_t) + 16) { jbd_debug(4, "JBD: Submit %d IOs\n", bufs); /* Write an end-of-descriptor marker before submitting the IOs. 
"tag" still points to the last tag we set up. */ tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG); start_journal_io: for (i = 0; i < bufs; i++) { struct buffer_head *bh = wbuf[i]; lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); bh->b_end_io = journal_end_buffer_io_sync; submit_bh(WRITE_SYNC, bh); } cond_resched(); /* Force a new descriptor to be generated next time round the loop. */ descriptor = NULL; bufs = 0; } } blk_finish_plug(&plug); /* Lo and behold: we have just managed to send a transaction to the log. Before we can commit it, wait for the IO so far to complete. Control buffers being written are on the transaction's t_log_list queue, and metadata buffers are on the t_iobuf_list queue. Wait for the buffers in reverse order. That way we are less likely to be woken up until all IOs have completed, and so we incur less scheduling load. */ jbd_debug(3, "JBD: commit phase 4\n"); /* * akpm: these are BJ_IO, and j_list_lock is not needed. * See __journal_try_to_free_buffer. */ wait_for_iobuf: while (commit_transaction->t_iobuf_list != NULL) { struct buffer_head *bh; jh = commit_transaction->t_iobuf_list->b_tprev; bh = jh2bh(jh); if (buffer_locked(bh)) { wait_on_buffer(bh); goto wait_for_iobuf; } if (cond_resched()) goto wait_for_iobuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; clear_buffer_jwrite(bh); JBUFFER_TRACE(jh, "ph4: unfile after journal write"); journal_unfile_buffer(journal, jh); /* * ->t_iobuf_list should contain only dummy buffer_heads * which were created by journal_write_metadata_buffer(). 
*/ BUFFER_TRACE(bh, "dumping temporary bh"); journal_put_journal_head(jh); __brelse(bh); J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0); free_buffer_head(bh); /* We also have to unlock and free the corresponding shadowed buffer */ jh = commit_transaction->t_shadow_list->b_tprev; bh = jh2bh(jh); clear_buffer_jwrite(bh); J_ASSERT_BH(bh, buffer_jbddirty(bh)); /* The metadata is now released for reuse, but we need to remember it against this transaction so that when we finally commit, we can do any checkpointing required. */ JBUFFER_TRACE(jh, "file as BJ_Forget"); journal_file_buffer(jh, commit_transaction, BJ_Forget); /* * Wake up any transactions which were waiting for this * IO to complete. The barrier must be here so that changes * by journal_file_buffer() take effect before wake_up_bit() * does the waitqueue check. */ smp_mb(); wake_up_bit(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); } J_ASSERT (commit_transaction->t_shadow_list == NULL); jbd_debug(3, "JBD: commit phase 5\n"); /* Here we wait for the revoke record and descriptor record buffers */ wait_for_ctlbuf: while (commit_transaction->t_log_list != NULL) { struct buffer_head *bh; jh = commit_transaction->t_log_list->b_tprev; bh = jh2bh(jh); if (buffer_locked(bh)) { wait_on_buffer(bh); goto wait_for_ctlbuf; } if (cond_resched()) goto wait_for_ctlbuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); journal_unfile_buffer(journal, jh); journal_put_journal_head(jh); __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } if (err) journal_abort(journal, err); jbd_debug(3, "JBD: commit phase 6\n"); /* All metadata is written, now write commit record and do cleanup */ spin_lock(&journal->j_state_lock); J_ASSERT(commit_transaction->t_state == T_COMMIT); commit_transaction->t_state = T_COMMIT_RECORD; spin_unlock(&journal->j_state_lock); if (journal_write_commit_record(journal, 
commit_transaction)) err = -EIO; if (err) journal_abort(journal, err); /* End of a transaction! Finally, we can do checkpoint processing: any buffers committed as a result of this transaction can be removed from any checkpoint list it was on before. */ jbd_debug(3, "JBD: commit phase 7\n"); J_ASSERT(commit_transaction->t_sync_datalist == NULL); J_ASSERT(commit_transaction->t_buffers == NULL); J_ASSERT(commit_transaction->t_checkpoint_list == NULL); J_ASSERT(commit_transaction->t_iobuf_list == NULL); J_ASSERT(commit_transaction->t_shadow_list == NULL); J_ASSERT(commit_transaction->t_log_list == NULL); restart_loop: /* * As there are other places (journal_unmap_buffer()) adding buffers * to this list we have to be careful and hold the j_list_lock. */ spin_lock(&journal->j_list_lock); while (commit_transaction->t_forget) { transaction_t *cp_transaction; struct buffer_head *bh; int try_to_free = 0; jh = commit_transaction->t_forget; spin_unlock(&journal->j_list_lock); bh = jh2bh(jh); /* * Get a reference so that bh cannot be freed before we are * done with it. */ get_bh(bh); jbd_lock_bh_state(bh); J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || jh->b_transaction == journal->j_running_transaction); /* * If there is undo-protected committed data against * this buffer, then we can remove it now. If it is a * buffer needing such protection, the old frozen_data * field now points to a committed version of the * buffer, so rotate that field to the new committed * data. * * Otherwise, we can just throw away the frozen data now. 
*/ if (jh->b_committed_data) { jbd_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; } } else if (jh->b_frozen_data) { jbd_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; } spin_lock(&journal->j_list_lock); cp_transaction = jh->b_cp_transaction; if (cp_transaction) { JBUFFER_TRACE(jh, "remove from old cp transaction"); __journal_remove_checkpoint(jh); } /* Only re-checkpoint the buffer_head if it is marked * dirty. If the buffer was added to the BJ_Forget list * by journal_forget, it may no longer be dirty and * there's no point in keeping a checkpoint record for * it. */ /* A buffer which has been freed while still being * journaled by a previous transaction may end up still * being dirty here, but we want to avoid writing back * that buffer in the future after the "add to orphan" * operation been committed, That's not only a performance * gain, it also stops aliasing problems if the buffer is * left behind for writeback and gets reallocated for another * use in a different page. */ if (buffer_freed(bh) && !jh->b_next_transaction) { clear_buffer_freed(bh); clear_buffer_jbddirty(bh); } if (buffer_jbddirty(bh)) { JBUFFER_TRACE(jh, "add to new checkpointing trans"); __journal_insert_checkpoint(jh, commit_transaction); if (is_journal_aborted(journal)) clear_buffer_jbddirty(bh); } else { J_ASSERT_BH(bh, !buffer_dirty(bh)); /* * The buffer on BJ_Forget list and not jbddirty means * it has been freed by this transaction and hence it * could not have been reallocated until this * transaction has committed. *BUT* it could be * reallocated once we have written all the data to * disk and before we process the buffer on BJ_Forget * list. 
*/ if (!jh->b_next_transaction) try_to_free = 1; } JBUFFER_TRACE(jh, "refile or unfile freed buffer"); __journal_refile_buffer(jh); jbd_unlock_bh_state(bh); if (try_to_free) release_buffer_page(bh); else __brelse(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); /* * This is a bit sleazy. We use j_list_lock to protect transition * of a transaction into T_FINISHED state and calling * __journal_drop_transaction(). Otherwise we could race with * other checkpointing code processing the transaction... */ spin_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); /* * Now recheck if some buffers did not get attached to the transaction * while the lock was dropped... */ if (commit_transaction->t_forget) { spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_state_lock); goto restart_loop; } /* Done with this transaction! */ jbd_debug(3, "JBD: commit phase 8\n"); J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD); commit_transaction->t_state = T_FINISHED; J_ASSERT(commit_transaction == journal->j_committing_transaction); journal->j_commit_sequence = commit_transaction->t_tid; journal->j_committing_transaction = NULL; commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); /* * weight the commit time higher than the average time so we don't * react too strongly to vast changes in commit time */ if (likely(journal->j_average_commit_time)) journal->j_average_commit_time = (commit_time*3 + journal->j_average_commit_time) / 4; else journal->j_average_commit_time = commit_time; spin_unlock(&journal->j_state_lock); if (commit_transaction->t_checkpoint_list == NULL && commit_transaction->t_checkpoint_io_list == NULL) { __journal_drop_transaction(journal, commit_transaction); } else { if (journal->j_checkpoint_transactions == NULL) { journal->j_checkpoint_transactions = commit_transaction; commit_transaction->t_cpnext = commit_transaction; commit_transaction->t_cpprev = commit_transaction; } else { 
commit_transaction->t_cpnext = journal->j_checkpoint_transactions; commit_transaction->t_cpprev = commit_transaction->t_cpnext->t_cpprev; commit_transaction->t_cpnext->t_cpprev = commit_transaction; commit_transaction->t_cpprev->t_cpnext = commit_transaction; } } spin_unlock(&journal->j_list_lock); trace_jbd_end_commit(journal, commit_transaction); jbd_debug(1, "JBD: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); wake_up(&journal->j_wait_done_commit); }
gpl-2.0
crdroid-devices/android_kernel_htc_msm8974
arch/sparc/boot/btfixupprep.c
7677
11620
/* Simple utility to prepare vmlinux image for sparc. Resolves all BTFIXUP uses and settings and creates a special .s object to link to the image. Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <stdio.h> #include <string.h> #include <ctype.h> #include <errno.h> #include <unistd.h> #include <stdlib.h> #include <malloc.h> #define MAXSYMS 1024 static char *symtab = "SYMBOL TABLE:"; static char *relrec = "RELOCATION RECORDS FOR ["; static int rellen; static int symlen; int mode; struct _btfixup; typedef struct _btfixuprel { char *sect; unsigned long offset; struct _btfixup *f; int frel; struct _btfixuprel *next; } btfixuprel; typedef struct _btfixup { int type; int setinitval; unsigned int initval; char *initvalstr; char *name; btfixuprel *rel; } btfixup; btfixup array[MAXSYMS]; int last = 0; char buffer[1024]; unsigned long lastfoffset = -1; unsigned long lastfrelno; btfixup *lastf; static void fatal(void) __attribute__((noreturn)); static void fatal(void) { fprintf(stderr, "Malformed output from objdump\n%s\n", buffer); exit(1); } static btfixup *find(int type, char *name) { int i; for (i = 0; i < last; i++) { if (array[i].type == type && !strcmp(array[i].name, name)) return array + i; } array[last].type = type; array[last].name = strdup(name); array[last].setinitval = 0; if (!array[last].name) 
fatal(); array[last].rel = NULL; last++; if (last >= MAXSYMS) { fprintf(stderr, "Ugh. Something strange. More than %d different BTFIXUP symbols\n", MAXSYMS); exit(1); } return array + last - 1; } static void set_mode (char *buffer) { for (mode = 0;; mode++) if (buffer[mode] < '0' || buffer[mode] > '9') break; if (mode != 8 && mode != 16) fatal(); } int main(int argc,char **argv) { char *p, *q; char *sect; int i, j, k; unsigned int initval; int shift; btfixup *f; btfixuprel *r, **rr; unsigned long offset; char *initvalstr; symlen = strlen(symtab); while (fgets (buffer, 1024, stdin) != NULL) if (!strncmp (buffer, symtab, symlen)) goto main0; fatal(); main0: rellen = strlen(relrec); while (fgets (buffer, 1024, stdin) != NULL) if (!strncmp (buffer, relrec, rellen)) goto main1; fatal(); main1: sect = malloc(strlen (buffer + rellen) + 1); if (!sect) fatal(); strcpy (sect, buffer + rellen); p = strchr (sect, ']'); if (!p) fatal(); *p = 0; if (fgets (buffer, 1024, stdin) == NULL) fatal(); while (fgets (buffer, 1024, stdin) != NULL) { int nbase; if (!strncmp (buffer, relrec, rellen)) goto main1; if (mode == 0) set_mode (buffer); p = strchr (buffer, '\n'); if (p) *p = 0; if (strlen (buffer) < 22+mode) continue; if (strncmp (buffer + mode, " R_SPARC_", 9)) continue; nbase = 27 - 8 + mode; if (buffer[nbase] != '_' || buffer[nbase+1] != '_' || buffer[nbase+2] != '_') continue; switch (buffer[nbase+3]) { case 'f': /* CALL */ case 'b': /* BLACKBOX */ case 's': /* SIMM13 */ case 'a': /* HALF */ case 'h': /* SETHI */ case 'i': /* INT */ break; default: continue; } p = strchr (buffer + nbase+5, '+'); if (p) *p = 0; shift = nbase + 5; if (buffer[nbase+4] == 's' && buffer[nbase+5] == '_') { shift = nbase + 6; if (strcmp (sect, ".init.text")) { fprintf(stderr, "Wrong use of '%s' BTFIXUPSET in '%s' section.\n" "BTFIXUPSET_CALL can be used only in" " __init sections\n", buffer + shift, sect); exit(1); } } else if (buffer[nbase+4] != '_') continue; if (!strcmp (sect, ".text.exit")) 
continue; if (strcmp (sect, ".text") && strcmp (sect, ".init.text") && strcmp (sect, ".fixup") && (strcmp (sect, "__ksymtab") || buffer[nbase+3] != 'f')) { if (buffer[nbase+3] == 'f') fprintf(stderr, "Wrong use of '%s' in '%s' section.\n" " It can be used only in .text, .init.text," " .fixup and __ksymtab\n", buffer + shift, sect); else fprintf(stderr, "Wrong use of '%s' in '%s' section.\n" " It can be only used in .text, .init.text," " and .fixup\n", buffer + shift, sect); exit(1); } p = strstr (buffer + shift, "__btset_"); if (p && buffer[nbase+4] == 's') { fprintf(stderr, "__btset_ in BTFIXUP name can only be used when defining the variable, not for setting\n%s\n", buffer); exit(1); } initval = 0; initvalstr = NULL; if (p) { if (p[8] != '0' || p[9] != 'x') { fprintf(stderr, "Pre-initialized values can be only initialized with hexadecimal constants starting 0x\n%s\n", buffer); exit(1); } initval = strtoul(p + 10, &q, 16); if (*q || !initval) { fprintf(stderr, "Pre-initialized values can be only in the form name__btset_0xXXXXXXXX where X are hex digits.\nThey cannot be name__btset_0x00000000 though. 
Use BTFIXUPDEF_XX instead of BTFIXUPDEF_XX_INIT then.\n%s\n", buffer); exit(1); } initvalstr = p + 10; *p = 0; } f = find(buffer[nbase+3], buffer + shift); if (buffer[nbase+4] == 's') continue; switch (buffer[nbase+3]) { case 'f': if (initval) { fprintf(stderr, "Cannot use pre-initialized fixups for calls\n%s\n", buffer); exit(1); } if (!strcmp (sect, "__ksymtab")) { if (strncmp (buffer + mode+9, "32 ", 10)) { fprintf(stderr, "BTFIXUP_CALL in EXPORT_SYMBOL results in relocation other than R_SPARC_32\n\%s\n", buffer); exit(1); } } else if (strncmp (buffer + mode+9, "WDISP30 ", 10) && strncmp (buffer + mode+9, "HI22 ", 10) && strncmp (buffer + mode+9, "LO10 ", 10)) { fprintf(stderr, "BTFIXUP_CALL results in relocation other than R_SPARC_WDISP30, R_SPARC_HI22 or R_SPARC_LO10\n%s\n", buffer); exit(1); } break; case 'b': if (initval) { fprintf(stderr, "Cannot use pre-initialized fixups for blackboxes\n%s\n", buffer); exit(1); } if (strncmp (buffer + mode+9, "HI22 ", 10)) { fprintf(stderr, "BTFIXUP_BLACKBOX results in relocation other than R_SPARC_HI22\n%s\n", buffer); exit(1); } break; case 's': if (initval + 0x1000 >= 0x2000) { fprintf(stderr, "Wrong initializer for SIMM13. Has to be from $fffff000 to $00000fff\n%s\n", buffer); exit(1); } if (strncmp (buffer + mode+9, "13 ", 10)) { fprintf(stderr, "BTFIXUP_SIMM13 results in relocation other than R_SPARC_13\n%s\n", buffer); exit(1); } break; case 'a': if (initval + 0x1000 >= 0x2000 && (initval & 0x3ff)) { fprintf(stderr, "Wrong initializer for HALF.\n%s\n", buffer); exit(1); } if (strncmp (buffer + mode+9, "13 ", 10)) { fprintf(stderr, "BTFIXUP_HALF results in relocation other than R_SPARC_13\n%s\n", buffer); exit(1); } break; case 'h': if (initval & 0x3ff) { fprintf(stderr, "Wrong initializer for SETHI. 
Cannot have set low 10 bits\n%s\n", buffer); exit(1); } if (strncmp (buffer + mode+9, "HI22 ", 10)) { fprintf(stderr, "BTFIXUP_SETHI results in relocation other than R_SPARC_HI22\n%s\n", buffer); exit(1); } break; case 'i': if (initval) { fprintf(stderr, "Cannot use pre-initialized fixups for INT\n%s\n", buffer); exit(1); } if (strncmp (buffer + mode+9, "HI22 ", 10) && strncmp (buffer + mode+9, "LO10 ", 10)) { fprintf(stderr, "BTFIXUP_INT results in relocation other than R_SPARC_HI22 and R_SPARC_LO10\n%s\n", buffer); exit(1); } break; } if (!f->setinitval) { f->initval = initval; if (initvalstr) { f->initvalstr = strdup(initvalstr); if (!f->initvalstr) fatal(); } f->setinitval = 1; } else if (f->initval != initval) { fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer\n%s\n", f->name, f->initvalstr ? : "0x00000000", buffer); exit(1); } else if (initval && strcmp(f->initvalstr, initvalstr)) { fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer.\n" "Initializers have to match literally as well.\n%s\n", f->name, f->initvalstr, buffer); exit(1); } offset = strtoul(buffer, &q, 16); if (q != buffer + mode || (!offset && (mode == 8 ? strncmp (buffer, "00000000 ", 9) : strncmp (buffer, "0000000000000000 ", 17)))) { fprintf(stderr, "Malformed relocation address in\n%s\n", buffer); exit(1); } for (k = 0, r = f->rel, rr = &f->rel; r; rr = &r->next, r = r->next, k++) if (r->offset == offset && !strcmp(r->sect, sect)) { fprintf(stderr, "Ugh. One address has two relocation records\n"); exit(1); } *rr = malloc(sizeof(btfixuprel)); if (!*rr) fatal(); (*rr)->offset = offset; (*rr)->f = NULL; if (buffer[nbase+3] == 'f') { lastf = f; lastfoffset = offset; lastfrelno = k; } else if (lastfoffset + 4 == offset) { (*rr)->f = lastf; (*rr)->frel = lastfrelno; } (*rr)->sect = sect; (*rr)->next = NULL; } printf("! Generated by btfixupprep. 
Do not edit.\n\n"); printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n"); printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n"); for (i = 0; i < last; i++) { f = array + i; printf("\t.global\t___%cs_%s\n", f->type, f->name); if (f->type == 'f') printf("___%cs_%s:\n\t.word 0x%08x,0,0,", f->type, f->name, f->type << 24); else printf("___%cs_%s:\n\t.word 0x%08x,0,", f->type, f->name, f->type << 24); for (j = 0, r = f->rel; r != NULL; j++, r = r->next); if (j) printf("%d\n\t.word\t", j * 2); else printf("0\n"); for (r = f->rel, j--; r != NULL; j--, r = r->next) { if (!strcmp (r->sect, ".text")) printf ("_stext+0x%08lx", r->offset); else if (!strcmp (r->sect, ".init.text")) printf ("__init_begin+0x%08lx", r->offset); else if (!strcmp (r->sect, "__ksymtab")) printf ("__start___ksymtab+0x%08lx", r->offset); else if (!strcmp (r->sect, ".fixup")) printf ("__start___fixup+0x%08lx", r->offset); else fatal(); if (f->type == 'f' || !r->f) printf (",0"); else printf (",___fs_%s+0x%08x", r->f->name, (4 + r->frel*2)*4 + 4); if (j) printf (","); else printf ("\n"); } printf("\n"); } printf("\n\t.global\t___btfixup_end\n___btfixup_end:\n"); printf("\n\n! Define undefined references\n\n"); for (i = 0; i < last; i++) { f = array + i; if (f->type == 'f') { printf("\t.global\t___f_%s\n", f->name); printf("___f_%s:\n", f->name); } } printf("\tretl\n\t nop\n\n"); for (i = 0; i < last; i++) { f = array + i; if (f->type != 'f') { if (!f->initval) { printf("\t.global\t___%c_%s\n", f->type, f->name); printf("___%c_%s = 0\n", f->type, f->name); } else { printf("\t.global\t___%c_%s__btset_0x%s\n", f->type, f->name, f->initvalstr); printf("___%c_%s__btset_0x%s = 0x%08x\n", f->type, f->name, f->initvalstr, f->initval); } } } printf("\n\n"); exit(0); }
gpl-2.0
Alphix/linuxtv
tools/perf/util/data.c
254
2363
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string.h>

#include "data.h"
#include "util.h"

/*
 * check_pipe - detect whether the data "file" is really a pipe
 *
 * A pipe is used either when no path was given and the corresponding
 * standard stream (stdin for read, stdout for write) is a FIFO, or when
 * the path is the conventional "-".  On detection the pipe fd is stored
 * in file->fd and file->is_pipe is set; the result is returned.
 */
static bool check_pipe(struct perf_data_file *file)
{
	struct stat st;
	bool is_pipe = false;
	int fd = perf_data_file__is_read(file) ?
		 STDIN_FILENO : STDOUT_FILENO;

	if (!file->path) {
		if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
			is_pipe = true;
	} else {
		if (!strcmp(file->path, "-"))
			is_pipe = true;
	}

	if (is_pipe)
		file->fd = fd;

	return file->is_pipe = is_pipe;
}

/*
 * check_backup - preserve an existing, non-empty data file
 *
 * Before the output path is truncated, rename the old file to
 * "<path>.old".  Returns 0 on success (or when there is nothing to back
 * up), -1 when the rename fails — in which case the caller must NOT
 * truncate the original file, or the user's previous data would be lost.
 */
static int check_backup(struct perf_data_file *file)
{
	struct stat st;

	if (!stat(file->path, &st) && st.st_size) {
		char oldname[PATH_MAX];

		snprintf(oldname, sizeof(oldname), "%s.old",
			 file->path);

		/* Best effort: the previous backup may not exist. */
		unlink(oldname);

		/*
		 * Previously the rename() result was ignored (the old
		 * "TODO check errors properly"), so a failed backup fell
		 * through to O_TRUNC and destroyed the user's data.
		 */
		if (rename(file->path, oldname)) {
			pr_err("Can't move data file to %s: %s\n",
			       oldname, strerror(errno));
			return -1;
		}
	}

	return 0;
}

/*
 * open_file_read - open file->path read-only with sanity checks
 *
 * Rejects files not owned by the current user or root (unless
 * file->force) and zero-sized files.  On success stores the size in
 * file->size and returns the fd; on open failure returns -errno, on any
 * later check failure returns -1.
 */
static int open_file_read(struct perf_data_file *file)
{
	struct stat st;
	int fd;

	fd = open(file->path, O_RDONLY);
	if (fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", file->path, strerror(err));
		if (err == ENOENT && !strcmp(file->path, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(fd, &st) < 0)
		goto out_close;

	if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       file->path);
		goto out_close;
	}

	if (!st.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			file->path);
		goto out_close;
	}

	file->size = st.st_size;
	return fd;

 out_close:
	close(fd);
	return -1;
}

/*
 * open_file_write - back up any existing file, then create/truncate
 *
 * Returns the new fd, or -1 when the backup or the open fails.
 */
static int open_file_write(struct perf_data_file *file)
{
	if (check_backup(file))
		return -1;

	return open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
}

/* Dispatch to the read or write opener; stash the fd in file->fd. */
static int open_file(struct perf_data_file *file)
{
	int fd;

	fd = perf_data_file__is_read(file) ?
	     open_file_read(file) : open_file_write(file);

	file->fd = fd;
	return fd < 0 ? -1 : 0;
}

/*
 * perf_data_file__open - open the perf data file (or pipe)
 *
 * Pipes short-circuit the regular-file path.  A missing path defaults
 * to "perf.data".  Returns 0 on success, negative on failure.
 */
int perf_data_file__open(struct perf_data_file *file)
{
	if (check_pipe(file))
		return 0;

	if (!file->path)
		file->path = "perf.data";

	return open_file(file);
}

void perf_data_file__close(struct perf_data_file *file)
{
	close(file->fd);
}

/* Write @size bytes from @buf, retrying on short writes (writen). */
ssize_t perf_data_file__write(struct perf_data_file *file,
			      void *buf, size_t size)
{
	return writen(file->fd, buf, size);
}
gpl-2.0
garwedgess/LuPuS-CM-iCs
drivers/staging/otus/wrap_ev.c
510
9015
/*
 * Copyright (c) 2007-2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*                                                                      */
/*  Module Name : wrap_ev.c                                             */
/*                                                                      */
/*  Abstract                                                            */
/*      This module contains wrapper functions for events               */
/*                                                                      */
/*  NOTES                                                               */
/*      Platform dependent.                                             */
/*                                                                      */
/************************************************************************/

#include "oal_dt.h"
#include "usbdrv.h"

#include <linux/netlink.h>
#include <net/iw_handler.h>

/***** Management *****/

/* Authentication notification hook; currently a no-op. */
u16_t zfLnxAuthNotify(zdev_t* dev, u16_t* macAddr)
{
	return 0;
}

/*
 * zfLnxAsocNotify - a STA associated to us (AP mode)
 *
 * Caches the STA's WPA IE (in a free or matching slot of
 * macp->stawpaie[]) and reports IWEVREGISTERED to userspace.
 * Always returns 0.
 */
u16_t zfLnxAsocNotify(zdev_t* dev, u16_t* macAddr, u8_t* body,
		      u16_t bodySize, u16_t port)
{
	struct usbdrv_private *macp = dev->ml_priv;
	union iwreq_data wreq;
	u8_t *addr = (u8_t *) macAddr;
	u16_t i, j;

	memset(&wreq, 0, sizeof(wreq));
	memcpy(wreq.addr.sa_data, macAddr, ETH_ALEN);
	wreq.addr.sa_family = ARPHRD_ETHER;

	printk(KERN_DEBUG "join_event of MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Find a slot whose stored MAC is either all-zero (unused) or
	 * identical to this STA's address.  The inner loop breaks early on
	 * the first mismatching non-zero byte.
	 */
	for (i = 0; i < ZM_OAL_MAX_STA_SUPPORT; i++) {
		for (j = 0; j < IEEE80211_ADDR_LEN; j++) {
			if ((macp->stawpaie[i].wpa_macaddr[j] != 0) &&
			    (macp->stawpaie[i].wpa_macaddr[j] != addr[j]))
				break;
		}
		/* was a magic "6"; same value, named constant */
		if (j == IEEE80211_ADDR_LEN)
			break;
	}

	if (i < ZM_OAL_MAX_STA_SUPPORT) {
		/* Store the WPA IE for this STA. */
		memcpy(macp->stawpaie[i].wpa_macaddr, macAddr,
		       IEEE80211_ADDR_LEN);
		memcpy(macp->stawpaie[i].wpa_ie, body, bodySize);
	}

	/*
	 * Per-VAP delivery (vap[port].dev) was removed long ago; only the
	 * main device receives the event.
	 */
	wireless_send_event(dev, IWEVREGISTERED, &wreq, NULL);

	return 0;
}

/* Notification that a STA is disassociated from AP.  AP mode only. */
u16_t zfLnxDisAsocNotify(zdev_t* dev, u8_t* macAddr, u16_t port)
{
	union iwreq_data wreq;
	u8_t *addr = (u8_t *) macAddr;

	memset(&wreq, 0, sizeof(wreq));
	memcpy(wreq.addr.sa_data, macAddr, ETH_ALEN);
	wreq.addr.sa_family = ARPHRD_ETHER;

	printk(KERN_DEBUG "zfwDisAsocNotify(), MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	return 0;
}

/* Notification that a STA is connect to AP.  AP mode only. */
u16_t zfLnxApConnectNotify(zdev_t* dev, u8_t* macAddr, u16_t port)
{
	union iwreq_data wreq;
	u8_t *addr = (u8_t *) macAddr;

	memset(&wreq, 0, sizeof(wreq));
	memcpy(wreq.addr.sa_data, macAddr, ETH_ALEN);
	wreq.addr.sa_family = ARPHRD_ETHER;

	printk(KERN_DEBUG "zfwApConnectNotify(), MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	return 0;
}

/*
 * zfLnxConnectNotify - media connect/disconnect state change
 *
 * Starts or stops the net queue, records the state in
 * macp->adapterState and forwards a wireless event (SIOCGIWAP in
 * infrastructure mode, IWEVREGISTERED in AP mode).  Does nothing when
 * bssid is NULL.
 */
void zfLnxConnectNotify(zdev_t* dev, u16_t status, u16_t* bssid)
{
	union iwreq_data wreq;
	u8_t *addr = (u8_t *) bssid;
	struct usbdrv_private *macp = dev->ml_priv;

	if (bssid == NULL)
		return;

	memset(&wreq, 0, sizeof(wreq));
	if (status == ZM_STATUS_MEDIA_CONNECT)
		memcpy(wreq.addr.sa_data, bssid, ETH_ALEN);
	wreq.addr.sa_family = ARPHRD_ETHER;

	switch (status) {
	case ZM_STATUS_MEDIA_CONNECT:
#ifdef ZM_CONFIG_BIG_ENDIAN
		/* byte-swapped dump on big-endian builds */
		printk(KERN_DEBUG "Connected to AP, MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[1], addr[0], addr[3], addr[2], addr[5], addr[4]);
#else
		printk(KERN_DEBUG "Connected to AP, MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
#endif
		netif_start_queue(dev);
		break;
	case ZM_STATUS_MEDIA_DISCONNECT:
	case ZM_STATUS_MEDIA_DISABLED:
	case ZM_STATUS_MEDIA_CONNECTION_DISABLED:
	case ZM_STATUS_MEDIA_CONNECTION_RESET:
	case ZM_STATUS_MEDIA_RESET:
	case ZM_STATUS_MEDIA_DISCONNECT_DEAUTH:
	case ZM_STATUS_MEDIA_DISCONNECT_DISASOC:
	case ZM_STATUS_MEDIA_DISCONNECT_BEACON_MISS:
	case ZM_STATUS_MEDIA_DISCONNECT_NOT_FOUND:
	case ZM_STATUS_MEDIA_DISCONNECT_TIMEOUT:
		printk(KERN_DEBUG "Disconnection Notify\n");
		netif_stop_queue(dev);
		break;
	default:
		break;
	}

	/* Save the connected status */
	macp->adapterState = status;

	if (zfiWlanQueryWlanMode(dev) == ZM_MODE_INFRASTRUCTURE) {
		wireless_send_event(dev, SIOCGIWAP, &wreq, NULL);
	} else if (zfiWlanQueryWlanMode(dev) == ZM_MODE_AP) {
		/* per-VAP delivery (vap[port].dev) removed long ago */
		wireless_send_event(dev, IWEVREGISTERED, &wreq, NULL);
	}
}

/* Scan-complete hook; currently a no-op. */
void zfLnxScanNotify(zdev_t* dev, struct zsScanResult* result)
{
	return;
}

/* Statistics hook; currently a no-op. */
void zfLnxStatisticsNotify(zdev_t* dev, struct zsStastics* result)
{
	return;
}

/*
 * zfLnxMicFailureNotify - report a Michael MIC failure to userspace
 *
 * Sends an IWEVCUSTOM event carrying the standard
 * "MLME-MICHAELMICFAILURE.indication" tag in infrastructure mode.
 * TODO: needed parameters: count, type, src address.
 */
void zfLnxMicFailureNotify(zdev_t* dev, u16_t* addr, u16_t status)
{
	static const char *tag = "MLME-MICHAELMICFAILURE.indication";
	union iwreq_data wrqu;
	char buf[128];

	/*
	 * Fix: buf was only written in infrastructure mode, but
	 * strlen(buf)/wireless_send_event() ran unconditionally, reading
	 * uninitialized stack memory.  Start from an empty string.
	 */
	buf[0] = '\0';

	if (zfiWlanQueryWlanMode(dev) == ZM_MODE_INFRASTRUCTURE)
		strcpy(buf, tag);

	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.length = strlen(buf);
	wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
}

/* MIC failure seen by the AP; log the offending STA's MAC. */
void zfLnxApMicFailureNotify(zdev_t* dev, u8_t* addr, zbuf_t* buf)
{
	union iwreq_data wreq;

	memset(&wreq, 0, sizeof(wreq));
	memcpy(wreq.addr.sa_data, addr, ETH_ALEN);
	wreq.addr.sa_family = ARPHRD_ETHER;

	printk(KERN_DEBUG "zfwApMicFailureNotify(), MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	return;
}

/*
 * IBSS partner liveness notification.
 * status = 0 => partner lost, 1 => partner alive.  Currently a no-op.
 */
void zfLnxIbssPartnerNotify(zdev_t* dev, u16_t status,
			    struct zsPartnerNotifyEvent *event)
{
}

/* Install the device MAC address reported by the firmware. */
void zfLnxMacAddressNotify(zdev_t* dev, u8_t* addr)
{
	/* was six individual byte assignments */
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}

void zfLnxSendCompleteIndication(zdev_t* dev, zbuf_t* buf)
{
}

void zfLnxRestoreBufData(zdev_t* dev, zbuf_t* buf)
{
}

/* Leave an empty line below to remove warning message on some compiler */
gpl-2.0
XCage15/linux-1
drivers/edac/edac_module.c
766
3198
/*
 * edac_module.c
 *
 * (C) 2007 www.softwarebitmaker.com
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * Author: Doug Thompson <dougthompson@xmission.com>
 *
 */
#include <linux/edac.h>

#include "edac_core.h"
#include "edac_module.h"

#define EDAC_VERSION "Ver: 3.0.0"

#ifdef CONFIG_EDAC_DEBUG

/*
 * edac_set_debug_level - module-param setter for edac_debug_level
 *
 * Validates that the written value parses as an integer in [0, 4]
 * before delegating the actual store to param_set_int() (which
 * re-parses @buf into the int behind @kp).
 */
static int edac_set_debug_level(const char *buf, struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/* levels above 4 are meaningless; reject them */
	if (val > 4)
		return -EINVAL;

	return param_set_int(buf, kp);
}

/* Values of 0 to 4 will generate output */
int edac_debug_level = 2;
EXPORT_SYMBOL_GPL(edac_debug_level);

module_param_call(edac_debug_level, edac_set_debug_level, param_get_int,
		  &edac_debug_level, 0644);
MODULE_PARM_DESC(edac_debug_level, "EDAC debug level: [0-4], default: 2");
#endif

/* scope is to module level only */
struct workqueue_struct *edac_workqueue;

/*
 * edac_op_state_to_string()
 *	map an operational-state constant to a human-readable name;
 *	any unknown value yields "UNKNOWN"
 */
char *edac_op_state_to_string(int opstate)
{
	if (opstate == OP_RUNNING_POLL)
		return "POLLED";
	else if (opstate == OP_RUNNING_INTERRUPT)
		return "INTERRUPT";
	else if (opstate == OP_RUNNING_POLL_INTR)
		return "POLL-INTR";
	else if (opstate == OP_ALLOC)
		return "ALLOC";
	else if (opstate == OP_OFFLINE)
		return "OFFLINE";

	return "UNKNOWN";
}

/*
 * edac_workqueue_setup
 *	initialize the edac work queue for polling operations
 *
 *	Returns 0 on success, -ENODEV when the workqueue cannot be
 *	created.
 */
static int edac_workqueue_setup(void)
{
	edac_workqueue = create_singlethread_workqueue("edac-poller");
	if (edac_workqueue == NULL)
		return -ENODEV;
	else
		return 0;
}

/*
 * edac_workqueue_teardown
 *	teardown the edac workqueue
 *
 *	Flushes pending work before destroying the queue; safe to call
 *	when the queue was never created (NULL check).
 */
static void edac_workqueue_teardown(void)
{
	if (edac_workqueue) {
		flush_workqueue(edac_workqueue);
		destroy_workqueue(edac_workqueue);
		edac_workqueue = NULL;
	}
}

/*
 * edac_init
 *	module initialization entry point
 *
 *	Order matters: sysfs must exist before debugfs and the poller
 *	workqueue; the error unwind below reverses exactly that order.
 */
static int __init edac_init(void)
{
	int err = 0;

	edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");

	/*
	 * Harvest and clear any boot/initialization PCI parity errors
	 *
	 * FIXME: This only clears errors logged by devices present at time of
	 *	module initialization.  We should also do an initial clear
	 *	of each newly hotplugged device.
	 */
	edac_pci_clear_parity_errors();

	err = edac_mc_sysfs_init();
	if (err)
		goto err_sysfs;

	edac_debugfs_init();

	err = edac_workqueue_setup();
	if (err) {
		edac_printk(KERN_ERR, EDAC_MC, "Failure initializing workqueue\n");
		goto err_wq;
	}

	return 0;

	/* unwind in reverse order of initialization */
err_wq:
	edac_debugfs_exit();
	edac_mc_sysfs_exit();

err_sysfs:
	return err;
}

/*
 * edac_exit()
 *	module exit/termination function
 */
static void __exit edac_exit(void)
{
	edac_dbg(0, "\n");

	/* tear down the various subsystems */
	edac_workqueue_teardown();
	edac_mc_sysfs_exit();
	edac_debugfs_exit();
}

/*
 * Inform the kernel of our entry and exit points
 */
subsys_initcall(edac_init);
module_exit(edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
MODULE_DESCRIPTION("Core library routines for EDAC reporting");
gpl-2.0
kevleyski/o5_raspberrypi_kernel
drivers/clk/clk-max77802.c
766
2682
/* * clk-max77802.c - Clock driver for Maxim 77802 * * Copyright (C) 2014 Google, Inc * * Copyright (C) 2012 Samsung Electornics * Jonghwa Lee <jonghwa3.lee@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This driver is based on clk-max77686.c */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/mfd/max77686-private.h> #include <linux/clk-provider.h> #include <linux/mutex.h> #include <linux/clkdev.h> #include <dt-bindings/clock/maxim,max77802.h> #include "clk-max-gen.h" #define MAX77802_CLOCK_OPMODE_MASK 0x1 #define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3 static struct clk_init_data max77802_clks_init[MAX77802_CLKS_NUM] = { [MAX77802_CLK_32K_AP] = { .name = "32khz_ap", .ops = &max_gen_clk_ops, .flags = CLK_IS_ROOT, }, [MAX77802_CLK_32K_CP] = { .name = "32khz_cp", .ops = &max_gen_clk_ops, .flags = CLK_IS_ROOT, }, }; static int max77802_clk_probe(struct platform_device *pdev) { struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); int ret; ret = max_gen_clk_probe(pdev, iodev->regmap, MAX77802_REG_32KHZ, max77802_clks_init, MAX77802_CLKS_NUM); if (ret) { dev_err(&pdev->dev, "generic probe failed %d\n", ret); return ret; } /* Enable low-jitter mode on the 32khz clocks. 
*/ ret = regmap_update_bits(iodev->regmap, MAX77802_REG_32KHZ, 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT, 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); if (ret < 0) dev_err(&pdev->dev, "failed to enable low-jitter mode\n"); return ret; } static int max77802_clk_remove(struct platform_device *pdev) { return max_gen_clk_remove(pdev, MAX77802_CLKS_NUM); } static const struct platform_device_id max77802_clk_id[] = { { "max77802-clk", 0}, { }, }; MODULE_DEVICE_TABLE(platform, max77802_clk_id); static struct platform_driver max77802_clk_driver = { .driver = { .name = "max77802-clk", }, .probe = max77802_clk_probe, .remove = max77802_clk_remove, .id_table = max77802_clk_id, }; module_platform_driver(max77802_clk_driver); MODULE_DESCRIPTION("MAXIM 77802 Clock Driver"); MODULE_AUTHOR("Javier Martinez Canillas <javier.martinez@collabora.co.uk>"); MODULE_LICENSE("GPL");
gpl-2.0
EPDCenterSpain/kernel_rikomagic_mk808
mm/filemap.c
766
70398
/* * linux/mm/filemap.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * This file handles the generic file mmap semantics used by * most "normal" filesystems (but you don't /have/ to use this: * the NFS filesystem used to do this differently, for example) */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/aio.h> #include <linux/capability.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/hash.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/cpuset.h> #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ #include <linux/memcontrol.h> #include <linux/mm_inline.h> /* for page_is_file_cache() */ #include <linux/cleancache.h> #include "internal.h" /* * FIXME: remove all knowledge of the buffer layer from the core VM */ #include <linux/buffer_head.h> /* for try_to_free_buffers */ #include <asm/mman.h> /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. * * Shared mappings now work. 15.8.1995 Bruno. 
* * finished 'unifying' the page and buffer cache and SMP-threaded the * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> * * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> */ /* * Lock ordering: * * ->i_mmap_mutex (truncate_pagecache) * ->private_lock (__free_pte->__set_page_dirty_buffers) * ->swap_lock (exclusive_swap_page, others) * ->mapping->tree_lock * * ->i_mutex * ->i_mmap_mutex (truncate->unmap_mapping_range) * * ->mmap_sem * ->i_mmap_mutex * ->page_table_lock or pte_lock (various, mainly in memory.c) * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) * * ->mmap_sem * ->lock_page (access_process_vm) * * ->i_mutex (generic_file_buffered_write) * ->mmap_sem (fault_in_pages_readable->do_page_fault) * * ->i_mutex * ->i_alloc_sem (various) * * inode_wb_list_lock * sb_lock (fs/fs-writeback.c) * ->mapping->tree_lock (__sync_single_inode) * * ->i_mmap_mutex * ->anon_vma.lock (vma_adjust) * * ->anon_vma.lock * ->page_table_lock or pte_lock (anon_vma_prepare and various) * * ->page_table_lock or pte_lock * ->swap_lock (try_to_unmap_one) * ->private_lock (try_to_unmap_one) * ->tree_lock (try_to_unmap_one) * ->zone.lru_lock (follow_page->mark_page_accessed) * ->zone.lru_lock (check_pte_range->isolate_lru_page) * ->private_lock (page_remove_rmap->set_page_dirty) * ->tree_lock (page_remove_rmap->set_page_dirty) * inode_wb_list_lock (page_remove_rmap->set_page_dirty) * ->inode->i_lock (page_remove_rmap->set_page_dirty) * inode_wb_list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->__set_page_dirty_buffers) * * (code doesn't rely on that order, so you could switch it around) * ->tasklist_lock (memory_failure, collect_procs_ao) * ->i_mmap_mutex */ /* * Delete a page from the page cache and free it. Caller has to make * sure the page is locked and that nobody else uses it - or that usage * is safe. The caller must hold the mapping's tree_lock. 
*/ void __delete_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; /* * if we're uptodate, flush out into the cleancache, otherwise * invalidate any existing cleancache entries. We can't leave * stale data around in the cleancache once our page is gone */ if (PageUptodate(page) && PageMappedToDisk(page)) cleancache_put_page(page); else cleancache_flush_page(mapping, page); radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; __dec_zone_page_state(page, NR_FILE_PAGES); if (PageSwapBacked(page)) __dec_zone_page_state(page, NR_SHMEM); BUG_ON(page_mapped(page)); /* * Some filesystems seem to re-dirty the page even after * the VM has canceled the dirty bit (eg ext3 journaling). * * Fix it up by doing a final dirty accounting check after * having removed the page entirely. */ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { dec_zone_page_state(page, NR_FILE_DIRTY); dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); } } /** * delete_from_page_cache - delete page from page cache * @page: the page which the kernel is trying to remove from page cache * * This must be called only on pages that have been verified to be in the page * cache and locked. It will never put the page into the free list, the caller * has a reference on the page. */ void delete_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; void (*freepage)(struct page *); BUG_ON(!PageLocked(page)); freepage = mapping->a_ops->freepage; spin_lock_irq(&mapping->tree_lock); __delete_from_page_cache(page); spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); if (freepage) freepage(page); page_cache_release(page); } EXPORT_SYMBOL(delete_from_page_cache); static int sleep_on_page(void *word) { io_schedule(); return 0; } static int sleep_on_page_killable(void *word) { sleep_on_page(word); return fatal_signal_pending(current) ? 
-EINTR : 0; } /** * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range * @mapping: address space structure to write * @start: offset in bytes where the range starts * @end: offset in bytes where the range ends (inclusive) * @sync_mode: enable synchronous operation * * Start writeback against all of a mapping's dirty pages that lie * within the byte offsets <start, end> inclusive. * * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as * opposed to a regular memory cleansing writeback. The difference between * these two operations is that if a dirty page/buffer is encountered, it must * be waited upon, and not just skipped over. */ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode) { int ret; struct writeback_control wbc = { .sync_mode = sync_mode, .nr_to_write = LONG_MAX, .range_start = start, .range_end = end, }; if (!mapping_cap_writeback_dirty(mapping)) return 0; ret = do_writepages(mapping, &wbc); return ret; } static inline int __filemap_fdatawrite(struct address_space *mapping, int sync_mode) { return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); } int filemap_fdatawrite(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end) { return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite_range); /** * filemap_flush - mostly a non-blocking flush * @mapping: target address_space * * This is a mostly non-blocking flush. Not suitable for data-integrity * purposes - I/O may not be started against all dirty pages. 
*/ int filemap_flush(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_NONE); } EXPORT_SYMBOL(filemap_flush); /** * filemap_fdatawait_range - wait for writeback to complete * @mapping: address space structure to wait for * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Walk the list of under-writeback pages of the given address space * in the given range and wait for all of them. */ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { pgoff_t index = start_byte >> PAGE_CACHE_SHIFT; pgoff_t end = end_byte >> PAGE_CACHE_SHIFT; struct pagevec pvec; int nr_pages; int ret = 0; if (end_byte < start_byte) return 0; pagevec_init(&pvec, 0); while ((index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_WRITEBACK, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { unsigned i; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* until radix tree lookup accepts end_index */ if (page->index > end) continue; wait_on_page_writeback(page); if (TestClearPageError(page)) ret = -EIO; } pagevec_release(&pvec); cond_resched(); } /* Check for outstanding write errors */ if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) ret = -ENOSPC; if (test_and_clear_bit(AS_EIO, &mapping->flags)) ret = -EIO; return ret; } EXPORT_SYMBOL(filemap_fdatawait_range); /** * filemap_fdatawait - wait for all under-writeback pages to complete * @mapping: address space structure to wait for * * Walk the list of under-writeback pages of the given address space * and wait for all of them. 
*/ int filemap_fdatawait(struct address_space *mapping) { loff_t i_size = i_size_read(mapping->host); if (i_size == 0) return 0; return filemap_fdatawait_range(mapping, 0, i_size - 1); } EXPORT_SYMBOL(filemap_fdatawait); int filemap_write_and_wait(struct address_space *mapping) { int err = 0; if (mapping->nrpages) { err = filemap_fdatawrite(mapping); /* * Even if the above returned error, the pages may be * written partially (e.g. -ENOSPC), so we wait for it. * But the -EIO is special case, it may indicate the worst * thing (e.g. bug) happened, so we avoid waiting for it. */ if (err != -EIO) { int err2 = filemap_fdatawait(mapping); if (!err) err = err2; } } return err; } EXPORT_SYMBOL(filemap_write_and_wait); /** * filemap_write_and_wait_range - write out & wait on a file range * @mapping: the address_space for the pages * @lstart: offset in bytes where the range starts * @lend: offset in bytes where the range ends (inclusive) * * Write out and wait upon file offsets lstart->lend, inclusive. * * Note that `lend' is inclusive (describes the last byte to be written) so * that this function can be used to write to the very end-of-file (end = -1). */ int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend) { int err = 0; if (mapping->nrpages) { err = __filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL); /* See comment of filemap_write_and_wait() */ if (err != -EIO) { int err2 = filemap_fdatawait_range(mapping, lstart, lend); if (!err) err = err2; } } return err; } EXPORT_SYMBOL(filemap_write_and_wait_range); /** * replace_page_cache_page - replace a pagecache page with a new one * @old: page to be replaced * @new: page to replace with * @gfp_mask: allocation mode * * This function replaces a page in the pagecache with a new one. On * success it acquires the pagecache reference for the new page and * drops it for the old page. Both the old and new pages must be * locked. 
This function does not add the new page to the LRU, the * caller must do that. * * The remove + add is atomic. The only way this function can fail is * memory allocation failure. */ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) { int error; VM_BUG_ON(!PageLocked(old)); VM_BUG_ON(!PageLocked(new)); VM_BUG_ON(new->mapping); error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (!error) { struct address_space *mapping = old->mapping; void (*freepage)(struct page *); pgoff_t offset = old->index; freepage = mapping->a_ops->freepage; page_cache_get(new); new->mapping = mapping; new->index = offset; spin_lock_irq(&mapping->tree_lock); __delete_from_page_cache(old); error = radix_tree_insert(&mapping->page_tree, offset, new); BUG_ON(error); mapping->nrpages++; __inc_zone_page_state(new, NR_FILE_PAGES); if (PageSwapBacked(new)) __inc_zone_page_state(new, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); /* mem_cgroup codes must not be called under tree_lock */ mem_cgroup_replace_page_cache(old, new); radix_tree_preload_end(); if (freepage) freepage(old); page_cache_release(old); } return error; } EXPORT_SYMBOL_GPL(replace_page_cache_page); /** * add_to_page_cache_locked - add a locked page to the pagecache * @page: page to add * @mapping: the page's address_space * @offset: page index * @gfp_mask: page allocation mode * * This function is used to add a page to the pagecache. It must be locked. * This function does not add the page to the LRU. The caller must do that. 
*/ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error; VM_BUG_ON(!PageLocked(page)); error = mem_cgroup_cache_charge(page, current->mm, gfp_mask & GFP_RECLAIM_MASK); if (error) goto out; error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { page_cache_get(page); page->mapping = mapping; page->index = offset; spin_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); if (PageSwapBacked(page)) __inc_zone_page_state(page, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; spin_unlock_irq(&mapping->tree_lock); mem_cgroup_uncharge_cache_page(page); page_cache_release(page); } radix_tree_preload_end(); } else mem_cgroup_uncharge_cache_page(page); out: return error; } EXPORT_SYMBOL(add_to_page_cache_locked); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int ret; /* * Splice_read and readahead add shmem/tmpfs pages into the page cache * before shmem_readpage has a chance to mark them as SwapBacked: they * need to go on the anon lru below, and mem_cgroup_cache_charge * (called in add_to_page_cache) needs to know where they're going too. 
*/ if (mapping_cap_swap_backed(mapping)) SetPageSwapBacked(page); ret = add_to_page_cache(page, mapping, offset, gfp_mask); if (ret == 0) { if (page_is_file_cache(page)) lru_cache_add_file(page); else lru_cache_add_anon(page); } return ret; } EXPORT_SYMBOL_GPL(add_to_page_cache_lru); #ifdef CONFIG_NUMA struct page *__page_cache_alloc(gfp_t gfp) { int n; struct page *page; if (cpuset_do_page_mem_spread()) { unsigned int cpuset_mems_cookie; do { cpuset_mems_cookie = get_mems_allowed(); n = cpuset_mem_spread_node(); page = alloc_pages_exact_node(n, gfp, 0); } while (!put_mems_allowed(cpuset_mems_cookie) && !page); return page; } return alloc_pages(gfp, 0); } EXPORT_SYMBOL(__page_cache_alloc); #endif /* * In order to wait for pages to become available there must be * waitqueues associated with pages. By using a hash table of * waitqueues where the bucket discipline is to maintain all * waiters on the same queue and wake all when any of the pages * become available, and for the woken contexts to check to be * sure the appropriate page became available, this saves space * at a cost of "thundering herd" phenomena during rare hash * collisions. 
*/ static wait_queue_head_t *page_waitqueue(struct page *page) { const struct zone *zone = page_zone(page); return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; } static inline void wake_up_page(struct page *page, int bit) { __wake_up_bit(page_waitqueue(page), &page->flags, bit); } void wait_on_page_bit(struct page *page, int bit_nr) { DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); if (test_bit(bit_nr, &page->flags)) __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_on_page_bit); int wait_on_page_bit_killable(struct page *page, int bit_nr) { DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); if (!test_bit(bit_nr, &page->flags)) return 0; return __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page_killable, TASK_KILLABLE); } /** * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue * @page: Page defining the wait queue of interest * @waiter: Waiter to add to the queue * * Add an arbitrary @waiter to the wait queue for the nominated @page. */ void add_page_wait_queue(struct page *page, wait_queue_t *waiter) { wait_queue_head_t *q = page_waitqueue(page); unsigned long flags; spin_lock_irqsave(&q->lock, flags); __add_wait_queue(q, waiter); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(add_page_wait_queue); /** * unlock_page - unlock a locked page * @page: the page * * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). * Also wakes sleepers in wait_on_page_writeback() because the wakeup * mechananism between PageLocked pages and PageWriteback pages is shared. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * * The mb is necessary to enforce ordering between the clear_bit and the read * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). 
*/ void unlock_page(struct page *page) { VM_BUG_ON(!PageLocked(page)); clear_bit_unlock(PG_locked, &page->flags); smp_mb__after_clear_bit(); wake_up_page(page, PG_locked); } EXPORT_SYMBOL(unlock_page); /** * end_page_writeback - end writeback against a page * @page: the page */ void end_page_writeback(struct page *page) { if (TestClearPageReclaim(page)) rotate_reclaimable_page(page); if (!test_clear_page_writeback(page)) BUG(); smp_mb__after_clear_bit(); wake_up_page(page, PG_writeback); } EXPORT_SYMBOL(end_page_writeback); /** * __lock_page - get a lock on the page, assuming we need to sleep to get it * @page: the page to lock */ void __lock_page(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__lock_page); int __lock_page_killable(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); return __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page_killable, TASK_KILLABLE); } EXPORT_SYMBOL_GPL(__lock_page_killable); int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags) { if (flags & FAULT_FLAG_ALLOW_RETRY) { /* * CAUTION! In this case, mmap_sem is not released * even though return 0. */ if (flags & FAULT_FLAG_RETRY_NOWAIT) return 0; up_read(&mm->mmap_sem); if (flags & FAULT_FLAG_KILLABLE) wait_on_page_locked_killable(page); else wait_on_page_locked(page); return 0; } else { if (flags & FAULT_FLAG_KILLABLE) { int ret; ret = __lock_page_killable(page); if (ret) { up_read(&mm->mmap_sem); return 0; } } else __lock_page(page); return 1; } } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Is there a pagecache struct page at the given (mapping, offset) tuple? * If yes, increment its refcount and return it; if no, return NULL. 
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		/* Slot being moved by a concurrent tree modification: retry. */
		if (radix_tree_deref_retry(page))
			goto repeat;

		/* Refcount may have hit zero (page being freed): retry. */
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			/* Someone else inserted first: go find their page. */
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page)) {
			WARN_ON(start | i);
			goto restart;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, index, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page))
			goto restart;

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		ret++;
		index++;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
				(void ***)pages, *index, nr_pages, tag);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		/*
		 * This can only trigger when the entry at index 0 moves out
		 * of or back to the root: none yet gotten, safe to restart.
		 */
		if (radix_tree_deref_retry(page))
			goto restart;

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		/* Page exists: return it locked, or give up without sleeping. */
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			/* Cache miss: kick off synchronous readahead and retry. */
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * If the fs supports partial-uptodate checks, the
			 * needed range may already be valid without waiting
			 * for the whole page to be brought uptodate.
			 */
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	/* Remember where we stopped, for next time's readahead heuristics. */
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

/*
 * Read actor: copy up to @size bytes from @page at @offset into the
 * user buffer described by @desc.  Returns the number of bytes consumed
 * from the page; a partial copy sets desc->error to -EFAULT.
 */
int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long   seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);

/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg = 0;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			/* Flush dirty pagecache so DIO sees current data. */
			retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
			if (!retval) {
				struct blk_plug plug;

				blk_start_plug(&plug);
				retval = mapping->a_ops->direct_IO(READ, iocb,
							iov, pos, nr_segs);
				blk_finish_plug(&plug);
			}
			if (retval > 0) {
				*ppos = pos + retval;
				count -= retval;
			}

			/*
			 * Btrfs can have a short DIO read if we encounter
			 * compressed extents, so if there was an error, or if
			 * we've already read everything we wanted to, or if
			 * there was a short read because we hit EOF, go ahead
			 * and return.  Otherwise fallthrough to buffered io for
			 * the rest of the read.
			 */
			if (retval < 0 || !count || *ppos >= size) {
				file_accessed(filp);
				goto out;
			}
		}
	}

	count = retval;
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;
		loff_t offset = 0;

		/*
		 * If we did a short DIO read we need to skip the section of the
		 * iov that we've already read data into.
		 */
		if (count) {
			if (count > iov[seg].iov_len) {
				count -= iov[seg].iov_len;
				continue;
			}
			offset = count;
			count = 0;
		}

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base + offset;
		desc.count = iov[seg].iov_len - offset;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			/* Only report the error if nothing was read at all. */
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

/* Validate the mapping and force readahead of @nr pages starting at @index. */
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index, nr);
	return 0;
}

/* readahead(2): populate the page cache for [offset, offset+count) of fd. */
SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
{
	return SYSC_readahead((int) fd, offset, (size_t) count);
}
SYSCALL_ALIAS(sys_readahead, SyS_readahead);
#endif

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (!ra->ra_pages)
		return;

	if (VM_SequentialReadHint(vma)) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around: read a window centred on the faulting offset.
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON(page->index != offset);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

/*
 * Find the page at @index, or allocate/insert a new one and run @filler
 * on it.  Returns the page (reference held) or an ERR_PTR.  A freshly
 * filled page may still be locked/not-uptodate when this returns; callers
 * use do_read_cache_page()/wait_on_page_read() to finish the job.
 */
static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

/*
 * Common implementation of read_cache_page_async()/read_cache_page_gfp():
 * get the page and, if it is not uptodate, (re)run @filler under the page
 * lock, retrying if the page was truncated meanwhile.
 */
static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		/* Truncated under us: drop it and start over. */
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);

/*
 * Wait for the read started by the filler to complete, converting a
 * still-not-uptodate page into ERR_PTR(-EIO).  Passes ERR_PTRs through.
 */
static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data)
{
	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	/* Privileged (CAP_FSETID) writers keep the bits; so do non-regular files. */
	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/* Ask the fs (via notify_change) to drop the ATTR_KILL_* bits in @kill. */
static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	/* Cache "nothing to do" so the fast path above triggers next time. */
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

/*
 * Copy from a multi-segment iovec into @vaddr without sleeping.  Returns
 * the number of bytes actually copied; stops early at the first fault.
 */
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number of
 * bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page, KM_USER0);
	if (likely(i->nr_segs == 1)) {
		/* Single-segment fast path. */
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr, KM_USER0);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same sideeffects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

/* Advance the iterator by @bytes, walking across segments as needed. */
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes.
Returns 0 on success, or non-zero if the memory could not be * accessed (ie. because it is an invalid address). * * writev-intensive code may want this to prefault several iovecs -- that * would be possible (callers must not rely on the fact that _only_ the * first iovec will be faulted with the current implementation). */ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) { char __user *buf = i->iov->iov_base + i->iov_offset; bytes = min(bytes, i->iov->iov_len - i->iov_offset); return fault_in_pages_readable(buf, bytes); } EXPORT_SYMBOL(iov_iter_fault_in_readable); /* * Return the count of just the current iov_iter segment. */ size_t iov_iter_single_seg_count(struct iov_iter *i) { const struct iovec *iov = i->iov; if (i->nr_segs == 1) return i->count; else return min(i->count, iov->iov_len - i->iov_offset); } EXPORT_SYMBOL(iov_iter_single_seg_count); /* * Performs necessary checks before doing a write * * Can adjust writing position or amount of bytes to write. * Returns appropriate error code that caller should return or * zero in case that write should be allowed. */ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) { struct inode *inode = file->f_mapping->host; unsigned long limit = rlimit(RLIMIT_FSIZE); if (unlikely(*pos < 0)) return -EINVAL; if (!isblk) { /* FIXME: this is for backwards compatibility with 2.4 */ if (file->f_flags & O_APPEND) *pos = i_size_read(inode); if (limit != RLIM_INFINITY) { if (*pos >= limit) { send_sig(SIGXFSZ, current, 0); return -EFBIG; } if (*count > limit - (typeof(limit))*pos) { *count = limit - (typeof(limit))*pos; } } } /* * LFS rule */ if (unlikely(*pos + *count > MAX_NON_LFS && !(file->f_flags & O_LARGEFILE))) { if (*pos >= MAX_NON_LFS) { return -EFBIG; } if (*count > MAX_NON_LFS - (unsigned long)*pos) { *count = MAX_NON_LFS - (unsigned long)*pos; } } /* * Are we about to exceed the fs block limit ? * * If we have written data it becomes a short write. 
If we have * exceeded without writing data we send a signal and return EFBIG. * Linus frestrict idea will clean these up nicely.. */ if (likely(!isblk)) { if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { if (*count || *pos > inode->i_sb->s_maxbytes) { return -EFBIG; } /* zero-length writes at ->s_maxbytes are OK */ } if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) *count = inode->i_sb->s_maxbytes - *pos; } else { #ifdef CONFIG_BLOCK loff_t isize; if (bdev_read_only(I_BDEV(inode))) return -EPERM; isize = i_size_read(inode); if (*pos >= isize) { if (*count || *pos > isize) return -ENOSPC; } if (*pos + *count > isize) *count = isize - *pos; #else return -EPERM; #endif } return 0; } EXPORT_SYMBOL(generic_write_checks); int pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { const struct address_space_operations *aops = mapping->a_ops; return aops->write_begin(file, mapping, pos, len, flags, pagep, fsdata); } EXPORT_SYMBOL(pagecache_write_begin); int pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { const struct address_space_operations *aops = mapping->a_ops; mark_page_accessed(page); return aops->write_end(file, mapping, pos, len, copied, page, fsdata); } EXPORT_SYMBOL(pagecache_write_end); ssize_t generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, unsigned long *nr_segs, loff_t pos, loff_t *ppos, size_t count, size_t ocount) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; ssize_t written; size_t write_len; pgoff_t end; if (count != ocount) *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); write_len = iov_length(iov, *nr_segs); end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); if (written) 
goto out; /* * After a write we want buffered reads to be sure to go to disk to get * the new data. We invalidate clean cached page from the region we're * about to write. We do this *before* the write so that we can return * without clobbering -EIOCBQUEUED from ->direct_IO(). */ if (mapping->nrpages) { written = invalidate_inode_pages2_range(mapping, pos >> PAGE_CACHE_SHIFT, end); /* * If a page can not be invalidated, return 0 to fall back * to buffered write. */ if (written) { if (written == -EBUSY) return 0; goto out; } } written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs); /* * Finally, try again to invalidate clean pages which might have been * cached by non-direct readahead, or faulted in by get_user_pages() * if the source of the write was an mmap'ed region of the file * we're writing. Either one is a pretty crazy thing to do, * so we don't support it 100%. If this invalidation * fails, tough, the write still worked... */ if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, pos >> PAGE_CACHE_SHIFT, end); } if (written > 0) { pos += written; if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { i_size_write(inode, pos); mark_inode_dirty(inode); } *ppos = pos; } out: return written; } EXPORT_SYMBOL(generic_file_direct_write); /* * Find or create a page at the given pagecache position. Return the locked * page. This function is specifically for buffered writes. 
*/ struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags) { int status; struct page *page; gfp_t gfp_notmask = 0; if (flags & AOP_FLAG_NOFS) gfp_notmask = __GFP_FS; repeat: page = find_lock_page(mapping, index); if (page) goto found; page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); if (!page) return NULL; status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL & ~gfp_notmask); if (unlikely(status)) { page_cache_release(page); if (status == -EEXIST) goto repeat; return NULL; } found: wait_on_page_writeback(page); return page; } EXPORT_SYMBOL(grab_cache_page_write_begin); static ssize_t generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) { struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; ssize_t written = 0; unsigned int flags = 0; /* * Copies from kernel address space cannot fail (NFSD is a big user). */ if (segment_eq(get_fs(), KERNEL_DS)) flags |= AOP_FLAG_UNINTERRUPTIBLE; do { struct page *page; unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ void *fsdata; offset = (pos & (PAGE_CACHE_SIZE - 1)); bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_count(i)); again: /* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * Not only is this an optimisation, but it is also required * to check that the address is actually valid, when atomic * usercopies are used, below. 
*/ if (unlikely(iov_iter_fault_in_readable(i, bytes))) { status = -EFAULT; break; } status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status)) break; if (mapping_writably_mapped(mapping)) flush_dcache_page(page); pagefault_disable(); copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); pagefault_enable(); flush_dcache_page(page); mark_page_accessed(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status < 0)) break; copied = status; cond_resched(); iov_iter_advance(i, copied); if (unlikely(copied == 0)) { /* * If we were unable to copy any data at all, we must * fall back to a single segment length write. * * If we didn't fallback here, we could livelock * because not all segments in the iov can be copied at * once without a pagefault. */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); goto again; } pos += copied; written += copied; balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); return written ? written : status; } ssize_t generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos, loff_t *ppos, size_t count, ssize_t written) { struct file *file = iocb->ki_filp; ssize_t status; struct iov_iter i; iov_iter_init(&i, iov, nr_segs, count, written); status = generic_perform_write(file, &i, pos); if (likely(status >= 0)) { written += status; *ppos = pos + status; } return written ? written : status; } EXPORT_SYMBOL(generic_file_buffered_write); /** * __generic_file_aio_write - write data to a file * @iocb: IO state structure (file, offset, etc.) * @iov: vector with data to write * @nr_segs: number of segments in the vector * @ppos: position where to write * * This function does all the work needed for actually writing data to a * file. 
It does all basic checks, removes SUID from the file, updates * modification times and calls proper subroutines depending on whether we * do direct IO or a standard buffered write. * * It expects i_mutex to be grabbed unless we work on a block device or similar * object which does not need locking at all. * * This function does *not* take care of syncing data in case of O_SYNC write. * A caller has to handle it. This is mainly due to the fact that we want to * avoid syncing under i_mutex. */ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; size_t ocount; /* original count */ size_t count; /* after file limit checks */ struct inode *inode = mapping->host; loff_t pos; ssize_t written; ssize_t err; ocount = 0; err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); if (err) return err; count = ocount; pos = *ppos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; written = 0; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) goto out; if (count == 0) goto out; err = file_remove_suid(file); if (err) goto out; file_update_time(file); /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (unlikely(file->f_flags & O_DIRECT)) { loff_t endbyte; ssize_t written_buffered; written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, count, ocount); if (written < 0 || written == count) goto out; /* * direct-io write to a hole: fall through to buffered I/O * for completing the rest of the request. 
*/ pos += written; count -= written; written_buffered = generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written); /* * If generic_file_buffered_write() retuned a synchronous error * then we want to return the number of bytes which were * direct-written, or the error code if that was zero. Note * that this differs from normal direct-io semantics, which * will return -EFOO even if some bytes were written. */ if (written_buffered < 0) { err = written_buffered; goto out; } /* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT * semantics. */ endbyte = pos + written_buffered - written - 1; err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); if (err == 0) { written = written_buffered; invalidate_mapping_pages(mapping, pos >> PAGE_CACHE_SHIFT, endbyte >> PAGE_CACHE_SHIFT); } else { /* * We don't know how much we wrote, so just return * the number of bytes which were direct-written */ } } else { written = generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written); } out: current->backing_dev_info = NULL; return written ? written : err; } EXPORT_SYMBOL(__generic_file_aio_write); /** * generic_file_aio_write - write data to a file * @iocb: IO state structure * @iov: vector with data to write * @nr_segs: number of segments in the vector * @pos: position in file where to write * * This is a wrapper around __generic_file_aio_write() to be used by most * filesystems. It takes care of syncing the file in case of O_SYNC file * and acquires i_mutex as needed. 
*/ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct blk_plug plug; ssize_t ret; BUG_ON(iocb->ki_pos != pos); mutex_lock(&inode->i_mutex); blk_start_plug(&plug); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); if (ret > 0 || ret == -EIOCBQUEUED) { ssize_t err; err = generic_write_sync(file, pos, ret); if (err < 0 && ret > 0) ret = err; } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(generic_file_aio_write); /** * try_to_release_page() - release old fs-specific metadata on a page * * @page: the page which the kernel is trying to free * @gfp_mask: memory allocation flags (and I/O mode) * * The address_space is to try to release any data against the page * (presumably at page->private). If the release was successful, return `1'. * Otherwise return zero. * * This may also be called if PG_fscache is set on a page, indicating that the * page is known to the local caching routines. * * The @gfp_mask argument specifies whether I/O may be performed to release * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). * */ int try_to_release_page(struct page *page, gfp_t gfp_mask) { struct address_space * const mapping = page->mapping; BUG_ON(!PageLocked(page)); if (PageWriteback(page)) return 0; if (mapping && mapping->a_ops->releasepage) return mapping->a_ops->releasepage(page, gfp_mask); return try_to_free_buffers(page); } EXPORT_SYMBOL(try_to_release_page);
gpl-2.0
troth/linux-kernel
arch/mips/mm/hugetlbpage.c
1022
2113
/* * MIPS Huge TLB Page Support for Kernel. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> * Copyright 2005, Embedded Alley Solutions, Inc. * Matt Porter <mporter@embeddedalley.com> * Copyright (C) 2008, 2009 Cavium Networks, Inc. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/err.h> #include <linux/sysctl.h> #include <asm/mman.h> #include <asm/tlb.h> #include <asm/tlbflush.h> pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (pud) pte = (pte_t *)pmd_alloc(mm, pud, addr); return pte; } pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd = NULL; pgd = pgd_offset(mm, addr); if (pgd_present(*pgd)) { pud = pud_offset(pgd, addr); if (pud_present(*pud)) pmd = pmd_offset(pud, addr); } return (pte_t *) pmd; } int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { return 0; } /* * This function checks for proper alignment of input addr and len parameters. 
*/ int is_aligned_hugepage_range(unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; if (addr & ~HPAGE_MASK) return -EINVAL; return 0; } struct page * follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { return ERR_PTR(-EINVAL); } int pmd_huge(pmd_t pmd) { return (pmd_val(pmd) & _PAGE_HUGE) != 0; } int pud_huge(pud_t pud) { return (pud_val(pud) & _PAGE_HUGE) != 0; } int pmd_huge_support(void) { return 1; } struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { struct page *page; page = pte_page(*(pte_t *)pmd); if (page) page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); return page; }
gpl-2.0
sakuraba001/android_kernel_samsung_klteactive
arch/arm/mach-msm/modem_notifier.c
3326
5065
/* Copyright (c) 2008-2010, 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * Modem Restart Notifier -- Provides notification * of modem restart events. */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/workqueue.h> #include "modem_notifier.h" #define DEBUG static struct srcu_notifier_head modem_notifier_list; static struct workqueue_struct *modem_notifier_wq; static void notify_work_smsm_init(struct work_struct *work) { modem_notify(0, MODEM_NOTIFIER_SMSM_INIT); } static DECLARE_WORK(modem_notifier_smsm_init_work, &notify_work_smsm_init); void modem_queue_smsm_init_notify(void) { int ret; ret = queue_work(modem_notifier_wq, &modem_notifier_smsm_init_work); if (!ret) printk(KERN_ERR "%s\n", __func__); } EXPORT_SYMBOL(modem_queue_smsm_init_notify); static void notify_work_start_reset(struct work_struct *work) { modem_notify(0, MODEM_NOTIFIER_START_RESET); } static DECLARE_WORK(modem_notifier_start_reset_work, &notify_work_start_reset); void modem_queue_start_reset_notify(void) { int ret; ret = queue_work(modem_notifier_wq, &modem_notifier_start_reset_work); if (!ret) printk(KERN_ERR "%s\n", __func__); } EXPORT_SYMBOL(modem_queue_start_reset_notify); static void notify_work_end_reset(struct work_struct *work) { modem_notify(0, MODEM_NOTIFIER_END_RESET); } static DECLARE_WORK(modem_notifier_end_reset_work, &notify_work_end_reset); void modem_queue_end_reset_notify(void) { int ret; ret = queue_work(modem_notifier_wq, 
&modem_notifier_end_reset_work); if (!ret) printk(KERN_ERR "%s\n", __func__); } EXPORT_SYMBOL(modem_queue_end_reset_notify); int modem_register_notifier(struct notifier_block *nb) { int ret; ret = srcu_notifier_chain_register( &modem_notifier_list, nb); return ret; } EXPORT_SYMBOL(modem_register_notifier); int modem_unregister_notifier(struct notifier_block *nb) { int ret; ret = srcu_notifier_chain_unregister( &modem_notifier_list, nb); return ret; } EXPORT_SYMBOL(modem_unregister_notifier); void modem_notify(void *data, unsigned int state) { srcu_notifier_call_chain(&modem_notifier_list, state, data); } EXPORT_SYMBOL(modem_notify); #if defined(CONFIG_DEBUG_FS) static int debug_reset_start(const char __user *buf, int count) { modem_queue_start_reset_notify(); return 0; } static int debug_reset_end(const char __user *buf, int count) { modem_queue_end_reset_notify(); return 0; } static ssize_t debug_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { int (*fling)(const char __user *buf, int max) = file->private_data; fling(buf, count); return count; } static int debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations debug_ops = { .write = debug_write, .open = debug_open, }; static void debug_create(const char *name, mode_t mode, struct dentry *dent, int (*fling)(const char __user *buf, int max)) { debugfs_create_file(name, mode, dent, fling, &debug_ops); } static void modem_notifier_debugfs_init(void) { struct dentry *dent; dent = debugfs_create_dir("modem_notifier", 0); if (IS_ERR(dent)) return; debug_create("reset_start", 0444, dent, debug_reset_start); debug_create("reset_end", 0444, dent, debug_reset_end); } #else static void modem_notifier_debugfs_init(void) {} #endif #if defined(DEBUG) static int modem_notifier_test_call(struct notifier_block *this, unsigned long code, void *_cmd) { switch (code) { case MODEM_NOTIFIER_START_RESET: printk(KERN_ERR 
"Notify: start reset\n"); break; case MODEM_NOTIFIER_END_RESET: printk(KERN_ERR "Notify: end reset\n"); break; case MODEM_NOTIFIER_SMSM_INIT: printk(KERN_ERR "Notify: smsm init\n"); break; default: printk(KERN_ERR "Notify: general\n"); break; } return NOTIFY_DONE; } static struct notifier_block nb = { .notifier_call = modem_notifier_test_call, }; static void register_test_notifier(void) { modem_register_notifier(&nb); } #endif int __init msm_init_modem_notifier_list(void) { static bool registered; if (registered) return 0; registered = true; srcu_init_notifier_head(&modem_notifier_list); modem_notifier_debugfs_init(); #if defined(DEBUG) register_test_notifier(); #endif /* Create the workqueue */ modem_notifier_wq = create_singlethread_workqueue("modem_notifier"); if (!modem_notifier_wq) { srcu_cleanup_notifier_head(&modem_notifier_list); return -ENOMEM; } return 0; } module_init(msm_init_modem_notifier_list);
gpl-2.0
abev66/ubuntu-raring
arch/ia64/kernel/machine_kexec.c
3326
4657
/* * arch/ia64/kernel/machine_kexec.c * * Handle transition of Linux booting another kernel * Copyright (C) 2005 Hewlett-Packard Development Comapny, L.P. * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com> * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/cpu.h> #include <linux/irq.h> #include <linux/efi.h> #include <linux/numa.h> #include <linux/mmzone.h> #include <asm/numa.h> #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/delay.h> #include <asm/meminit.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/mca.h> typedef void (*relocate_new_kernel_t)( unsigned long indirection_page, unsigned long start_address, struct ia64_boot_param *boot_param, unsigned long pal_addr) __noreturn; struct kimage *ia64_kimage; struct resource efi_memmap_res = { .name = "EFI Memory Map", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; struct resource boot_param_res = { .name = "Boot parameter", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; /* * Do what every setup is needed on image and the * reboot code buffer to allow us to avoid allocations * later. */ int machine_kexec_prepare(struct kimage *image) { void *control_code_buffer; const unsigned long *func; func = (unsigned long *)&relocate_new_kernel; /* Pre-load control code buffer to minimize work in kexec path */ control_code_buffer = page_address(image->control_code_page); memcpy((void *)control_code_buffer, (const void *)func[0], relocate_new_kernel_size); flush_icache_range((unsigned long)control_code_buffer, (unsigned long)control_code_buffer + relocate_new_kernel_size); ia64_kimage = image; return 0; } void machine_kexec_cleanup(struct kimage *image) { } /* * Do not allocate memory (or fail in any way) in machine_kexec(). 
* We are past the point of no return, committed to rebooting now. */ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) { struct kimage *image = arg; relocate_new_kernel_t rnk; void *pal_addr = efi_get_pal_addr(); unsigned long code_addr; int ii; u64 fp, gp; ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump; BUG_ON(!image); code_addr = (unsigned long)page_address(image->control_code_page); if (image->type == KEXEC_TYPE_CRASH) { crash_save_this_cpu(); current->thread.ksp = (__u64)info->sw - 16; /* Register noop init handler */ fp = ia64_tpa(init_handler->fp); gp = ia64_tpa(ia64_getreg(_IA64_REG_GP)); ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0); } else { /* Unregister init handlers of current kernel */ ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0); } /* Unregister mca handler - No more recovery on current kernel */ ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0); /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); /* Mask CMC and Performance Monitor interrupts */ ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* Mask ITV and Local Redirect Registers */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); /* terminate possible nested in-service interrupts */ for (ii = 0; ii < 16; ii++) ia64_eoi(); /* unmask TPR and clear any pending interrupts */ ia64_setreg(_IA64_REG_CR_TPR, 0); ia64_srlz_d(); while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); platform_kernel_launch_event(); rnk = (relocate_new_kernel_t)&code_addr; (*rnk)(image->head, image->start, ia64_boot_param, GRANULEROUNDDOWN((unsigned long) pal_addr)); BUG(); } void machine_kexec(struct kimage *image) { BUG_ON(!image); unw_init_running(ia64_machine_kexec, image); for(;;); } void arch_crash_save_vmcoreinfo(void) { #if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM) VMCOREINFO_SYMBOL(pgdat_list); VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES); 
#endif #ifdef CONFIG_NUMA VMCOREINFO_SYMBOL(node_memblk); VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS); VMCOREINFO_STRUCT_SIZE(node_memblk_s); VMCOREINFO_OFFSET(node_memblk_s, start_paddr); VMCOREINFO_OFFSET(node_memblk_s, size); #endif #ifdef CONFIG_PGTABLE_3 VMCOREINFO_CONFIG(PGTABLE_3); #elif defined(CONFIG_PGTABLE_4) VMCOREINFO_CONFIG(PGTABLE_4); #endif } unsigned long paddr_vmcoreinfo_note(void) { return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note); }
gpl-2.0
dinh-linux/linux-socfpga
drivers/edac/amd64_edac_dbg.c
3582
2023
#include "amd64_edac.h" #define EDAC_DCT_ATTR_SHOW(reg) \ static ssize_t amd64_##reg##_show(struct device *dev, \ struct device_attribute *mattr, \ char *data) \ { \ struct mem_ctl_info *mci = to_mci(dev); \ struct amd64_pvt *pvt = mci->pvt_info; \ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ } EDAC_DCT_ATTR_SHOW(dhar); EDAC_DCT_ATTR_SHOW(dbam0); EDAC_DCT_ATTR_SHOW(top_mem); EDAC_DCT_ATTR_SHOW(top_mem2); static ssize_t amd64_hole_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); u64 hole_base = 0; u64 hole_offset = 0; u64 hole_size = 0; amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size); return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset, hole_size); } /* * update NUM_DBG_ATTRS in case you add new members */ static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL); static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL); static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL); static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL); static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL); int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) { int rc; rc = device_create_file(&mci->dev, &dev_attr_dhar); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_dbam); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_topmem); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_topmem2); if (rc < 0) return rc; rc = device_create_file(&mci->dev, &dev_attr_dram_hole); if (rc < 0) return rc; return 0; } void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) { device_remove_file(&mci->dev, &dev_attr_dhar); device_remove_file(&mci->dev, &dev_attr_dbam); device_remove_file(&mci->dev, &dev_attr_topmem); device_remove_file(&mci->dev, &dev_attr_topmem2); device_remove_file(&mci->dev, &dev_attr_dram_hole); }
gpl-2.0
BlownFuze/i717_TW_JBkernel
drivers/scsi/scsi_transport_spi.c
3582
43947
/* * Parallel SCSI (SPI) transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <scsi/scsi.h> #include "scsi_priv.h" #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always * on" attributes */ #define SPI_HOST_ATTRS 1 #define SPI_MAX_ECHO_BUFFER_SIZE 4096 #define DV_LOOPS 3 #define DV_TIMEOUT (10*HZ) #define DV_RETRIES 3 /* should only need at most * two cc/ua clears */ /* Our blacklist flags */ enum { SPI_BLIST_NOIUS = 0x1, }; /* blacklist table, modelled on scsi_devinfo.c */ static struct { char *vendor; char *model; unsigned flags; } spi_static_device_list[] __initdata = { {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, {NULL, NULL, 0} }; /* 
Private data accessors (keep these out of the header file) */ #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) struct spi_internal { struct scsi_transport_template t; struct spi_function_template *f; }; #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) static const int ppr_to_ps[] = { /* The PPR values 0-6 are reserved, fill them in when * the committee defines them */ -1, /* 0x00 */ -1, /* 0x01 */ -1, /* 0x02 */ -1, /* 0x03 */ -1, /* 0x04 */ -1, /* 0x05 */ -1, /* 0x06 */ 3125, /* 0x07 */ 6250, /* 0x08 */ 12500, /* 0x09 */ 25000, /* 0x0a */ 30300, /* 0x0b */ 50000, /* 0x0c */ }; /* The PPR values at which you calculate the period in ns by multiplying * by 4 */ #define SPI_STATIC_PPR 0x0c static int sprint_frac(char *dest, int value, int denom) { int frac = value % denom; int result = sprintf(dest, "%d", value / denom); if (frac == 0) return result; dest[result++] = '.'; do { denom /= 10; sprintf(dest + result, "%d", frac / denom); result++; frac %= denom; } while (frac); dest[result++] = '\0'; return result; } static int spi_execute(struct scsi_device *sdev, const void *cmd, enum dma_data_direction dir, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr) { int i, result; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; for(i = 0; i < DV_RETRIES; i++) { result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER, NULL); if (driver_byte(result) & DRIVER_SENSE) { struct scsi_sense_hdr sshdr_tmp; if (!sshdr) sshdr = &sshdr_tmp; if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr) && sshdr->sense_key == UNIT_ATTENTION) continue; } break; } return result; } static struct { enum spi_signal_type value; char *name; } signal_types[] = { { SPI_SIGNAL_UNKNOWN, "unknown" }, { SPI_SIGNAL_SE, "SE" }, { 
SPI_SIGNAL_LVD, "LVD" },
	{ SPI_SIGNAL_HVD, "HVD" },
};

/* Translate a signalling enum to its display name; NULL if unlisted. */
static inline const char *spi_signal_to_string(enum spi_signal_type type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(signal_types); i++) {
		if (type == signal_types[i].value)
			return signal_types[i].name;
	}
	return NULL;
}

/* Parse a (possibly newline-terminated) sysfs string into a signalling
 * enum; SPI_SIGNAL_UNKNOWN when no name matches. */
static inline enum spi_signal_type spi_signal_to_value(const char *name)
{
	int i, len;

	for (i = 0; i < ARRAY_SIZE(signal_types); i++) {
		len = strlen(signal_types[i].name);
		if (strncmp(name, signal_types[i].name, len) == 0 &&
		    (name[len] == '\n' || name[len] == '\0'))
			return signal_types[i].value;
	}
	return SPI_SIGNAL_UNKNOWN;
}

/* Host class setup: signalling starts out unknown until the LLDD
 * reports it via get_signalling. */
static int spi_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);

	spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;

	return 0;
}

static int spi_host_configure(struct transport_container *tc,
			      struct device *dev,
			      struct device *cdev);

static DECLARE_TRANSPORT_CLASS(spi_host_class,
			       "spi_host",
			       spi_host_setup,
			       NULL,
			       spi_host_configure);

/* A host device belongs to the SPI host class iff its transport
 * template was created by this module. */
static int spi_host_match(struct attribute_container *cont,
			  struct device *dev)
{
	struct Scsi_Host *shost;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt || shost->transportt->host_attrs.ac.class
	    != &spi_host_class.class)
		return 0;

	return &shost->transportt->host_attrs.ac == cont;
}

static int spi_target_configure(struct transport_container *tc,
				struct device *dev,
				struct device *cdev);

/* Fill in the target's capability bits from the device's INQUIRY data,
 * honouring the keyed SPI blacklist (e.g. SPI_BLIST_NOIUS). */
static int spi_device_configure(struct transport_container *tc,
				struct device *dev,
				struct device *cdev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct scsi_target *starget = sdev->sdev_target;
	unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
						      &sdev->inquiry[16],
						      SCSI_DEVINFO_SPI);

	/* Populate the target capability fields with the values
	 * gleaned from the device inquiry */
	spi_support_sync(starget) = scsi_device_sync(sdev);
	spi_support_wide(starget) = scsi_device_wide(sdev);
	spi_support_dt(starget) = scsi_device_dt(sdev);
	spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
	spi_support_ius(starget) = scsi_device_ius(sdev);
	if (bflags & SPI_BLIST_NOIUS) {
		dev_info(dev, "Information Units disabled by blacklist\n");
		spi_support_ius(starget) = 0;
	}
	spi_support_qas(starget) = scsi_device_qas(sdev);

	return 0;
}

/* Initialise a freshly-created target's transport attributes to safe
 * narrow/async defaults; real values come from Domain Validation. */
static int spi_setup_transport_attrs(struct transport_container *tc,
				     struct device *dev,
				     struct device *cdev)
{
	struct scsi_target *starget = to_scsi_target(dev);

	spi_period(starget) = -1;	/* illegal value */
	spi_min_period(starget) = 0;
	spi_offset(starget) = 0;	/* async */
	spi_max_offset(starget) = 255;
	spi_width(starget) = 0;	/* narrow */
	spi_max_width(starget) = 1;
	spi_iu(starget) = 0;	/* no IU */
	spi_max_iu(starget) = 1;
	spi_dt(starget) = 0;	/* ST */
	spi_qas(starget) = 0;
	spi_max_qas(starget) = 1;
	spi_wr_flow(starget) = 0;
	spi_rd_strm(starget) = 0;
	spi_rti(starget) = 0;
	spi_pcomp_en(starget) = 0;
	spi_hold_mcs(starget) = 0;
	spi_dv_pending(starget) = 0;
	spi_dv_in_progress(starget) = 0;
	spi_initial_dv(starget) = 0;
	mutex_init(&spi_dv_mutex(starget));

	return 0;
}

/* Generator: sysfs show for attributes stored directly in
 * spi_transport_attrs with no driver callback. */
#define spi_transport_show_simple(field, format_string)			\
									\
static ssize_t								\
show_spi_transport_##field(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct scsi_target *starget = transport_class_to_starget(dev);	\
	struct spi_transport_attrs *tp;					\
									\
	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
	return snprintf(buf, 20, format_string, tp->field);		\
}

/* Generator: sysfs store writing straight into spi_transport_attrs. */
#define spi_transport_store_simple(field, format_string)		\
									\
static ssize_t								\
store_spi_transport_##field(struct device *dev,				\
			    struct device_attribute *attr,		\
			    const char *buf, size_t count)		\
{									\
	int val;							\
	struct scsi_target *starget = transport_class_to_starget(dev);	\
	struct spi_transport_attrs *tp;					\
									\
	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
	val = simple_strtoul(buf, NULL, 0);				\
	tp->field = val;						\
	return count;							\
}

/* Generator: sysfs show that first asks the driver to refresh the
 * cached value via its get_<field> callback (when provided). */
#define spi_transport_show_function(field, format_string)		\
									\
static ssize_t								\
show_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_transport_attrs *tp; \ struct spi_internal *i = to_spi_internal(shost->transportt); \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ if (i->f->get_##field) \ i->f->get_##field(starget); \ return snprintf(buf, 20, format_string, tp->field); \ } #define spi_transport_store_function(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ \ if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_store_max(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ if (i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ val = tp->max_##field; \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_rd_attr(field, format_string) \ spi_transport_show_function(field, format_string) \ spi_transport_store_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); #define 
spi_transport_simple_attr(field, format_string)			\
	spi_transport_show_simple(field, format_string)			\
	spi_transport_store_simple(field, format_string)		\
static DEVICE_ATTR(field, S_IRUGO,					\
		   show_spi_transport_##field,				\
		   store_spi_transport_##field);

/* Emit a driver-backed <field> attribute plus a simple max_<field>
 * companion that only records the administrative ceiling. */
#define spi_transport_max_attr(field, format_string)			\
	spi_transport_show_function(field, format_string)		\
	spi_transport_store_max(field, format_string)			\
	spi_transport_simple_attr(max_##field, format_string)		\
static DEVICE_ATTR(field, S_IRUGO,					\
		   show_spi_transport_##field,				\
		   store_spi_transport_##field);

/* The Parallel SCSI Transport Attributes: */
spi_transport_max_attr(offset, "%d\n");
spi_transport_max_attr(width, "%d\n");
spi_transport_max_attr(iu, "%d\n");
spi_transport_rd_attr(dt, "%d\n");
spi_transport_max_attr(qas, "%d\n");
spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n");
spi_transport_rd_attr(pcomp_en, "%d\n");
spi_transport_rd_attr(hold_mcs, "%d\n");

/* we only care about the first child device that's a real SCSI device
 * so we return 1 to terminate the iteration when we find it */
static int child_iter(struct device *dev, void *data)
{
	if (!scsi_is_sdev_device(dev))
		return 0;

	spi_dv_device(to_scsi_device(dev));
	return 1;
}

/* sysfs "revalidate": rerun Domain Validation on the target's first
 * real SCSI device child. */
static ssize_t
store_spi_revalidate(struct device *dev, struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct scsi_target *starget = transport_class_to_starget(dev);

	device_for_each_child(&starget->dev, NULL, child_iter);
	return count;
}
static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);

/* Translate the period into ns according to the current spec
 * for SDTR/PPR messages */
static int period_to_str(char *buf, int period)
{
	int len, picosec;

	if (period < 0 || period > 0xff) {
		picosec = -1;
	} else if (period <= SPI_STATIC_PPR) {
		picosec = ppr_to_ps[period];
	} else {
		picosec = period * 4000;
	}

	if (picosec == -1) {
		len = sprintf(buf, "reserved");
	} else {
		len = sprint_frac(buf, picosec, 1000);
	}

	return len;
}

static ssize_t
show_spi_transport_period_helper(char *buf, int period)
{
	int len = period_to_str(buf, period);
	buf[len++] = '\n';
	buf[len] = '\0';
	return len;
}

/* Parse an "<int>[.frac]" period (in ns) from sysfs into a PPR period
 * factor, rounding up to the nearest supported period. */
static ssize_t
store_spi_transport_period_helper(struct device *dev, const char *buf,
				  size_t count, int *periodp)
{
	int j, picosec, period = -1;
	char *endp;

	picosec = simple_strtoul(buf, &endp, 10) * 1000;
	if (*endp == '.') {
		/* accumulate up to three fractional digits */
		int mult = 100;
		do {
			endp++;
			if (!isdigit(*endp))
				break;
			picosec += (*endp - '0') * mult;
			mult /= 10;
		} while (mult > 0);
	}

	for (j = 0; j <= SPI_STATIC_PPR; j++) {
		if (ppr_to_ps[j] < picosec)
			continue;
		period = j;
		break;
	}

	if (period == -1)
		period = picosec / 4000;

	if (period > 0xff)
		period = 0xff;

	*periodp = period;

	return count;
}

static ssize_t
show_spi_transport_period(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct scsi_target *starget = transport_class_to_starget(dev);
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct spi_internal *i = to_spi_internal(shost->transportt);
	struct spi_transport_attrs *tp =
		(struct spi_transport_attrs *)&starget->starget_data;

	if (i->f->get_period)
		i->f->get_period(starget);

	return show_spi_transport_period_helper(buf, tp->period);
}

static ssize_t
store_spi_transport_period(struct device *cdev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct scsi_target *starget = transport_class_to_starget(cdev);
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct spi_internal *i = to_spi_internal(shost->transportt);
	struct spi_transport_attrs *tp =
		(struct spi_transport_attrs *)&starget->starget_data;
	int period, retval;

	if (!i->f->set_period)
		return -EINVAL;

	retval = store_spi_transport_period_helper(cdev, buf, count, &period);

	/* never allow a period faster than the target's advertised minimum */
	if (period < tp->min_period)
		period = tp->min_period;

	i->f->set_period(starget, period);

	return retval;
}

static DEVICE_ATTR(period, S_IRUGO,
		   show_spi_transport_period,
		   store_spi_transport_period);

static ssize_t
show_spi_transport_min_period(struct device *cdev,
			      struct device_attribute *attr, char *buf)
{
	struct scsi_target *starget = transport_class_to_starget(cdev);
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct spi_internal *i = to_spi_internal(shost->transportt);
	struct spi_transport_attrs *tp =
		(struct spi_transport_attrs *)&starget->starget_data;

	/* min_period is only meaningful when a period can be set */
	if (!i->f->set_period)
		return -EINVAL;

	return show_spi_transport_period_helper(buf, tp->min_period);
}

static ssize_t
store_spi_transport_min_period(struct device *cdev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct scsi_target *starget = transport_class_to_starget(cdev);
	struct spi_transport_attrs *tp =
		(struct spi_transport_attrs *)&starget->starget_data;

	return store_spi_transport_period_helper(cdev, buf, count,
						 &tp->min_period);
}

static DEVICE_ATTR(min_period, S_IRUGO,
		   show_spi_transport_min_period,
		   store_spi_transport_min_period);

static ssize_t show_spi_host_signalling(struct device *cdev,
					struct device_attribute *attr,
					char *buf)
{
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct spi_internal *i = to_spi_internal(shost->transportt);

	if (i->f->get_signalling)
		i->f->get_signalling(shost);

	return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost)));
}
static ssize_t store_spi_host_signalling(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct Scsi_Host *shost = transport_class_to_shost(dev);
	struct spi_internal *i = to_spi_internal(shost->transportt);
	enum spi_signal_type type = spi_signal_to_value(buf);

	if (!i->f->set_signalling)
		return -EINVAL;

	if (type != SPI_SIGNAL_UNKNOWN)
		i->f->set_signalling(shost, type);

	return count;
}
static DEVICE_ATTR(signalling, S_IRUGO,
		   show_spi_host_signalling,
		   store_spi_host_signalling);

/* Call the driver setter for <x> only when one was supplied. */
#define DV_SET(x, y)			\
	if(i->f->set_##x)		\
		i->f->set_##x(sdev->sdev_target, y)

enum spi_compare_returns {
	SPI_COMPARE_SUCCESS,
	SPI_COMPARE_FAILURE,
	SPI_COMPARE_SKIP_TEST,
};

/* This is for
read/write Domain Validation: If the device supports
 * an echo buffer, we do read/write tests to it */
static enum spi_compare_returns
spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
			  u8 *ptr, const int retries)
{
	int len = ptr - buffer;
	int j, k, r, result;
	unsigned int pattern = 0x0000ffff;
	struct scsi_sense_hdr sshdr;

	const char spi_write_buffer[] = {
		WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
	};
	const char spi_read_buffer[] = {
		READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
	};

	/* set up the pattern buffer.  Doesn't matter if we spill
	 * slightly beyond since that's where the read buffer is */
	for (j = 0; j < len; ) {

		/* fill the buffer with counting (test a) */
		for ( ; j < min(len, 32); j++)
			buffer[j] = j;
		k = j;
		/* fill the buffer with alternating words of 0x0 and
		 * 0xffff (test b) */
		for ( ; j < min(len, k + 32); j += 2) {
			u16 *word = (u16 *)&buffer[j];

			*word = (j & 0x02) ? 0x0000 : 0xffff;
		}
		k = j;
		/* fill with crosstalk (alternating 0x5555 0xaaa)
		 * (test c) */
		for ( ; j < min(len, k + 32); j += 2) {
			u16 *word = (u16 *)&buffer[j];

			*word = (j & 0x02) ? 0x5555 : 0xaaaa;
		}
		k = j;
		/* fill with shifting bits (test d) */
		for ( ; j < min(len, k + 32); j += 4) {
			u32 *word = (unsigned int *)&buffer[j];
			u32 roll = (pattern & 0x80000000) ? 1 : 0;

			*word = pattern;
			pattern = (pattern << 1) | roll;
		}
		/* don't bother with random data (test e) */
	}

	for (r = 0; r < retries; r++) {
		result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
				     buffer, len, &sshdr);
		if(result || !scsi_device_online(sdev)) {

			scsi_device_set_state(sdev, SDEV_QUIESCE);
			if (scsi_sense_valid(&sshdr)
			    && sshdr.sense_key == ILLEGAL_REQUEST
			    /* INVALID FIELD IN CDB */
			    && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
				/* This would mean that the drive lied
				 * to us about supporting an echo
				 * buffer (unfortunately some Western
				 * Digital drives do precisely this)
				 */
				return SPI_COMPARE_SKIP_TEST;

			sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result);
			return SPI_COMPARE_FAILURE;
		}

		memset(ptr, 0, len);
		spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
			    ptr, len, NULL);
		scsi_device_set_state(sdev, SDEV_QUIESCE);
		if (memcmp(buffer, ptr, len) != 0)
			return SPI_COMPARE_FAILURE;
	}
	return SPI_COMPARE_SUCCESS;
}

/* This is for the simplest form of Domain Validation: a read test
 * on the inquiry data from the device */
static enum spi_compare_returns
spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
			      u8 *ptr, const int retries)
{
	int r, result;
	const int len = sdev->inquiry_len;
	const char spi_inquiry[] = {
		INQUIRY, 0, 0, 0, len, 0
	};

	for (r = 0; r < retries; r++) {
		memset(ptr, 0, len);

		result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
				     ptr, len, NULL);

		if(result || !scsi_device_online(sdev)) {
			scsi_device_set_state(sdev, SDEV_QUIESCE);
			return SPI_COMPARE_FAILURE;
		}

		/* If we don't have the inquiry data already, the
		 * first read gets it */
		if (ptr == buffer) {
			ptr += len;
			--r;
			continue;
		}

		if (memcmp(buffer, ptr, len) != 0)
			/* failure */
			return SPI_COMPARE_FAILURE;
	}
	return SPI_COMPARE_SUCCESS;
}

/* Run compare_fn repeatedly, falling back (IU off, then QAS off, then
 * progressively slower periods) until the test passes, is skipped, or
 * the target is dropped to asynchronous. */
static enum spi_compare_returns
spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
	       enum spi_compare_returns
	       (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
{
	struct spi_internal *i = to_spi_internal(sdev->host->transportt);
	struct scsi_target *starget = sdev->sdev_target;
	int period = 0, prevperiod = 0;
	enum spi_compare_returns retval;

	for (;;) {
		int newperiod;

		retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);

		if (retval == SPI_COMPARE_SUCCESS
		    || retval == SPI_COMPARE_SKIP_TEST)
			break;

		/* OK, retrain, fallback */
		if (i->f->get_iu)
			i->f->get_iu(starget);
		if (i->f->get_qas)
			i->f->get_qas(starget);
		if (i->f->get_period)
			i->f->get_period(sdev->sdev_target);

		/* Here's the fallback sequence; first try turning off
		 * IU, then QAS (if we can control them), then finally
		 * fall down the periods */
		if (i->f->set_iu && spi_iu(starget)) {
			starget_printk(KERN_ERR, starget, "Domain Validation Disabing Information Units\n");
			DV_SET(iu, 0);
		} else if (i->f->set_qas && spi_qas(starget)) {
			starget_printk(KERN_ERR, starget, "Domain Validation Disabing Quick Arbitration and Selection\n");
			DV_SET(qas, 0);
		} else {
			newperiod = spi_period(starget);
			period = newperiod > period ? newperiod : period;
			if (period < 0x0d)
				period++;
			else
				period += period >> 1;

			if (unlikely(period > 0xff || period == prevperiod)) {
				/* Total failure; set to async and return */
				starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n");
				DV_SET(offset, 0);
				return SPI_COMPARE_FAILURE;
			}
			starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n");
			DV_SET(period, period);
			prevperiod = period;
		}
	}
	return retval;
}

/* Probe for an echo buffer via READ BUFFER (descriptor mode); returns
 * its size in bytes, or 0 when the device has none / won't cooperate. */
static int
spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
{
	int l, result;

	/* first off do a test unit ready.  This can error out
	 * because of reservations or some other reason.  If it
	 * fails, the device won't let us write to the echo buffer
	 * so just return failure */

	const char spi_test_unit_ready[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0
	};

	const char spi_read_buffer_descriptor[] = {
		READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0
	};

	/* We send a set of three TURs to clear any outstanding
	 * unit attention conditions if they exist (Otherwise the
	 * buffer tests won't be happy).  If the TUR still fails
	 * (reservation conflict, device not ready, etc) just
	 * skip the write tests */
	for (l = 0; ; l++) {
		result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
				     NULL, 0, NULL);

		if(result) {
			if(l >= 3)
				return 0;
		} else {
			/* TUR succeeded */
			break;
		}
	}

	result = spi_execute(sdev, spi_read_buffer_descriptor,
			     DMA_FROM_DEVICE, buffer, 4, NULL);

	if (result)
		/* Device has no echo buffer */
		return 0;

	return buffer[3] + ((buffer[2] & 0x1f) << 8);
}

/* Core DV state machine: sanity-check at narrow/async, test wide, then
 * negotiate up to the maximum and retrain with read (and, when an echo
 * buffer was negotiated under DT, read/write) pattern tests. */
static void
spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
{
	struct spi_internal *i = to_spi_internal(sdev->host->transportt);
	struct scsi_target *starget = sdev->sdev_target;
	struct Scsi_Host *shost = sdev->host;
	int len = sdev->inquiry_len;
	int min_period = spi_min_period(starget);
	int max_width = spi_max_width(starget);
	/* first set us up for narrow async */
	DV_SET(offset, 0);
	DV_SET(width, 0);

	if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
	    != SPI_COMPARE_SUCCESS) {
		starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
		/* FIXME: should probably offline the device here? */
		return;
	}

	if (!spi_support_wide(starget)) {
		spi_max_width(starget) = 0;
		max_width = 0;
	}

	/* test width */
	if (i->f->set_width && max_width) {
		i->f->set_width(starget, 1);

		if (spi_dv_device_compare_inquiry(sdev, buffer,
						  buffer + len,
						  DV_LOOPS)
		    != SPI_COMPARE_SUCCESS) {
			starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
			i->f->set_width(starget, 0);
			/* Make sure we don't force wide back on by asking
			 * for a transfer period that requires it */
			max_width = 0;
			if (min_period < 10)
				min_period = 10;
		}
	}

	if (!i->f->set_period)
		return;

	/* device can't handle synchronous */
	if (!spi_support_sync(starget) && !spi_support_dt(starget))
		return;

	/* len == -1 is the signal that we need to ascertain the
	 * presence of an echo buffer before trying to use it.  len ==
	 * 0 means we don't have an echo buffer */
	len = -1;

 retry:

	/* now set up to the maximum */
	DV_SET(offset, spi_max_offset(starget));
	DV_SET(period, min_period);

	/* try QAS requests; this should be harmless to set if the
	 * target supports it */
	if (spi_support_qas(starget) && spi_max_qas(starget)) {
		DV_SET(qas, 1);
	} else {
		DV_SET(qas, 0);
	}

	if (spi_support_ius(starget) && spi_max_iu(starget) &&
	    min_period < 9) {
		/* This u320 (or u640). Set IU transfers */
		DV_SET(iu, 1);
		/* Then set the optional parameters */
		DV_SET(rd_strm, 1);
		DV_SET(wr_flow, 1);
		DV_SET(rti, 1);
		if (min_period == 8)
			DV_SET(pcomp_en, 1);
	} else {
		DV_SET(iu, 0);
	}

	/* now that we've done all this, actually check the bus
	 * signal type (if known).  Some devices are stupid on
	 * a SE bus and still claim they can try LVD only settings */
	if (i->f->get_signalling)
		i->f->get_signalling(shost);
	if (spi_signalling(shost) == SPI_SIGNAL_SE ||
	    spi_signalling(shost) == SPI_SIGNAL_HVD ||
	    !spi_support_dt(starget)) {
		DV_SET(dt, 0);
	} else {
		DV_SET(dt, 1);
	}
	/* set width last because it will pull all the other
	 * parameters down to required values */
	DV_SET(width, max_width);

	/* Do the read only INQUIRY tests */
	spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
		       spi_dv_device_compare_inquiry);
	/* See if we actually managed to negotiate and sustain DT */
	if (i->f->get_dt)
		i->f->get_dt(starget);

	/* see if the device has an echo buffer.  If it does we can do
	 * the SPI pattern write tests.  Because of some broken
	 * devices, we *only* try this on a device that has actually
	 * negotiated DT */

	if (len == -1 && spi_dt(starget))
		len = spi_dv_device_get_echo_buffer(sdev, buffer);

	if (len <= 0) {
		starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n");
		return;
	}

	if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
		starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
		len = SPI_MAX_ECHO_BUFFER_SIZE;
	}

	if (spi_dv_retrain(sdev, buffer, buffer + len,
			   spi_dv_device_echo_buffer)
	    == SPI_COMPARE_SKIP_TEST) {
		/* OK, the stupid drive can't do a write echo buffer
		 * test after all, fall back to the read tests */
		len = 0;
		goto retry;
	}
}

/**	spi_dv_device - Do Domain Validation on the device
 *	@sdev:		scsi device to validate
 *
 *	Performs the domain validation on the given device in the
 *	current execution thread.  Since DV operations may sleep,
 *	the current thread must have user context.  Also no SCSI
 *	related locks that would deadlock I/O issued by the DV may
 *	be held.
 */
void
spi_dv_device(struct scsi_device *sdev)
{
	struct scsi_target *starget = sdev->sdev_target;
	u8 *buffer;
	const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;

	if (unlikely(scsi_device_get(sdev)))
		return;

	/* a plain flag, not a lock: concurrent callers are expected to
	 * be serialised by dv_mutex below */
	if (unlikely(spi_dv_in_progress(starget)))
		return;
	spi_dv_in_progress(starget) = 1;

	buffer = kzalloc(len, GFP_KERNEL);

	if (unlikely(!buffer))
		goto out_put;

	/* We need to verify that the actual device will quiesce; the
	 * later target quiesce is just a nice to have */
	if (unlikely(scsi_device_quiesce(sdev)))
		goto out_free;

	scsi_target_quiesce(starget);

	spi_dv_pending(starget) = 1;
	mutex_lock(&spi_dv_mutex(starget));

	starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n");

	spi_dv_device_internal(sdev, buffer);

	starget_printk(KERN_INFO, starget, "Ending Domain Validation\n");

	mutex_unlock(&spi_dv_mutex(starget));
	spi_dv_pending(starget) = 0;

	scsi_target_resume(starget);

	spi_initial_dv(starget) = 1;

 out_free:
	kfree(buffer);
 out_put:
	spi_dv_in_progress(starget) = 0;
	scsi_device_put(sdev);
}
EXPORT_SYMBOL(spi_dv_device);

/* Deferred-DV bookkeeping: carries the sdev into the workqueue. */
struct work_queue_wrapper {
	struct work_struct	work;
	struct scsi_device	*sdev;
};

/* Workqueue callback: run DV, then drop the pending flag and the
 * device reference taken by spi_schedule_dv_device(). */
static void
spi_dv_device_work_wrapper(struct work_struct *work)
{
	struct work_queue_wrapper *wqw =
		container_of(work, struct work_queue_wrapper, work);
	struct scsi_device *sdev = wqw->sdev;

	kfree(wqw);
	spi_dv_device(sdev);
	spi_dv_pending(sdev->sdev_target) = 0;
	scsi_device_put(sdev);
}

/**
 * spi_schedule_dv_device - schedule domain validation to occur on the device
 * @sdev:	The device to validate
 *
 * Identical to spi_dv_device() above, except that the DV will be
 * scheduled to occur in a workqueue later.  All memory allocations
 * are atomic, so may be called from any context including those holding
 * SCSI locks.
 */
void
spi_schedule_dv_device(struct scsi_device *sdev)
{
	struct work_queue_wrapper *wqw =
		kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);

	if (unlikely(!wqw))
		return;

	if (unlikely(spi_dv_pending(sdev->sdev_target))) {
		kfree(wqw);
		return;
	}
	/* Set pending early (dv_device doesn't check it, only sets it) */
	spi_dv_pending(sdev->sdev_target) = 1;
	if (unlikely(scsi_device_get(sdev))) {
		kfree(wqw);
		spi_dv_pending(sdev->sdev_target) = 0;
		return;
	}

	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
	wqw->sdev = sdev;

	schedule_work(&wqw->work);
}
EXPORT_SYMBOL(spi_schedule_dv_device);

/**
 * spi_display_xfer_agreement - Print the current target transfer agreement
 * @starget: The target for which to display the agreement
 *
 * Each SPI port is required to maintain a transfer agreement for each
 * other port on the bus.  This function prints a one-line summary of
 * the current agreement; more detailed information is available in sysfs.
 */
void spi_display_xfer_agreement(struct scsi_target *starget)
{
	struct spi_transport_attrs *tp;
	tp = (struct spi_transport_attrs *)&starget->starget_data;

	if (tp->offset > 0 && tp->period > 0) {
		unsigned int picosec, kb100;
		char *scsi = "FAST-?";
		char tmp[8];

		if (tp->period <= SPI_STATIC_PPR) {
			picosec = ppr_to_ps[tp->period];
			switch (tp->period) {
			case 7: scsi = "FAST-320"; break;
			case 8: scsi = "FAST-160"; break;
			case 9: scsi = "FAST-80"; break;
			case 10:
			case 11: scsi = "FAST-40"; break;
			case 12: scsi = "FAST-20"; break;
			}
		} else {
			picosec = tp->period * 4000;
			if (tp->period < 25)
				scsi = "FAST-20";
			else if (tp->period < 50)
				scsi = "FAST-10";
			else
				scsi = "FAST-5";
		}

		/* round to the nearest 100 kB/s */
		kb100 = (10000000 + picosec / 2) / picosec;
		if (tp->width)
			kb100 *= 2;
		sprint_frac(tmp, picosec, 1000);

		dev_info(&starget->dev,
			 "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n",
			 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10,
			 tp->dt ? "DT" : "ST",
			 tp->iu ? " IU" : "",
			 tp->qas ? " QAS" : "",
			 tp->rd_strm ? " RDSTRM" : "",
			 tp->rti ? " RTI" : "",
			 tp->wr_flow ?
" WRFLOW" : "", tp->pcomp_en ? " PCOMP" : "", tp->hold_mcs ? " HMCS" : "", tmp, tp->offset); } else { dev_info(&starget->dev, "%sasynchronous\n", tp->width ? "wide " : ""); } } EXPORT_SYMBOL(spi_display_xfer_agreement); int spi_populate_width_msg(unsigned char *msg, int width) { msg[0] = EXTENDED_MESSAGE; msg[1] = 2; msg[2] = EXTENDED_WDTR; msg[3] = width; return 4; } EXPORT_SYMBOL_GPL(spi_populate_width_msg); int spi_populate_sync_msg(unsigned char *msg, int period, int offset) { msg[0] = EXTENDED_MESSAGE; msg[1] = 3; msg[2] = EXTENDED_SDTR; msg[3] = period; msg[4] = offset; return 5; } EXPORT_SYMBOL_GPL(spi_populate_sync_msg); int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, int options) { msg[0] = EXTENDED_MESSAGE; msg[1] = 6; msg[2] = EXTENDED_PPR; msg[3] = period; msg[4] = 0; msg[5] = offset; msg[6] = width; msg[7] = options; return 8; } EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); #ifdef CONFIG_SCSI_CONSTANTS static const char * const one_byte_msgs[] = { /* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", /* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", /* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error", /* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag", /* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set", /* 0x0f */ "Initiate Recovery", "Release Recovery", /* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable", /* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset" }; static const char * const two_byte_msgs[] = { /* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag", /* 0x23 */ "Ignore Wide Residue", "ACA" }; static const char * const extended_msgs[] = { /* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request", /* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request", /* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer" }; static void print_nego(const unsigned char 
*msg, int per, int off, int width) { if (per) { char buf[20]; period_to_str(buf, msg[per]); printk("period = %s ns ", buf); } if (off) printk("offset = %d ", msg[off]); if (width) printk("width = %d ", 8 << msg[width]); } static void print_ptr(const unsigned char *msg, int msb, const char *desc) { int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | msg[msb+3]; printk("%s = %d ", desc, ptr); } int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; if (msg[2] < ARRAY_SIZE(extended_msgs)) printk ("%s ", extended_msgs[msg[2]]); else printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]); switch (msg[2]) { case EXTENDED_MODIFY_DATA_POINTER: print_ptr(msg, 3, "pointer"); break; case EXTENDED_SDTR: print_nego(msg, 3, 4, 0); break; case EXTENDED_WDTR: print_nego(msg, 0, 0, 3); break; case EXTENDED_PPR: print_nego(msg, 3, 5, 6); break; case EXTENDED_MODIFY_BIDI_DATA_PTR: print_ptr(msg, 3, "out"); print_ptr(msg, 7, "in"); break; default: for (i = 2; i < len; ++i) printk("%02x ", msg[i]); } /* Identify */ } else if (msg[0] & 0x80) { printk("Identify disconnect %sallowed %s %d ", (msg[0] & 0x40) ? "" : "not ", (msg[0] & 0x20) ? 
"target routine" : "lun", msg[0] & 0x7); /* Normal One byte */ } else if (msg[0] < 0x1f) { if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) printk("%s ", one_byte_msgs[msg[0]]); else printk("reserved (%02x) ", msg[0]); } else if (msg[0] == 0x55) { printk("QAS Request "); /* Two byte */ } else if (msg[0] <= 0x2f) { if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], msg[1]); else printk("reserved two byte (%02x %02x) ", msg[0], msg[1]); len = 2; } else printk("reserved "); return len; } EXPORT_SYMBOL(spi_print_msg); #else /* ifndef CONFIG_SCSI_CONSTANTS */ int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; for (i = 0; i < len; ++i) printk("%02x ", msg[i]); /* Identify */ } else if (msg[0] & 0x80) { printk("%02x ", msg[0]); /* Normal One byte */ } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { printk("%02x ", msg[0]); /* Two byte */ } else if (msg[0] <= 0x2f) { printk("%02x %02x", msg[0], msg[1]); len = 2; } else printk("%02x ", msg[0]); return len; } EXPORT_SYMBOL(spi_print_msg); #endif /* ! 
CONFIG_SCSI_CONSTANTS */

/* A SCSI device binds to the anonymous SPI device class when its host
 * registered our template and the driver doesn't deny the binding. */
static int spi_device_match(struct attribute_container *cont,
			    struct device *dev)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost;
	struct spi_internal *i;

	if (!scsi_is_sdev_device(dev))
		return 0;

	sdev = to_scsi_device(dev);
	shost = sdev->host;
	if (!shost->transportt || shost->transportt->host_attrs.ac.class
	    != &spi_host_class.class)
		return 0;
	/* Note: this class has no device attributes, so it has
	 * no per-HBA allocation and thus we don't need to distinguish
	 * the attribute containers for the device */
	i = to_spi_internal(shost->transportt);
	if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target))
		return 0;
	return 1;
}

/* Same test for the per-target attribute container. */
static int spi_target_match(struct attribute_container *cont,
			    struct device *dev)
{
	struct Scsi_Host *shost;
	struct scsi_target *starget;
	struct spi_internal *i;

	if (!scsi_is_target_device(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt || shost->transportt->host_attrs.ac.class
	    != &spi_host_class.class)
		return 0;

	i = to_spi_internal(shost->transportt);
	starget = to_scsi_target(dev);

	if (i->f->deny_binding && i->f->deny_binding(starget))
		return 0;

	return &i->t.target_attrs.ac == cont;
}

static DECLARE_TRANSPORT_CLASS(spi_transport_class,
			       "spi_transport",
			       spi_setup_transport_attrs,
			       NULL,
			       spi_target_configure);

static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
				    spi_device_match,
				    spi_device_configure);

static struct attribute *host_attributes[] = {
	&dev_attr_signalling.attr,
	NULL
};

static struct attribute_group host_attribute_group = {
	.attrs = host_attributes,
};

/* Make "signalling" writable only when the driver supplies a setter. */
static int spi_host_configure(struct transport_container *tc,
			      struct device *dev,
			      struct device *cdev)
{
	struct kobject *kobj = &cdev->kobj;
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct spi_internal *si = to_spi_internal(shost->transportt);
	struct attribute *attr = &dev_attr_signalling.attr;
	int rc = 0;

	if (si->f->set_signalling)
		rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);

	return rc;
}

/* returns true if we should be showing the variable.  Also
 * overloads the return by setting 1<<1 if the attribute should
 * be writeable */
#define TARGET_ATTRIBUTE_HELPER(name) \
	(si->f->show_##name ? S_IRUGO : 0) | \
	(si->f->set_##name ? S_IWUSR : 0)

/* Per-attribute visibility/permission callback for the target group;
 * each attribute is exposed only when the target supports the feature
 * and the driver implements the corresponding callbacks. */
static mode_t target_attribute_is_visible(struct kobject *kobj,
					  struct attribute *attr, int i)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct scsi_target *starget = transport_class_to_starget(cdev);
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct spi_internal *si = to_spi_internal(shost->transportt);

	if (attr == &dev_attr_period.attr &&
	    spi_support_sync(starget))
		return TARGET_ATTRIBUTE_HELPER(period);
	else if (attr == &dev_attr_min_period.attr &&
		 spi_support_sync(starget))
		return TARGET_ATTRIBUTE_HELPER(period);
	else if (attr == &dev_attr_offset.attr &&
		 spi_support_sync(starget))
		return TARGET_ATTRIBUTE_HELPER(offset);
	else if (attr == &dev_attr_max_offset.attr &&
		 spi_support_sync(starget))
		return TARGET_ATTRIBUTE_HELPER(offset);
	else if (attr == &dev_attr_width.attr &&
		 spi_support_wide(starget))
		return TARGET_ATTRIBUTE_HELPER(width);
	else if (attr == &dev_attr_max_width.attr &&
		 spi_support_wide(starget))
		return TARGET_ATTRIBUTE_HELPER(width);
	else if (attr == &dev_attr_iu.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(iu);
	else if (attr == &dev_attr_max_iu.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(iu);
	else if (attr == &dev_attr_dt.attr &&
		 spi_support_dt(starget))
		return TARGET_ATTRIBUTE_HELPER(dt);
	else if (attr == &dev_attr_qas.attr &&
		 spi_support_qas(starget))
		return TARGET_ATTRIBUTE_HELPER(qas);
	else if (attr == &dev_attr_max_qas.attr &&
		 spi_support_qas(starget))
		return TARGET_ATTRIBUTE_HELPER(qas);
	else if (attr == &dev_attr_wr_flow.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(wr_flow);
	else if (attr == &dev_attr_rd_strm.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(rd_strm);
	else if (attr == &dev_attr_rti.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(rti);
	else if (attr == &dev_attr_pcomp_en.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(pcomp_en);
	else if (attr == &dev_attr_hold_mcs.attr &&
		 spi_support_ius(starget))
		return TARGET_ATTRIBUTE_HELPER(hold_mcs);
	else if (attr == &dev_attr_revalidate.attr)
		return S_IWUSR;

	return 0;
}

static struct attribute *target_attributes[] = {
	&dev_attr_period.attr,
	&dev_attr_min_period.attr,
	&dev_attr_offset.attr,
	&dev_attr_max_offset.attr,
	&dev_attr_width.attr,
	&dev_attr_max_width.attr,
	&dev_attr_iu.attr,
	&dev_attr_max_iu.attr,
	&dev_attr_dt.attr,
	&dev_attr_qas.attr,
	&dev_attr_max_qas.attr,
	&dev_attr_wr_flow.attr,
	&dev_attr_rd_strm.attr,
	&dev_attr_rti.attr,
	&dev_attr_pcomp_en.attr,
	&dev_attr_hold_mcs.attr,
	&dev_attr_revalidate.attr,
	NULL
};

static struct attribute_group target_attribute_group = {
	.attrs = target_attributes,
	.is_visible = target_attribute_is_visible,
};

static int spi_target_configure(struct transport_container *tc,
				struct device *dev,
				struct device *cdev)
{
	struct kobject *kobj = &cdev->kobj;

	/* force an update based on parameters read from the device */
	sysfs_update_group(kobj, &target_attribute_group);

	return 0;
}

/* Build and register a transport template for a host driver's SPI
 * function table; the caller owns the returned template until it calls
 * spi_release_transport(). */
struct scsi_transport_template *
spi_attach_transport(struct spi_function_template *ft)
{
	struct spi_internal *i = kzalloc(sizeof(struct spi_internal),
					 GFP_KERNEL);

	if (unlikely(!i))
		return NULL;

	i->t.target_attrs.ac.class = &spi_transport_class.class;
	i->t.target_attrs.ac.grp = &target_attribute_group;
	i->t.target_attrs.ac.match = spi_target_match;
	transport_container_register(&i->t.target_attrs);
	i->t.target_size = sizeof(struct spi_transport_attrs);
	i->t.host_attrs.ac.class = &spi_host_class.class;
	i->t.host_attrs.ac.grp = &host_attribute_group;
	i->t.host_attrs.ac.match = spi_host_match;
	transport_container_register(&i->t.host_attrs);
	i->t.host_size = sizeof(struct spi_host_attrs);
	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL(spi_attach_transport);

void spi_release_transport(struct scsi_transport_template *t)
{
	struct spi_internal *i = to_spi_internal(t);

	transport_container_unregister(&i->t.target_attrs);
	transport_container_unregister(&i->t.host_attrs);

	kfree(i);
}
EXPORT_SYMBOL(spi_release_transport);

static __init int spi_transport_init(void)
{
	int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
					   "SCSI Parallel Transport Class");
	if (!error) {
		int i;

		for (i = 0; spi_static_device_list[i].vendor; i++)
			scsi_dev_info_list_add_keyed(1,	/* compatible */
						     spi_static_device_list[i].vendor,
						     spi_static_device_list[i].model,
						     NULL,
						     spi_static_device_list[i].flags,
						     SCSI_DEVINFO_SPI);
	}

	error = transport_class_register(&spi_transport_class);
	if (error)
		return error;
	/* NOTE(review): the anon_transport_class_register() result is
	 * overwritten and never checked; the return value only reflects
	 * the spi_host_class registration. */
	error = anon_transport_class_register(&spi_device_class);
	return transport_class_register(&spi_host_class);
}

static void __exit spi_transport_exit(void)
{
	transport_class_unregister(&spi_transport_class);
	anon_transport_class_unregister(&spi_device_class);
	transport_class_unregister(&spi_host_class);
	scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
}

MODULE_AUTHOR("Martin Hicks");
MODULE_DESCRIPTION("SPI Transport Attributes");
MODULE_LICENSE("GPL");

module_init(spi_transport_init);
module_exit(spi_transport_exit);
gpl-2.0
resin-io/linux
arch/mips/pci/fixup-emma2rh.c
4350
2997
/* * Copyright (C) NEC Electronics Corporation 2004-2006 * * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c * * Copyright 2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <asm/bootinfo.h> #include <asm/emma/emma2rh.h> #define EMMA2RH_PCI_HOST_SLOT 0x09 #define EMMA2RH_USB_SLOT 0x03 #define PCI_DEVICE_ID_NEC_EMMA2RH 0x014b /* EMMA2RH PCI Host */ /* * we fix up irqs based on the slot number. * The first entry is at AD:11. * Fortunately this works because, although we have two pci buses, * they all have different slot numbers (except for rockhopper slot 20 * which is handled below). 
* */ #define MAX_SLOT_NUM 10 static unsigned char irq_map[][5] __initdata = { [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, 0,}, [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, [5] = {0, 0, 0, 0, 0,}, [6] = {0, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, MARKEINS_PCI_IRQ_INTA, MARKEINS_PCI_IRQ_INTB,}, }; static void nec_usb_controller_fixup(struct pci_dev *dev) { if (PCI_SLOT(dev->devfn) == EMMA2RH_USB_SLOT) /* on board USB controller configuration */ pci_write_config_dword(dev, 0xe4, 1 << 5); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, nec_usb_controller_fixup); /* * Prevent the PCI layer from seeing the resources allocated to this device * if it is the host bridge by marking it as such. These resources are of * no consequence to the PCI layer (they are handled elsewhere). */ static void emma2rh_pci_host_fixup(struct pci_dev *dev) { int i; if (PCI_SLOT(dev->devfn) == EMMA2RH_PCI_HOST_SLOT) { dev->class &= 0xff; dev->class |= PCI_CLASS_BRIDGE_HOST << 8; for (i = 0; i < PCI_NUM_RESOURCES; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, emma2rh_pci_host_fixup); int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_map[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
Genymobile/genymotion-kernel
arch/mips/pci/fixup-pmcmsp.c
4350
8254
/* * PMC-Sierra MSP board specific pci fixups. * * Copyright 2001 MontaVista Software Inc. * Copyright 2005-2007 PMC-Sierra, Inc * * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifdef CONFIG_PCI #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/byteorder.h> #include <msp_pci.h> #include <msp_cic_int.h> /* PCI interrupt pins */ #define IRQ4 MSP_INT_EXT4 #define IRQ5 MSP_INT_EXT5 #define IRQ6 MSP_INT_EXT6 #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */ }; #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 
(AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #else /* Unknown board -- don't assign any IRQs */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused 
*/ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #endif /***************************************************************************** * * FUNCTION: pcibios_plat_dev_init * _________________________________________________________________________ * * DESCRIPTION: Perform platform specific device initialization at * pci_enable_device() time. * None are needed for the MSP7120 PCI Controller. * * INPUTS: dev - structure describing the PCI device * * OUTPUTS: none * * RETURNS: PCIBIOS_SUCCESSFUL * ****************************************************************************/ int pcibios_plat_dev_init(struct pci_dev *dev) { return PCIBIOS_SUCCESSFUL; } /***************************************************************************** * * FUNCTION: pcibios_map_irq * _________________________________________________________________________ * * DESCRIPTION: Perform board supplied PCI IRQ mapping routine. * * INPUTS: dev - unused * slot - PCI slot. Identified by which bit of the AD[] bus * drives the IDSEL line. AD[10] is 0, AD[31] is * slot 21. * pin - numbered using the scheme of the PCI_INTERRUPT_PIN * field of the config header. 
* * OUTPUTS: none * * RETURNS: IRQ number * ****************************************************************************/ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); #endif printk(KERN_WARNING "PCI: irq_tab returned %d for slot=%d pin=%d\n", irq_tab[slot][pin], slot, pin); return irq_tab[slot][pin]; } #endif /* CONFIG_PCI */
gpl-2.0
KCFTech/Linux_3_2_0-26_Kernel
drivers/staging/slicoss/slicoss.c
5118
107899
/************************************************************************** * * Copyright 2000-2006 Alacritech, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The views and conclusions contained in the software and documentation * are those of the authors and should not be interpreted as representing * official policies, either expressed or implied, of Alacritech, Inc. * **************************************************************************/ /* * FILENAME: slicoss.c * * The SLICOSS driver for Alacritech's IS-NIC products. 
* * This driver is supposed to support: * * Mojave cards (single port PCI Gigabit) both copper and fiber * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber * * The driver was acutally tested on Oasis and Kalahari cards. * * * NOTE: This is the standard, non-accelerated version of Alacritech's * IS-NIC driver. */ #define KLUDGE_FOR_4GB_BOUNDARY 1 #define DEBUG_MICROCODE 1 #define DBG 1 #define SLIC_INTERRUPT_PROCESS_LIMIT 1 #define SLIC_OFFLOAD_IP_CHECKSUM 1 #define STATS_TIMER_INTERVAL 2 #define PING_TIMER_INTERVAL 1 #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <asm/unaligned.h> #include <linux/ethtool.h> #include <linux/uaccess.h> #include "slichw.h" #include "slic.h" static uint slic_first_init = 1; static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\ "and Storage Accelerator (Non-Accelerated)"; static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00"; static char *slic_product_name = "SLIC Technology(tm) Server "\ "and Storage Accelerator (Non-Accelerated)"; static char *slic_vendor = "Alacritech, Inc."; static int slic_debug = 1; static int debug = -1; static struct net_device *head_netdevice; static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL }; static int intagg_delay = 100; static u32 
dynamic_intagg; static unsigned int rcv_count; static struct dentry *slic_debugfs; #define DRV_NAME "slicoss" #define DRV_VERSION "2.0.1" #define DRV_AUTHOR "Alacritech, Inc. Engineering" #define DRV_DESCRIPTION "Alacritech SLIC Techonology(tm) "\ "Non-Accelerated Driver" #define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\ "All rights reserved." #define PFX DRV_NAME " " MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("Dual BSD/GPL"); module_param(dynamic_intagg, int, 0); MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting"); module_param(intagg_delay, int, 0); MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay"); static DEFINE_PCI_DEVICE_TABLE(slic_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, slic_pci_tbl); #ifdef ASSERT #undef ASSERT #endif static void slic_assert_fail(void) { u32 cpuid; u32 curr_pid; cpuid = smp_processor_id(); curr_pid = current->pid; printk(KERN_ERR "%s CPU # %d ---- PID # %d\n", __func__, cpuid, curr_pid); } #ifndef ASSERT #define ASSERT(a) do { \ if (!(a)) { \ printk(KERN_ERR "slicoss ASSERT() Failure: function %s" \ "line %d\n", __func__, __LINE__); \ slic_assert_fail(); \ } \ } while (0) #endif #define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \ { \ spin_lock_irqsave(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ _pslic_handle = _adapter->pfree_slic_handles; \ if (_pslic_handle) { \ ASSERT(_pslic_handle->type == SLIC_HANDLE_FREE); \ _adapter->pfree_slic_handles = _pslic_handle->next; \ } \ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ } #define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \ { \ _pslic_handle->type = SLIC_HANDLE_FREE; \ spin_lock_irqsave(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ _pslic_handle->next = _adapter->pfree_slic_handles; \ 
_adapter->pfree_slic_handles = _pslic_handle; \ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ } static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush) { writel(value, reg); if (flush) mb(); } static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg, u32 value, void __iomem *regh, u32 paddrh, bool flush) { spin_lock_irqsave(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); if (paddrh != adapter->curaddrupper) { adapter->curaddrupper = paddrh; writel(paddrh, regh); } writel(value, reg); if (flush) mb(); spin_unlock_irqrestore(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); } /* * Functions to obtain the CRC corresponding to the destination mac address. * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using * the polynomial: * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + * x^4 + x^2 + x^1. * * After the CRC for the 6 bytes is generated (but before the value is * complemented), * we must then transpose the value and return bits 30-23. * */ static u32 slic_crc_table[256]; /* Table of CRCs for all possible byte values */ static u32 slic_crc_init; /* Is table initialized */ /* * Contruct the CRC32 table */ static void slic_mcast_init_crc32(void) { u32 c; /* CRC shit reg */ u32 e = 0; /* Poly X-or pattern */ int i; /* counter */ int k; /* byte being shifted into crc */ static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 }; for (i = 0; i < ARRAY_SIZE(p); i++) e |= 1L << (31 - p[i]); for (i = 1; i < 256; i++) { c = i; for (k = 8; k; k--) c = c & 1 ? (c >> 1) ^ e : c >> 1; slic_crc_table[i] = c; } } /* * Return the MAC hast as described above. 
 */
/* Compute the 8-bit multicast hash for a 6-byte MAC address: a standard
 * reflected CRC-32 over the address, then bits 1..8 of the (uncomplemented)
 * CRC, transposed.  Lazily builds the CRC table on first use. */
static unsigned char slic_mcast_get_mac_hash(char *macaddr)
{
	u32 crc;
	char *p;
	int i;
	unsigned char machash = 0;

	/* Build the CRC lookup table once, on first call. */
	if (!slic_crc_init) {
		slic_mcast_init_crc32();
		slic_crc_init = 1;
	}

	crc = 0xFFFFFFFF;	/* Preload shift register, per crc-32 spec */
	for (i = 0, p = macaddr; i < 6; ++p, ++i)
		crc = (crc >> 8) ^ slic_crc_table[(crc ^ *p) & 0xFF];

	/* Return bits 1-8, transposed */
	for (i = 1; i < 9; i++)
		machash |= (((crc >> i) & 1) << (8 - i));

	return machash;
}

/* Set the bit in the adapter's 64-bit multicast mask that corresponds to
 * the given MAC address' hash.  The mask is written to the hardware later
 * by slic_mcast_set_mask(). */
static void slic_mcast_set_bit(struct adapter *adapter, char *address)
{
	unsigned char crcpoly;

	/* Get the CRC polynomial for the mac address */
	crcpoly = slic_mcast_get_mac_hash(address);

	/* We only have space on the SLIC for 64 entries.  Lop
	 * off the top two bits. (2^6 = 64)
	 */
	crcpoly &= 0x3F;

	/* OR in the new bit into our 64 bit mask. */
	adapter->mcastmask |= (u64) 1 << crcpoly;
}

/* Push the adapter's multicast filter state to the SLIC's mask registers.
 * In promiscuous/all-multicast mode the mask is opened fully; otherwise
 * the accumulated 64-bit mcastmask is split across the low/high regs. */
static void slic_mcast_set_mask(struct adapter *adapter)
{
	__iomem struct slic_regs *slic_regs = adapter->slic_regs;

	if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
		/* Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode.  It saves the
		 * Microcode from having to keep state about the MAC
		 * configuration.
*/ slic_reg32_write(&slic_regs->slic_mcastlow, 0xFFFFFFFF, FLUSH); slic_reg32_write(&slic_regs->slic_mcasthigh, 0xFFFFFFFF, FLUSH); } else { /* Commit our multicast mast to the SLIC by writing to the * multicast address mask registers */ slic_reg32_write(&slic_regs->slic_mcastlow, (u32)(adapter->mcastmask & 0xFFFFFFFF), FLUSH); slic_reg32_write(&slic_regs->slic_mcasthigh, (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF), FLUSH); } } static void slic_timer_ping(ulong dev) { struct adapter *adapter; struct sliccard *card; ASSERT(dev); adapter = netdev_priv((struct net_device *)dev); ASSERT(adapter); card = adapter->card; ASSERT(card); adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ); add_timer(&adapter->pingtimer); } static void slic_unmap_mmio_space(struct adapter *adapter) { if (adapter->slic_regs) iounmap(adapter->slic_regs); adapter->slic_regs = NULL; } /* * slic_link_config * * Write phy control to configure link duplex/speed * */ static void slic_link_config(struct adapter *adapter, u32 linkspeed, u32 linkduplex) { u32 __iomem *wphy; u32 speed; u32 duplex; u32 phy_config; u32 phy_advreg; u32 phy_gctlreg; if (adapter->state != ADAPT_UP) return; ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID) || (adapter->devid == SLIC_2GB_DEVICE_ID)); if (linkspeed > LINK_1000MB) linkspeed = LINK_AUTOSPEED; if (linkduplex > LINK_AUTOD) linkduplex = LINK_AUTOD; wphy = &adapter->slic_regs->slic_wphy; if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) { if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) { /* We've got a fiber gigabit interface, and register * 4 is different in fiber mode than in copper mode */ /* advertise FD only @1000 Mb */ phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD)); /* enable PAUSE frames */ phy_advreg |= PAR_ASYMPAUSE_FIBER; slic_reg32_write(wphy, phy_advreg, FLUSH); if (linkspeed == LINK_AUTOSPEED) { /* reset phy, enable auto-neg */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_AUTONEG | PCR_AUTONEG_RST)); slic_reg32_write(wphy, 
phy_config, FLUSH); } else { /* forced 1000 Mb FD*/ /* power down phy to break link this may not work) */ phy_config = (MIICR_REG_PCR | PCR_POWERDOWN); slic_reg32_write(wphy, phy_config, FLUSH); /* wait, Marvell says 1 sec, try to get away with 10 ms */ mdelay(10); /* disable auto-neg, set speed/duplex, soft reset phy, powerup */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_SPEED_1000 | PCR_DUPLEX_FULL)); slic_reg32_write(wphy, phy_config, FLUSH); } } else { /* copper gigabit */ /* Auto-Negotiate or 1000 Mb must be auto negotiated * We've got a copper gigabit interface, and * register 4 is different in copper mode than * in fiber mode */ if (linkspeed == LINK_AUTOSPEED) { /* advertise 10/100 Mb modes */ phy_advreg = (MIICR_REG_4 | (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD | PAR_ADV10HD)); } else { /* linkspeed == LINK_1000MB - don't advertise 10/100 Mb modes */ phy_advreg = MIICR_REG_4; } /* enable PAUSE frames */ phy_advreg |= PAR_ASYMPAUSE; /* required by the Cicada PHY */ phy_advreg |= PAR_802_3; slic_reg32_write(wphy, phy_advreg, FLUSH); /* advertise FD only @1000 Mb */ phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD)); slic_reg32_write(wphy, phy_gctlreg, FLUSH); if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY enable auto crossover */ phy_config = (MIICR_REG_16 | (MRV_REG16_XOVERON)); slic_reg32_write(wphy, phy_config, FLUSH); /* reset phy, enable auto-neg */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_AUTONEG | PCR_AUTONEG_RST)); slic_reg32_write(wphy, phy_config, FLUSH); } else { /* it's a Cicada PHY */ /* enable and restart auto-neg (don't reset) */ phy_config = (MIICR_REG_PCR | (PCR_AUTONEG | PCR_AUTONEG_RST)); slic_reg32_write(wphy, phy_config, FLUSH); } } } else { /* Forced 10/100 */ if (linkspeed == LINK_10MB) speed = 0; else speed = PCR_SPEED_100; if (linkduplex == LINK_HALFD) duplex = 0; else duplex = PCR_DUPLEX_FULL; if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY disable auto crossover */ 
phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF)); slic_reg32_write(wphy, phy_config, FLUSH); } /* power down phy to break link (this may not work) */ phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); /* wait, Marvell says 1 sec, try to get away with 10 ms */ mdelay(10); if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY disable auto-neg, set speed, soft reset phy, powerup */ phy_config = (MIICR_REG_PCR | (PCR_RESET | speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); } else { /* it's a Cicada PHY */ /* disable auto-neg, set speed, powerup */ phy_config = (MIICR_REG_PCR | (speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); } } } static int slic_card_download_gbrcv(struct adapter *adapter) { const struct firmware *fw; const char *file = ""; int ret; __iomem struct slic_regs *slic_regs = adapter->slic_regs; u32 codeaddr; u32 instruction; int index = 0; u32 rcvucodelen = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: file = "slicoss/oasisrcvucode.sys"; break; case SLIC_1GB_DEVICE_ID: file = "slicoss/gbrcvucode.sys"; break; default: ASSERT(0); break; } ret = request_firmware(&fw, file, &adapter->pcidev->dev); if (ret) { dev_err(&adapter->pcidev->dev, "SLICOSS: Failed to load firmware %s\n", file); return ret; } rcvucodelen = *(u32 *)(fw->data + index); index += 4; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: if (rcvucodelen != OasisRcvUCodeLen) return -EINVAL; break; case SLIC_1GB_DEVICE_ID: if (rcvucodelen != GBRcvUCodeLen) return -EINVAL; break; default: ASSERT(0); break; } /* start download */ slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_BEGIN, FLUSH); /* download the rcv sequencer ucode */ for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) { /* write out instruction address */ slic_reg32_write(&slic_regs->slic_rcv_wcs, codeaddr, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* write out the instruction data low addr */ 
slic_reg32_write(&slic_regs->slic_rcv_wcs, instruction, FLUSH); instruction = *(u8 *)(fw->data + index); index++; /* write out the instruction data high addr */ slic_reg32_write(&slic_regs->slic_rcv_wcs, (u8)instruction, FLUSH); } /* download finished */ release_firmware(fw); slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_FINISH, FLUSH); return 0; } MODULE_FIRMWARE("slicoss/oasisrcvucode.sys"); MODULE_FIRMWARE("slicoss/gbrcvucode.sys"); static int slic_card_download(struct adapter *adapter) { const struct firmware *fw; const char *file = ""; int ret; u32 section; int thissectionsize; int codeaddr; __iomem struct slic_regs *slic_regs = adapter->slic_regs; u32 instruction; u32 baseaddress; u32 i; u32 numsects = 0; u32 sectsize[3]; u32 sectstart[3]; int ucode_start, index = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: file = "slicoss/oasisdownload.sys"; break; case SLIC_1GB_DEVICE_ID: file = "slicoss/gbdownload.sys"; break; default: ASSERT(0); break; } ret = request_firmware(&fw, file, &adapter->pcidev->dev); if (ret) { dev_err(&adapter->pcidev->dev, "SLICOSS: Failed to load firmware %s\n", file); return ret; } numsects = *(u32 *)(fw->data + index); index += 4; ASSERT(numsects <= 3); for (i = 0; i < numsects; i++) { sectsize[i] = *(u32 *)(fw->data + index); index += 4; } for (i = 0; i < numsects; i++) { sectstart[i] = *(u32 *)(fw->data + index); index += 4; } ucode_start = index; instruction = *(u32 *)(fw->data + index); index += 4; for (section = 0; section < numsects; section++) { baseaddress = sectstart[section]; thissectionsize = sectsize[section] >> 3; for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { /* Write out instruction address */ slic_reg32_write(&slic_regs->slic_wcs, baseaddress + codeaddr, FLUSH); /* Write out instruction to low addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Write out instruction to high addr */ slic_reg32_write(&slic_regs->slic_wcs, 
/*
 * NOTE(review): this segment is a byte-split of a collapsed source file;
 * the first run below is the tail of the firmware-download routine whose
 * opening lies before this chunk.  Annotations only — no code changed.
 */
instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; } } index = ucode_start; for (section = 0; section < numsects; section++) { instruction = *(u32 *)(fw->data + index); baseaddress = sectstart[section]; if (baseaddress < 0x8000) continue; thissectionsize = sectsize[section] >> 3; for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { /* Write out instruction address */ slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_COMPARE | (baseaddress + codeaddr), FLUSH); /* Write out instruction to low addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Write out instruction to high addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Check SRAM location zero. If it is non-zero. Abort.*/ /* failure = readl((u32 __iomem *)&slic_regs->slic_reset); if (failure) { release_firmware(fw); return -EIO; }*/ } } release_firmware(fw); /* Everything OK, kick off the card */ mdelay(10); slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_START, FLUSH); /* stall for 20 ms, long enough for ucode to init card and reach mainloop */ mdelay(20); return 0; }
MODULE_FIRMWARE("slicoss/oasisdownload.sys"); MODULE_FIRMWARE("slicoss/gbdownload.sys");
/*
 * Install the EEPROM-configured MAC for this PCI function into the adapter,
 * seed currmacaddr from it only when currmacaddr is still all-zero, and
 * mirror the current address into the netdev if one is attached.
 */
static void slic_adapter_set_hwaddr(struct adapter *adapter) { struct sliccard *card = adapter->card; if ((adapter->card) && (card->config_set)) { memcpy(adapter->macaddr, card->config.MacInfo[adapter->functionnumber].macaddrA, sizeof(struct slic_config_mac)); if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] || adapter->currmacaddr[2] || adapter->currmacaddr[3] || adapter->currmacaddr[4] || adapter->currmacaddr[5])) { memcpy(adapter->currmacaddr, adapter->macaddr, 6); } if (adapter->netdev) { memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); } } }
/* Program the interrupt-aggregation delay register and cache the value. */
static void slic_intagg_set(struct adapter *adapter, u32 value) { slic_reg32_write(&adapter->slic_regs->slic_intagg, value, FLUSH); adapter->card->loadlevel_current = value; }
/* Quiesce (if the card is up) then pulse the reset register. */
static void slic_soft_reset(struct adapter *adapter) { if (adapter->card->state == CARD_UP) { slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH); mdelay(1); } slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC, FLUSH); mdelay(1); }
/*
 * Write currmacaddr into both unicast address slots (A and B) and refresh
 * the multicast mask.  NOTE(review): the u32 read of &currmacaddr[2] is a
 * potentially unaligned, type-punned access — works on x86, but verify on
 * strict-alignment targets.
 */
static void slic_mac_address_config(struct adapter *adapter) { u32 value; u32 value2; __iomem struct slic_regs *slic_regs = adapter->slic_regs; value = *(u32 *) &adapter->currmacaddr[2]; value = ntohl(value); slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH); slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH); value2 = (u32) ((adapter->currmacaddr[0] << 8 | adapter->currmacaddr[1]) & 0xFFFF); slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH); slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH); /* Write our multicast mask out to the card. This is done */ /* here in addition to the slic_mcast_addr_set routine */ /* because ALL_MCAST may have been enabled or disabled */ slic_mcast_set_mask(adapter); }
/* Configure GMAC gaps/GMII/duplex for the current link, then the addresses. */
static void slic_mac_config(struct adapter *adapter) { u32 value; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* Setup GMAC gaps */ if (adapter->linkspeed == LINK_1000MB) { value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) | (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) | (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT)); } else { value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) | (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) | (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT)); } /* enable GMII */ if (adapter->linkspeed == LINK_1000MB) value |= GMCR_GBIT; /* enable fullduplex */ if ((adapter->linkduplex == LINK_FULLD) || (adapter->macopts & MAC_LOOPBACK)) { value |= GMCR_FULLD; } /* write mac config */ slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH); /* setup mac addresses */ slic_mac_address_config(adapter); }
/* Program transmit/receive config; full MAC reconfig only on link change. */
static void slic_config_set(struct adapter *adapter, bool linkchange) { u32 value; u32 RcrReset; __iomem struct slic_regs *slic_regs = adapter->slic_regs; if (linkchange) { /* Setup MAC */ slic_mac_config(adapter); RcrReset = GRCR_RESET; } else { slic_mac_address_config(adapter); RcrReset = 0; } if (adapter->linkduplex == LINK_FULLD) { /* setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_XMTEN | /* Enable transmit */ GXCR_PAUSEEN); /* Enable pause */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); /* Setup rcvcfg last */ value = (RcrReset | /* Reset, if linkchange */ GRCR_CTLEN | /* Enable CTL frames */ GRCR_ADDRAEN | /* Address A enable */ GRCR_RCVBAD | /* Rcv bad frames */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); } else { /* setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_XMTEN); /* Enable transmit */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); /* Setup rcvcfg last */ value = (RcrReset | /* Reset, if linkchange */ GRCR_ADDRAEN | /* Address A enable */ GRCR_RCVBAD | /* Rcv bad frames */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); } if (adapter->state != ADAPT_DOWN) { /* Only enable receive if we are restarting or running */ value |= GRCR_RCVEN; } if (adapter->macopts & MAC_PROMISC) value |= GRCR_RCVALL; slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH); }
/* * Turn off RCV and XMT, power down PHY */
static void slic_config_clear(struct adapter *adapter) { u32 value; u32 phy_config; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* Setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_PAUSEEN); /* Enable pause */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); value = (GRCR_RESET | /* Always reset */ GRCR_CTLEN | /* Enable CTL frames */ GRCR_ADDRAEN | /* Address A enable */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH); /* power down phy */ phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN)); slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH); }
/*
 * Software receive filter: accept per promiscuous / broadcast / multicast /
 * directed options, bumping the matching counters.  Returns true to keep
 * the frame.
 */
static bool slic_mac_filter(struct adapter *adapter, struct ether_header *ether_frame) { struct net_device *netdev = adapter->netdev; u32 opts = adapter->macopts; u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0]; u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4]; if (opts & MAC_PROMISC) return true; if ((*dhost4 == 0xFFFFFFFF) && (*dhost2 == 0xFFFF)) { if (opts & MAC_BCAST) { adapter->rcv_broadcasts++; return true; } else { return false; } } if (ether_frame->ether_dhost[0] & 0x01) { if (opts & MAC_ALLMCAST) { adapter->rcv_multicasts++; netdev->stats.multicast++; return true; } if (opts & MAC_MCAST) { struct mcast_address *mcaddr = adapter->mcastaddrs; while (mcaddr) { if (!compare_ether_addr(mcaddr->address, ether_frame->ether_dhost)) { adapter->rcv_multicasts++; netdev->stats.multicast++; return true; } mcaddr = mcaddr->next; } return false; } else { return false; } } if (opts & MAC_DIRECTED) { adapter->rcv_unicasts++; return true; } return false; }
/* ndo_set_mac_address: refuse while running, validate, then reprogram MAC. */
static int slic_mac_set_address(struct net_device *dev, void *ptr) { struct adapter *adapter = netdev_priv(dev); struct sockaddr *addr = ptr; if (netif_running(dev)) return -EBUSY; if (!adapter) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); slic_config_set(adapter, true); return 0; }
/*
 * Periodic timer: map the event load seen since the last tick to an
 * interrupt-aggregation level and reprogram the card when it changes,
 * then re-arm itself.
 */
static void slic_timer_load_check(ulong cardaddr) { struct sliccard *card = (struct sliccard *)cardaddr; struct adapter *adapter = card->master; u32 __iomem *intagg; u32 load = card->events; u32 level = 0; intagg = &adapter->slic_regs->slic_intagg; if ((adapter) && (adapter->state == ADAPT_UP) && (card->state == CARD_UP) && (slic_global.dynamic_intagg)) { if (adapter->devid == SLIC_1GB_DEVICE_ID) { if (adapter->linkspeed == LINK_1000MB) level = 100; else { if (load > SLIC_LOAD_5) level = SLIC_INTAGG_5; else if (load > SLIC_LOAD_4) level = SLIC_INTAGG_4; else if (load > SLIC_LOAD_3) level = SLIC_INTAGG_3; else if (load > SLIC_LOAD_2) level = SLIC_INTAGG_2; else if (load > SLIC_LOAD_1) level = SLIC_INTAGG_1; else level = SLIC_INTAGG_0; } if (card->loadlevel_current != level) { card->loadlevel_current = level; slic_reg32_write(intagg, level, FLUSH); } } else { if (load > SLIC_LOAD_5) level = SLIC_INTAGG_5; else if (load > SLIC_LOAD_4) level = SLIC_INTAGG_4; else if (load > SLIC_LOAD_3) level = SLIC_INTAGG_3; else if (load > SLIC_LOAD_2) level = SLIC_INTAGG_2; else if (load > SLIC_LOAD_1) level = SLIC_INTAGG_1; else level = SLIC_INTAGG_0; if (card->loadlevel_current != level) { card->loadlevel_current = level; slic_reg32_write(intagg, level, FLUSH); } } } card->events = 0; card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ); add_timer(&card->loadtimer); }
/*
 * Append a utility-processor request to the adapter's singly linked
 * upr_list.  GFP_ATOMIC: may run in interrupt context.  Caller holds
 * upr_lock.
 */
static int slic_upr_queue_request(struct adapter *adapter, u32 upr_request, u32 upr_data, u32 upr_data_h, u32 upr_buffer, u32 upr_buffer_h) { struct slic_upr *upr; struct slic_upr *uprqueue; upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC); if (!upr) return -ENOMEM; upr->adapter = adapter->port; upr->upr_request = upr_request; upr->upr_data = upr_data; upr->upr_buffer = upr_buffer; upr->upr_data_h = upr_data_h; upr->upr_buffer_h = upr_buffer_h; upr->next = NULL; if (adapter->upr_list) { uprqueue = adapter->upr_list; while (uprqueue->next) uprqueue = uprqueue->next; uprqueue->next = upr; } else { adapter->upr_list = upr; } return 0; }
/* Kick the head UPR to the hardware unless one is already in flight. */
static void slic_upr_start(struct adapter *adapter) { struct slic_upr *upr; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* char * ptr1; char * ptr2; uint cmdoffset; */ upr = adapter->upr_list; if (!upr) return; if (adapter->upr_busy) return; adapter->upr_busy = 1; switch (upr->upr_request) { case SLIC_UPR_STATS: if (upr->upr_data_h == 0) { slic_reg32_write(&slic_regs->slic_stats, upr->upr_data, FLUSH); } else { slic_reg64_write(adapter, &slic_regs->slic_stats64, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); } break; case SLIC_UPR_RLSR: slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); break; case SLIC_UPR_RCONFIG: slic_reg64_write(adapter, &slic_regs->slic_rconfig, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); break; case SLIC_UPR_PING: slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH); break; default: ASSERT(0); } }
/* Queue a UPR under upr_lock and start it if the engine is idle. */
static int slic_upr_request(struct adapter *adapter, u32 upr_request, u32 upr_data, u32 upr_data_h, u32 upr_buffer, u32 upr_buffer_h) { int rc; spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags); rc = slic_upr_queue_request(adapter, upr_request, upr_data, upr_data_h, upr_buffer, upr_buffer_h); if (rc) goto err_unlock_irq; slic_upr_start(adapter); err_unlock_irq: spin_unlock_irqrestore(&adapter->upr_lock.lock, adapter->upr_lock.flags); return rc; }
/*
 * Completion of a link-status UPR: re-queue on error/busy, otherwise decode
 * the shared-memory linkstatus word and apply link state transitions.
 */
static void slic_link_upr_complete(struct adapter *adapter, u32 isr) { u32 linkstatus = adapter->pshmem->linkstatus; uint linkup; unsigned char linkspeed; unsigned char linkduplex; if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { struct slic_shmem *pshmem; pshmem = (struct slic_shmem *)adapter->phys_shmem;
#if BITS_PER_LONG == 64
slic_upr_queue_request(adapter, SLIC_UPR_RLSR, SLIC_GET_ADDR_LOW(&pshmem->linkstatus), SLIC_GET_ADDR_HIGH(&pshmem->linkstatus), 0, 0);
#else
slic_upr_queue_request(adapter, SLIC_UPR_RLSR, (u32) &pshmem->linkstatus, SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
#endif
return; } if (adapter->state != ADAPT_UP) return; ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID) || (adapter->devid == SLIC_2GB_DEVICE_ID)); linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN; if (linkstatus & GIG_SPEED_1000) linkspeed = LINK_1000MB; else if (linkstatus & GIG_SPEED_100) linkspeed = LINK_100MB; else linkspeed = LINK_10MB; if (linkstatus & GIG_FULLDUPLEX) linkduplex = LINK_FULLD; else linkduplex = LINK_HALFD; if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN)) return; /* link up event, but nothing has changed */ if ((adapter->linkstate == LINK_UP) && (linkup == LINK_UP) && (adapter->linkspeed == linkspeed) && (adapter->linkduplex == linkduplex)) return; /* link has changed at this point */ /* link has gone from up to down */ if (linkup == LINK_DOWN) { adapter->linkstate = LINK_DOWN; return; } /* link has gone from down to up */ adapter->linkspeed = linkspeed; adapter->linkduplex = linkduplex; if (adapter->linkstate != LINK_UP) { /* setup the mac */ slic_config_set(adapter, true); adapter->linkstate = LINK_UP; netif_start_queue(adapter->netdev); } }
/*
 * ISR-side UPR completion: pop the head request, dispatch on its type
 * (statistics merge, link status, ping, ...), free it, and start the next.
 */
static void slic_upr_request_complete(struct adapter *adapter, u32 isr) { struct sliccard *card = adapter->card; struct slic_upr *upr; spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags); upr = adapter->upr_list; if (!upr) { ASSERT(0); spin_unlock_irqrestore(&adapter->upr_lock.lock, adapter->upr_lock.flags); return; } adapter->upr_list = upr->next; upr->next = NULL; adapter->upr_busy = 0; ASSERT(adapter->port == upr->adapter); switch (upr->upr_request) { case SLIC_UPR_STATS: { struct slic_stats *slicstats = (struct slic_stats *) &adapter->pshmem->inicstats; struct slic_stats *newstats = slicstats; struct slic_stats *old = &adapter->inicstats_prev; struct slicnet_stats *stst = &adapter->slic_stats; if (isr & ISR_UPCERR) { dev_err(&adapter->netdev->dev, "SLIC_UPR_STATS command failed isr[%x]\n", isr); break; } UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, newstats->xmit_tcp_segs_gb, old->xmit_tcp_segs_gb); UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, newstats->xmit_tcp_bytes_gb, old->xmit_tcp_bytes_gb); UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, newstats->rcv_tcp_segs_gb, old->rcv_tcp_segs_gb); UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, newstats->rcv_tcp_bytes_gb, old->rcv_tcp_bytes_gb); UPDATE_STATS_GB(stst->iface.xmt_bytes, newstats->xmit_bytes_gb, old->xmit_bytes_gb); UPDATE_STATS_GB(stst->iface.xmt_ucast, newstats->xmit_unicasts_gb, old->xmit_unicasts_gb); UPDATE_STATS_GB(stst->iface.rcv_bytes, newstats->rcv_bytes_gb, old->rcv_bytes_gb); UPDATE_STATS_GB(stst->iface.rcv_ucast, newstats->rcv_unicasts_gb, old->rcv_unicasts_gb); UPDATE_STATS_GB(stst->iface.xmt_errors, newstats->xmit_collisions_gb, old->xmit_collisions_gb); UPDATE_STATS_GB(stst->iface.xmt_errors, newstats->xmit_excess_collisions_gb, old->xmit_excess_collisions_gb); UPDATE_STATS_GB(stst->iface.xmt_errors, newstats->xmit_other_error_gb, old->xmit_other_error_gb); UPDATE_STATS_GB(stst->iface.rcv_errors, newstats->rcv_other_error_gb, old->rcv_other_error_gb); UPDATE_STATS_GB(stst->iface.rcv_discards, newstats->rcv_drops_gb, old->rcv_drops_gb); if (newstats->rcv_drops_gb > old->rcv_drops_gb) { adapter->rcv_drops += (newstats->rcv_drops_gb - old->rcv_drops_gb); } memcpy(old, newstats, sizeof(struct slic_stats)); break; } case SLIC_UPR_RLSR: slic_link_upr_complete(adapter, isr); break; case SLIC_UPR_RCONFIG: break; case SLIC_UPR_RPHY: ASSERT(0); break; case SLIC_UPR_ENLB: ASSERT(0); break; case SLIC_UPR_ENCT: ASSERT(0); break; case SLIC_UPR_PDWN: ASSERT(0); break; case SLIC_UPR_PING: card->pingstatus |= (isr & ISR_PINGDSMASK); break; default: ASSERT(0); } kfree(upr); slic_upr_start(adapter); spin_unlock_irqrestore(&adapter->upr_lock.lock, adapter->upr_lock.flags); }
/* Ask the utility processor to DMA the card config to the given address. */
static void slic_config_get(struct adapter *adapter, u32 config, u32 config_h) { int status; status = slic_upr_request(adapter, SLIC_UPR_RCONFIG, (u32) config, (u32) config_h, 0, 0); ASSERT(status == 0); }
/* * this is here to checksum the eeprom, there is some ucode bug * which prevents us from using the ucode result. * remove this once ucode is fixed.
 */
/*
 * BSD-style ones'-complement checksum over the EEPROM image, with the usual
 * odd-start-byte swapping.  NOTE(review): the unrolled loops sum 16 (resp. 4)
 * u16 words but advance the pointer by only 16 (resp. 4) BYTES — half the
 * span summed; upstream marked these lines "verify".  Confirm against the
 * EEPROM layout before relying on this.
 */
static ushort slic_eeprom_cksum(char *m, int len) {
#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);\
}
u16 *w; u32 sum = 0; u32 byte_swapped = 0; u32 w_int; union { char c[2]; ushort s; } s_util; union { ushort s[2]; int l; } l_util; l_util.l = 0; s_util.s = 0; w = (u16 *)m;
#if BITS_PER_LONG == 64
w_int = (u32) ((ulong) w & 0x00000000FFFFFFFF);
#else
w_int = (u32) (w);
#endif
if ((1 & w_int) && (len > 0)) { REDUCE; sum <<= 8; s_util.c[0] = *(unsigned char *)w; w = (u16 *)((char *)w + 1); len--; byte_swapped = 1; } /* Unroll the loop to make overhead from branches &c small. */ while ((len -= 32) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11]; sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15]; w = (u16 *)((ulong) w + 16); /* verify */ } len += 32; while ((len -= 8) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; w = (u16 *)((ulong) w + 4); /* verify */ } len += 8; if (len != 0 || byte_swapped != 0) { REDUCE; while ((len -= 2) >= 0) sum += *w++; /* verify */ if (byte_swapped) { REDUCE; sum <<= 8; byte_swapped = 0; if (len == -1) { s_util.c[1] = *(char *) w; sum += s_util.s; len = 0; } else { len = -1; } } else if (len == -1) { s_util.c[0] = *(char *) w; } if (len == -1) { s_util.c[1] = 0; sum += s_util.s; } } REDUCE; return (ushort) sum; }
/* Release all response-queue DMA pages and reset the queue bookkeeping. */
static void slic_rspqueue_free(struct adapter *adapter) { int i; struct slic_rspqueue *rspq = &adapter->rspqueue; for (i = 0; i < rspq->num_pages; i++) { if (rspq->vaddr[i]) { pci_free_consistent(adapter->pcidev, PAGE_SIZE, rspq->vaddr[i], rspq->paddr[i]); } rspq->vaddr[i] = NULL; rspq->paddr[i] = 0; } rspq->offset = 0; rspq->pageindex = 0; rspq->rspbuf = NULL; }
/* Allocate the response queue DMA pages (body continues in next segment). */
static int slic_rspqueue_init(struct adapter *adapter) { int i; struct slic_rspqueue *rspq = &adapter->rspqueue; __iomem struct slic_regs *slic_regs =
adapter->slic_regs; u32 paddrh = 0; ASSERT(adapter->state == ADAPT_DOWN); memset(rspq, 0, sizeof(struct slic_rspqueue)); rspq->num_pages = SLIC_RSPQ_PAGES_GB; for (i = 0; i < rspq->num_pages; i++) { rspq->vaddr[i] = pci_alloc_consistent(adapter->pcidev, PAGE_SIZE, &rspq->paddr[i]); if (!rspq->vaddr[i]) { dev_err(&adapter->pcidev->dev, "pci_alloc_consistent failed\n"); slic_rspqueue_free(adapter); return -ENOMEM; } /* FIXME: * do we really need this assertions (4K PAGE_SIZE aligned addr)? */ #if 0 #ifndef CONFIG_X86_64 ASSERT(((u32) rspq->vaddr[i] & 0xFFFFF000) == (u32) rspq->vaddr[i]); ASSERT(((u32) rspq->paddr[i] & 0xFFFFF000) == (u32) rspq->paddr[i]); #endif #endif memset(rspq->vaddr[i], 0, PAGE_SIZE); if (paddrh == 0) { slic_reg32_write(&slic_regs->slic_rbar, (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), DONT_FLUSH); } else { slic_reg64_write(adapter, &slic_regs->slic_rbar64, (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), &slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } } rspq->offset = 0; rspq->pageindex = 0; rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0]; return 0; } static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter) { struct slic_rspqueue *rspq = &adapter->rspqueue; struct slic_rspbuf *buf; if (!(rspq->rspbuf->status)) return NULL; buf = rspq->rspbuf; #if BITS_PER_LONG == 32 ASSERT((buf->status & 0xFFFFFFE0) == 0); #endif ASSERT(buf->hosthandle); if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) { rspq->rspbuf++; #if BITS_PER_LONG == 32 ASSERT(((u32) rspq->rspbuf & 0xFFFFFFE0) == (u32) rspq->rspbuf); #endif } else { ASSERT(rspq->offset == SLIC_RSPQ_BUFSINPAGE); slic_reg64_write(adapter, &adapter->slic_regs->slic_rbar64, (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE), &adapter->slic_regs->slic_addr_upper, 0, DONT_FLUSH); rspq->pageindex = (++rspq->pageindex) % rspq->num_pages; rspq->offset = 0; rspq->rspbuf = (struct slic_rspbuf *) rspq->vaddr[rspq->pageindex]; #if BITS_PER_LONG == 32 ASSERT(((u32) rspq->rspbuf & 0xFFFFF000) == (u32) 
rspq->rspbuf);
#endif
}
#if BITS_PER_LONG == 32
ASSERT(((u32) buf & 0xFFFFFFE0) == (u32) buf);
#endif
return buf; }
/* Zero the command-queue page bookkeeping. */
static void slic_cmdqmem_init(struct adapter *adapter) { struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; memset(cmdqmem, 0, sizeof(struct slic_cmdqmem)); }
/* Free every allocated command DMA page and clear the bookkeeping. */
static void slic_cmdqmem_free(struct adapter *adapter) { struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; int i; for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) { if (cmdqmem->pages[i]) { pci_free_consistent(adapter->pcidev, PAGE_SIZE, (void *) cmdqmem->pages[i], cmdqmem->dma_pages[i]); } } memset(cmdqmem, 0, sizeof(struct slic_cmdqmem)); }
/* Allocate one more coherent command page; NULL when full or OOM. */
static u32 *slic_cmdqmem_addpage(struct adapter *adapter) { struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; u32 *pageaddr; if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES) return NULL; pageaddr = pci_alloc_consistent(adapter->pcidev, PAGE_SIZE, &cmdqmem->dma_pages[cmdqmem->pagecnt]); if (!pageaddr) return NULL;
#if BITS_PER_LONG == 32
ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
#endif
cmdqmem->pages[cmdqmem->pagecnt] = pageaddr; cmdqmem->pagecnt++; return pageaddr; }
/* Drop in-flight skbs, reset all three command queues, free the pages. */
static void slic_cmdq_free(struct adapter *adapter) { struct slic_hostcmd *cmd; cmd = adapter->cmdq_all.head; while (cmd) { if (cmd->busy) { struct sk_buff *tempskb; tempskb = cmd->skb; if (tempskb) { cmd->skb = NULL; dev_kfree_skb_irq(tempskb); } } cmd = cmd->next_all; } memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); slic_cmdqmem_free(adapter); }
/*
 * Carve a DMA page into host commands: give each a slic_handle, link them
 * onto cmdq_all (unlocked; init/teardown paths only) and onto cmdq_free
 * under its lock.
 */
static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page) { struct slic_hostcmd *cmd; struct slic_hostcmd *prev; struct slic_hostcmd *tail; struct slic_cmdqueue *cmdq; int cmdcnt; void *cmdaddr; ulong phys_addr; u32 phys_addrl; u32 phys_addrh; struct slic_handle *pslic_handle; cmdaddr = page; cmd = (struct slic_hostcmd *)cmdaddr; cmdcnt = 0; phys_addr = virt_to_bus((void *)page); phys_addrl = SLIC_GET_ADDR_LOW(phys_addr); phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr); prev = NULL; tail = cmd; while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) && (adapter->slic_handle_ix < 256)) { /* Allocate and initialize a SLIC_HANDLE for this command */ SLIC_GET_SLIC_HANDLE(adapter, pslic_handle); if (pslic_handle == NULL) ASSERT(0); ASSERT(pslic_handle == &adapter->slic_handles[pslic_handle->token.handle_index]); pslic_handle->type = SLIC_HANDLE_CMD; pslic_handle->address = (void *) cmd; pslic_handle->offset = (ushort) adapter->slic_handle_ix++; pslic_handle->other_handle = NULL; pslic_handle->next = NULL; cmd->pslic_handle = pslic_handle; cmd->cmd64.hosthandle = pslic_handle->token.handle_token; cmd->busy = false; cmd->paddrl = phys_addrl; cmd->paddrh = phys_addrh; cmd->next_all = prev; cmd->next = prev; prev = cmd; phys_addrl += SLIC_HOSTCMD_SIZE; cmdaddr += SLIC_HOSTCMD_SIZE; cmd = (struct slic_hostcmd *)cmdaddr; cmdcnt++; } cmdq = &adapter->cmdq_all; cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ tail->next_all = cmdq->head; cmdq->head = prev; cmdq = &adapter->cmdq_free; spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags); cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ tail->next = cmdq->head; cmdq->head = prev; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); }
/* Build the initial command queues from SLIC_CMDQ_INITPAGES DMA pages. */
static int slic_cmdq_init(struct adapter *adapter) { int i; u32 *pageaddr; ASSERT(adapter->state == ADAPT_DOWN); memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); spin_lock_init(&adapter->cmdq_all.lock.lock); spin_lock_init(&adapter->cmdq_free.lock.lock); spin_lock_init(&adapter->cmdq_done.lock.lock); slic_cmdqmem_init(adapter); adapter->slic_handle_ix = 1; for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) { pageaddr = slic_cmdqmem_addpage(adapter);
/* NOTE(review): this ASSERT evaluates pageaddr before the NULL check below. */
#if BITS_PER_LONG == 32
ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
#endif
if (!pageaddr) { slic_cmdq_free(adapter); return -ENOMEM; } slic_cmdq_addcmdpage(adapter, pageaddr); } adapter->slic_handle_ix = 1; return 0; }
/* Reclaim busy commands after reset and rebuild cmdq_free from cmdq_all. */
static void slic_cmdq_reset(struct adapter *adapter) { struct slic_hostcmd *hcmd; struct sk_buff *skb; u32 outstanding; spin_lock_irqsave(&adapter->cmdq_free.lock.lock, adapter->cmdq_free.lock.flags); spin_lock_irqsave(&adapter->cmdq_done.lock.lock, adapter->cmdq_done.lock.flags); outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count; outstanding -= adapter->cmdq_free.count; hcmd = adapter->cmdq_all.head; while (hcmd) { if (hcmd->busy) { skb = hcmd->skb; ASSERT(skb); hcmd->busy = 0; hcmd->skb = NULL; dev_kfree_skb_irq(skb); } hcmd = hcmd->next_all; } adapter->cmdq_free.count = 0; adapter->cmdq_free.head = NULL; adapter->cmdq_free.tail = NULL; adapter->cmdq_done.count = 0; adapter->cmdq_done.head = NULL; adapter->cmdq_done.tail = NULL; adapter->cmdq_free.head = adapter->cmdq_all.head; hcmd = adapter->cmdq_all.head; while (hcmd) { adapter->cmdq_free.count++; hcmd->next = hcmd->next_all; hcmd = hcmd->next_all; } if (adapter->cmdq_free.count != adapter->cmdq_all.count) { dev_err(&adapter->netdev->dev, "free_count %d != all count %d\n", adapter->cmdq_free.count, adapter->cmdq_all.count); } spin_unlock_irqrestore(&adapter->cmdq_done.lock.lock, adapter->cmdq_done.lock.flags); spin_unlock_irqrestore(&adapter->cmdq_free.lock.lock, adapter->cmdq_free.lock.flags); }
/* Move the whole done list onto the (empty) free list in one shot. */
static void slic_cmdq_getdone(struct adapter *adapter) { struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done; struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free; ASSERT(free_cmdq->head == NULL); spin_lock_irqsave(&done_cmdq->lock.lock, done_cmdq->lock.flags); free_cmdq->head = done_cmdq->head; free_cmdq->count = done_cmdq->count; done_cmdq->head = NULL; done_cmdq->tail = NULL; done_cmdq->count = 0; spin_unlock_irqrestore(&done_cmdq->lock.lock, done_cmdq->lock.flags); }
/* Pop a free command; refill from done list or grow by a page if empty. */
static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter) { struct slic_cmdqueue *cmdq = &adapter->cmdq_free; struct slic_hostcmd *cmd = NULL; lock_and_retry: spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags); retry: cmd = cmdq->head; if (cmd) { cmdq->head = cmd->next; cmdq->count--; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); } else { slic_cmdq_getdone(adapter); cmd = cmdq->head; if (cmd) { goto retry; } else { u32 *pageaddr; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); pageaddr = slic_cmdqmem_addpage(adapter); if (pageaddr) { slic_cmdq_addcmdpage(adapter, pageaddr); goto lock_and_retry; } } } return cmd; }
/* IRQ path: push a completed command onto cmdq_done; wake xmit if starved. */
static void slic_cmdq_putdone_irq(struct adapter *adapter, struct slic_hostcmd *cmd) { struct slic_cmdqueue *cmdq = &adapter->cmdq_done; spin_lock(&cmdq->lock.lock); cmd->busy = 0; cmd->next = cmdq->head; cmdq->head = cmd; cmdq->count++; if ((adapter->xmitq_full) && (cmdq->count > 10)) netif_wake_queue(adapter->netdev); spin_unlock(&cmdq->lock.lock); }
/*
 * Allocate up to SLIC_RCVQ_FILLENTRIES receive skbs, DMA-map each, hand the
 * address to the card, and append to the software rcvq.  Returns the number
 * actually added.
 */
static int slic_rcvqueue_fill(struct adapter *adapter) { void *paddr; u32 paddrl; u32 paddrh; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; int i = 0; struct device *dev = &adapter->netdev->dev; while (i < SLIC_RCVQ_FILLENTRIES) { struct slic_rcvbuf *rcvbuf; struct sk_buff *skb;
#ifdef KLUDGE_FOR_4GB_BOUNDARY
retry_rcvqfill:
#endif
skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC); if (skb) { paddr = (void *)pci_map_single(adapter->pcidev, skb->data, SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE); paddrl = SLIC_GET_ADDR_LOW(paddr); paddrh = SLIC_GET_ADDR_HIGH(paddr); skb->len = SLIC_RCVBUF_HEADSIZE; rcvbuf = (struct slic_rcvbuf *)skb->head; rcvbuf->status = 0; skb->next = NULL;
#ifdef KLUDGE_FOR_4GB_BOUNDARY
if (paddrl == 0) { dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); dev_err(dev, "SKIP THIS SKB!!!!!!!!\n"); goto retry_rcvqfill; }
#else
if (paddrl == 0) { dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); dev_err(dev, "GIVE TO CARD ANYWAY\n"); }
#endif
if (paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl, DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64, paddrl, &adapter->slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } if (rcvq->head) rcvq->tail->next = skb; else rcvq->head = skb; rcvq->tail = skb; rcvq->count++; i++; } else { dev_err(&adapter->netdev->dev, "slic_rcvqueue_fill could only get [%d] skbuffs\n", i); break; } } return i; }
/* Free every skb still linked on the software receive queue. */
static void slic_rcvqueue_free(struct adapter *adapter) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; struct sk_buff *skb; while (rcvq->head) { skb = rcvq->head; rcvq->head = rcvq->head->next; dev_kfree_skb(skb); } rcvq->tail = NULL; rcvq->head = NULL; rcvq->count = 0; }
/* Prime the receive queue; fail if fewer than SLIC_RCVQ_MINENTRIES stuck. */
static int slic_rcvqueue_init(struct adapter *adapter) { int i, count; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; ASSERT(adapter->state == ADAPT_DOWN); rcvq->tail = NULL; rcvq->head = NULL; rcvq->size = SLIC_RCVQ_ENTRIES; rcvq->errors = 0; rcvq->count = 0; i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES); count = 0; while (i) { count += slic_rcvqueue_fill(adapter); i--; } if (rcvq->count < SLIC_RCVQ_MINENTRIES) { slic_rcvqueue_free(adapter); return -ENOMEM; } return 0; }
/*
 * Dequeue the head skb if the card marked it valid (IRHDDR_SVALID), topping
 * the queue back up to SLIC_RCVQ_FILLTHRESH before returning.
 */
static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; struct sk_buff *skb; struct slic_rcvbuf *rcvbuf; int count; if (rcvq->count) { skb = rcvq->head; rcvbuf = (struct slic_rcvbuf *)skb->head; ASSERT(rcvbuf); if (rcvbuf->status & IRHDDR_SVALID) { rcvq->head = rcvq->head->next; skb->next = NULL; rcvq->count--; } else { skb = NULL; } } else { dev_err(&adapter->netdev->dev, "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count); skb = NULL; } while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { count = slic_rcvqueue_fill(adapter); if (!count) break; } if (skb) rcvq->errors = 0; return skb; }
/* Re-map a recycled skb and give it back to both the card and the queue. */
static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; void *paddr; u32 paddrl; u32 paddrh; struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head; struct device *dev; ASSERT(skb->len == SLIC_RCVBUF_HEADSIZE); paddr = (void *)pci_map_single(adapter->pcidev, skb->head, SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE); rcvbuf->status = 0; skb->next = NULL; paddrl = SLIC_GET_ADDR_LOW(paddr); paddrh = SLIC_GET_ADDR_HIGH(paddr); if (paddrl == 0) { dev = &adapter->netdev->dev; dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); } if (paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl, DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64, paddrl, &adapter->slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } if (rcvq->head) rcvq->tail->next = skb; else rcvq->head = skb; rcvq->tail = skb; rcvq->count++; return rcvq->count; }
/*
 * debugfs/seq_file dump of per-card information.  Most of the body is
 * compiled out under MOOKTODO.  Continues into the next segments.
 */
static int slic_debug_card_show(struct seq_file *seq, void *v) {
#ifdef MOOKTODO
int i; struct sliccard *card = seq->private; struct slic_config *config = &card->config; unsigned char *fru = (unsigned char *)(&card->config.atk_fru); unsigned char *oemfru = (unsigned char *)(&card->config.OemFru);
#endif
seq_printf(seq, "driver_version : %s\n", slic_proc_version); seq_printf(seq, "Microcode versions: \n"); seq_printf(seq, " Gigabit (gb) : %s %s\n", MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE); seq_printf(seq, " Gigabit Receiver : %s %s\n", GB_RCVUCODE_VERS_STRING, GB_RCVUCODE_VERS_DATE); seq_printf(seq, "Vendor : %s\n", slic_vendor); seq_printf(seq, "Product Name : %s\n", slic_product_name);
#ifdef MOOKTODO
seq_printf(seq, "VendorId : %4.4X\n", config->VendorId); seq_printf(seq, "DeviceId : %4.4X\n", config->DeviceId); seq_printf(seq, "RevisionId : %2.2x\n", config->RevisionId); seq_printf(seq, "Bus # : %d\n", card->busnumber); seq_printf(seq, "Device # : %d\n", card->slotnumber); seq_printf(seq, "Interfaces : %d\n", card->card_size); seq_printf(seq, " Initialized : %d\n", card->adapters_activated); seq_printf(seq, " Allocated : %d\n", card->adapters_allocated); ASSERT(card->card_size <= SLIC_NBR_MACS); for (i = 0; i < card->card_size; i++) { seq_printf(seq, " MAC%d : %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", i, config->macinfo[i].macaddrA[0], config->macinfo[i].macaddrA[1], config->macinfo[i].macaddrA[2], config->macinfo[i].macaddrA[3], config->macinfo[i].macaddrA[4], config->macinfo[i].macaddrA[5]); } seq_printf(seq, " IF Init State Duplex/Speed irq\n"); seq_printf(seq, " -------------------------------\n"); for (i = 0; i < card->adapters_allocated; i++) { struct adapter *adapter; adapter = card->adapter[i]; if (adapter) { seq_printf(seq, " %d %d %s %s %s 0x%X\n", adapter->physport, adapter->state, SLIC_LINKSTATE(adapter->linkstate), SLIC_DUPLEX(adapter->linkduplex), SLIC_SPEED(adapter->linkspeed), (uint) adapter->irq); } } seq_printf(seq, "Generation # : %4.4X\n", card->gennumber); seq_printf(seq, "RcvQ max entries : %4.4X\n",
SLIC_RCVQ_ENTRIES); seq_printf(seq, "Ping Status : %8.8X\n", card->pingstatus); seq_printf(seq, "Minimum grant : %2.2x\n", config->MinGrant); seq_printf(seq, "Maximum Latency : %2.2x\n", config->MaxLat); seq_printf(seq, "PciStatus : %4.4x\n", config->Pcistatus); seq_printf(seq, "Debug Device Id : %4.4x\n", config->DbgDevId); seq_printf(seq, "DRAM ROM Function : %4.4x\n", config->DramRomFn); seq_printf(seq, "Network interface Pin 1 : %2.2x\n", config->NetIntPin1);
/* FIX: pins 2 and 3 previously printed NetIntPin1 (copy-paste). */
seq_printf(seq, "Network interface Pin 2 : %2.2x\n", config->NetIntPin2); seq_printf(seq, "Network interface Pin 3 : %2.2x\n", config->NetIntPin3); seq_printf(seq, "PM capabilities : %4.4X\n", config->PMECapab); seq_printf(seq, "Network Clock Controls : %4.4X\n", config->NwClkCtrls);
/* Decode the Alacritech FRU block. */
switch (config->FruFormat) { case ATK_FRU_FORMAT: { seq_printf(seq, "Vendor : Alacritech, Inc.\n"); seq_printf(seq, "Assembly # : %c%c%c%c%c%c\n", fru[0], fru[1], fru[2], fru[3], fru[4], fru[5]); seq_printf(seq, "Revision # : %c%c\n", fru[6], fru[7]); if (config->OEMFruFormat == VENDOR4_FRU_FORMAT) { seq_printf(seq, "Serial # : " "%c%c%c%c%c%c%c%c%c%c%c%c\n", fru[8], fru[9], fru[10], fru[11], fru[12], fru[13], fru[16], fru[17], fru[18], fru[19], fru[20], fru[21]); } else { seq_printf(seq, "Serial # : " "%c%c%c%c%c%c%c%c%c%c%c%c%c%c\n", fru[8], fru[9], fru[10], fru[11], fru[12], fru[13], fru[14], fru[15], fru[16], fru[17], fru[18], fru[19], fru[20], fru[21]); } break; } default: { seq_printf(seq, "Vendor : Alacritech, Inc.\n"); seq_printf(seq, "Serial # : Empty FRU\n"); break; } }
/* Decode the OEM FRU block.  FIX below: seq_sprintf() does not exist in the
 * kernel seq_file API; these calls must be seq_printf(). */
switch (config->OEMFruFormat) { case VENDOR1_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " Commodity # : %c\n", oemfru[0]); seq_printf(seq, " Assembly # : %c%c%c%c\n", oemfru[1], oemfru[2], oemfru[3], oemfru[4]); seq_printf(seq, " Revision # : %c%c\n", oemfru[5], oemfru[6]); seq_printf(seq, " Supplier # : %c%c\n", oemfru[7], oemfru[8]); seq_printf(seq, " Date : %c%c\n", oemfru[9], oemfru[10]); seq_printf(seq, " Sequence # : 
%c%c%c\n", oemfru[11], oemfru[12], oemfru[13]); break; } case VENDOR2_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " Part # : " "%c%c%c%c%c%c%c%c\n", oemfru[0], oemfru[1], oemfru[2], oemfru[3], oemfru[4], oemfru[5], oemfru[6], oemfru[7]); seq_printf(seq, " Supplier # : %c%c%c%c%c\n", oemfru[8], oemfru[9], oemfru[10], oemfru[11], oemfru[12]); seq_printf(seq, " Date : %c%c%c\n", oemfru[13], oemfru[14], oemfru[15]); seq_sprintf(seq, " Sequence # : %c%c%c%c\n", oemfru[16], oemfru[17], oemfru[18], oemfru[19]); break; } case VENDOR3_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); } case VENDOR4_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " FRU Number : " "%c%c%c%c%c%c%c%c\n", oemfru[0], oemfru[1], oemfru[2], oemfru[3], oemfru[4], oemfru[5], oemfru[6], oemfru[7]); seq_sprintf(seq, " Part Number : " "%c%c%c%c%c%c%c%c\n", oemfru[8], oemfru[9], oemfru[10], oemfru[11], oemfru[12], oemfru[13], oemfru[14], oemfru[15]); seq_printf(seq, " EC Level : " "%c%c%c%c%c%c%c%c\n", oemfru[16], oemfru[17], oemfru[18], oemfru[19], oemfru[20], oemfru[21], oemfru[22], oemfru[23]); break; } default: break; } #endif return 0; } static int slic_debug_adapter_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; struct net_device *netdev = adapter->netdev; seq_printf(seq, "info: interface : %s\n", adapter->netdev->name); seq_printf(seq, "info: status : %s\n", SLIC_LINKSTATE(adapter->linkstate)); seq_printf(seq, "info: port : %d\n", adapter->physport); seq_printf(seq, "info: speed : %s\n", SLIC_SPEED(adapter->linkspeed)); seq_printf(seq, "info: duplex : %s\n", SLIC_DUPLEX(adapter->linkduplex)); seq_printf(seq, "info: irq : 0x%X\n", (uint) adapter->irq); seq_printf(seq, "info: Interrupt Agg Delay: %d usec\n", adapter->card->loadlevel_current); seq_printf(seq, "info: RcvQ max entries : %4.4X\n", SLIC_RCVQ_ENTRIES); seq_printf(seq, "info: RcvQ current : %4.4X\n", adapter->rcvqueue.count); seq_printf(seq, "rx stats: packets 
: %8.8lX\n", netdev->stats.rx_packets); seq_printf(seq, "rx stats: bytes : %8.8lX\n", netdev->stats.rx_bytes); seq_printf(seq, "rx stats: broadcasts : %8.8X\n", adapter->rcv_broadcasts); seq_printf(seq, "rx stats: multicasts : %8.8X\n", adapter->rcv_multicasts); seq_printf(seq, "rx stats: unicasts : %8.8X\n", adapter->rcv_unicasts); seq_printf(seq, "rx stats: errors : %8.8X\n", (u32) adapter->slic_stats.iface.rcv_errors); seq_printf(seq, "rx stats: Missed errors : %8.8X\n", (u32) adapter->slic_stats.iface.rcv_discards); seq_printf(seq, "rx stats: drops : %8.8X\n", (u32) adapter->rcv_drops); seq_printf(seq, "tx stats: packets : %8.8lX\n", netdev->stats.tx_packets); seq_printf(seq, "tx stats: bytes : %8.8lX\n", netdev->stats.tx_bytes); seq_printf(seq, "tx stats: errors : %8.8X\n", (u32) adapter->slic_stats.iface.xmt_errors); seq_printf(seq, "rx stats: multicasts : %8.8lX\n", netdev->stats.multicast); seq_printf(seq, "tx stats: collision errors : %8.8X\n", (u32) adapter->slic_stats.iface.xmit_collisions); seq_printf(seq, "perf: Max rcv frames/isr : %8.8X\n", adapter->max_isr_rcvs); seq_printf(seq, "perf: Rcv interrupt yields : %8.8X\n", adapter->rcv_interrupt_yields); seq_printf(seq, "perf: Max xmit complete/isr : %8.8X\n", adapter->max_isr_xmits); seq_printf(seq, "perf: error interrupts : %8.8X\n", adapter->error_interrupts); seq_printf(seq, "perf: error rmiss interrupts : %8.8X\n", adapter->error_rmiss_interrupts); seq_printf(seq, "perf: rcv interrupts : %8.8X\n", adapter->rcv_interrupts); seq_printf(seq, "perf: xmit interrupts : %8.8X\n", adapter->xmit_interrupts); seq_printf(seq, "perf: link event interrupts : %8.8X\n", adapter->linkevent_interrupts); seq_printf(seq, "perf: UPR interrupts : %8.8X\n", adapter->upr_interrupts); seq_printf(seq, "perf: interrupt count : %8.8X\n", adapter->num_isrs); seq_printf(seq, "perf: false interrupts : %8.8X\n", adapter->false_interrupts); seq_printf(seq, "perf: All register writes : %8.8X\n", adapter->all_reg_writes); 
	/* Remainder of the per-port debugfs statistics dump begun above:
	 * driver-internal performance counters followed by the hardware
	 * interface-event counters accumulated in adapter->if_events.
	 */
	seq_printf(seq, "perf: ICR register writes : %8.8X\n",
		   adapter->icr_reg_writes);
	seq_printf(seq, "perf: ISR register writes : %8.8X\n",
		   adapter->isr_reg_writes);
	seq_printf(seq, "ifevents: overflow 802 errors : %8.8X\n",
		   adapter->if_events.oflow802);
	seq_printf(seq, "ifevents: transport overflow errors: %8.8X\n",
		   adapter->if_events.Tprtoflow);
	seq_printf(seq, "ifevents: underflow errors : %8.8X\n",
		   adapter->if_events.uflow802);
	seq_printf(seq, "ifevents: receive early : %8.8X\n",
		   adapter->if_events.rcvearly);
	seq_printf(seq, "ifevents: buffer overflows : %8.8X\n",
		   adapter->if_events.Bufov);
	seq_printf(seq, "ifevents: carrier errors : %8.8X\n",
		   adapter->if_events.Carre);
	seq_printf(seq, "ifevents: Long : %8.8X\n",
		   adapter->if_events.Longe);
	seq_printf(seq, "ifevents: invalid preambles : %8.8X\n",
		   adapter->if_events.Invp);
	seq_printf(seq, "ifevents: CRC errors : %8.8X\n",
		   adapter->if_events.Crc);
	seq_printf(seq, "ifevents: dribble nibbles : %8.8X\n",
		   adapter->if_events.Drbl);
	seq_printf(seq, "ifevents: Code violations : %8.8X\n",
		   adapter->if_events.Code);
	seq_printf(seq, "ifevents: TCP checksum errors : %8.8X\n",
		   adapter->if_events.TpCsum);
	seq_printf(seq, "ifevents: TCP header short errors : %8.8X\n",
		   adapter->if_events.TpHlen);
	seq_printf(seq, "ifevents: IP checksum errors : %8.8X\n",
		   adapter->if_events.IpCsum);
	seq_printf(seq, "ifevents: IP frame incompletes : %8.8X\n",
		   adapter->if_events.IpLen);
	seq_printf(seq, "ifevents: IP headers shorts : %8.8X\n",
		   adapter->if_events.IpHlen);

	return 0;
}

/* seq_file open hook for the per-adapter "port%d" debugfs entry;
 * inode->i_private carries the struct adapter pointer.
 */
static int slic_debug_adapter_open(struct inode *inode, struct file *file)
{
	return single_open(file, slic_debug_adapter_show, inode->i_private);
}

/* seq_file open hook for the per-card "cardinfo" debugfs entry;
 * inode->i_private carries the struct sliccard pointer.
 */
static int slic_debug_card_open(struct inode *inode, struct file *file)
{
	return single_open(file, slic_debug_card_show, inode->i_private);
}

static const struct file_operations slic_debug_adapter_fops = {
	.owner = THIS_MODULE,
	.open = slic_debug_adapter_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static const struct file_operations slic_debug_card_fops = { .owner = THIS_MODULE, .open = slic_debug_card_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void slic_debug_adapter_create(struct adapter *adapter) { struct dentry *d; char name[7]; struct sliccard *card = adapter->card; if (!card->debugfs_dir) return; sprintf(name, "port%d", adapter->port); d = debugfs_create_file(name, S_IRUGO, card->debugfs_dir, adapter, &slic_debug_adapter_fops); if (!d || IS_ERR(d)) pr_info(PFX "%s: debugfs create failed\n", name); else adapter->debugfs_entry = d; } static void slic_debug_adapter_destroy(struct adapter *adapter) { debugfs_remove(adapter->debugfs_entry); adapter->debugfs_entry = NULL; } static void slic_debug_card_create(struct sliccard *card) { struct dentry *d; char name[IFNAMSIZ]; snprintf(name, sizeof(name), "slic%d", card->cardnum); d = debugfs_create_dir(name, slic_debugfs); if (!d || IS_ERR(d)) pr_info(PFX "%s: debugfs create dir failed\n", name); else { card->debugfs_dir = d; d = debugfs_create_file("cardinfo", S_IRUGO, slic_debugfs, card, &slic_debug_card_fops); if (!d || IS_ERR(d)) pr_info(PFX "%s: debugfs create failed\n", name); else card->debugfs_cardinfo = d; } } static void slic_debug_card_destroy(struct sliccard *card) { int i; for (i = 0; i < card->card_size; i++) { struct adapter *adapter; adapter = card->adapter[i]; if (adapter) slic_debug_adapter_destroy(adapter); } if (card->debugfs_cardinfo) { debugfs_remove(card->debugfs_cardinfo); card->debugfs_cardinfo = NULL; } if (card->debugfs_dir) { debugfs_remove(card->debugfs_dir); card->debugfs_dir = NULL; } } static void slic_debug_init(void) { struct dentry *ent; ent = debugfs_create_dir("slic", NULL); if (!ent || IS_ERR(ent)) { pr_info(PFX "debugfs create directory failed\n"); return; } slic_debugfs = ent; } static void slic_debug_cleanup(void) { if (slic_debugfs) { debugfs_remove(slic_debugfs); slic_debugfs = NULL; } } /* * slic_link_event_handler - * * Initiate a 
 link configuration sequence.  The link configuration begins
 * by issuing a READ_LINK_STATUS command to the Utility Processor on the
 * SLIC.  Since the command finishes asynchronously, the slic_upr_complete
 * routine will follow it up with a UP configuration write command, which
 * will also complete asynchronously.
 *
 */
static void slic_link_event_handler(struct adapter *adapter)
{
	int status;
	struct slic_shmem *pshmem;

	if (adapter->state != ADAPT_UP) {
		/* Adapter is not operational.  Ignore. */
		return;
	}

	/* phys_shmem is the DMA address of the shared-memory block; the
	 * UP writes the link status into pshmem->linkstatus there.
	 */
	pshmem = (struct slic_shmem *)adapter->phys_shmem;

#if BITS_PER_LONG == 64
	status = slic_upr_request(adapter,
				  SLIC_UPR_RLSR,
				  SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
				  SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
				  0, 0);
#else
	status = slic_upr_request(adapter, SLIC_UPR_RLSR,
		(u32) &pshmem->linkstatus,	/* no 4GB wrap guaranteed */
				  0, 0, 0);
#endif
	ASSERT(status == 0);
}

/*
 * Release everything acquired during interface init: the IRQ, the
 * shared-memory DMA block, the ping timer and the rsp/cmd/rcv queues.
 * Each teardown is guarded so the function is safe on a partially
 * initialized adapter.
 */
static void slic_init_cleanup(struct adapter *adapter)
{
	if (adapter->intrregistered) {
		adapter->intrregistered = 0;
		free_irq(adapter->netdev->irq, adapter->netdev);
	}
	if (adapter->pshmem) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct slic_shmem),
				    adapter->pshmem, adapter->phys_shmem);
		adapter->pshmem = NULL;
		adapter->phys_shmem = (dma_addr_t) NULL;
	}

	if (adapter->pingtimerset) {
		adapter->pingtimerset = 0;
		del_timer(&adapter->pingtimer);
	}

	slic_rspqueue_free(adapter);
	slic_cmdq_free(adapter);
	slic_rcvqueue_free(adapter);
}

/*
 * Allocate a mcast_address structure to hold the multicast address.
 * Link it in.
 */
static int slic_mcast_add_list(struct adapter *adapter, char *address)
{
	struct mcast_address *mcaddr, *mlist;

	/* Check to see if it already exists */
	mlist = adapter->mcastaddrs;
	while (mlist) {
		if (!compare_ether_addr(mlist->address, address))
			return 0;
		mlist = mlist->next;
	}

	/* Doesn't already exist.
	   Allocate a structure to hold it */
	mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
	if (mcaddr == NULL)
		return 1;	/* NOTE(review): callers only test != 0; -ENOMEM would be more conventional */

	memcpy(mcaddr->address, address, 6);	/* ETH_ALEN bytes */

	/* push onto the head of the adapter's multicast list */
	mcaddr->next = adapter->mcastaddrs;
	adapter->mcastaddrs = mcaddr;

	return 0;
}

/*
 * ndo_set_multicast_list handler: mirror the device's multicast list
 * into the adapter's own list / hardware hash bits, and re-send the
 * MAC-options configuration when the interface flags changed.
 */
static void slic_mcast_set_list(struct net_device *dev)
{
	struct adapter *adapter = netdev_priv(dev);
	int status = 0;
	char *addresses;
	struct netdev_hw_addr *ha;

	ASSERT(adapter);

	netdev_for_each_mc_addr(ha, dev) {
		addresses = (char *) &ha->addr;
		status = slic_mcast_add_list(adapter, addresses);
		if (status != 0)
			break;	/* out of memory: stop mirroring */
		slic_mcast_set_bit(adapter, addresses);
	}

	if (adapter->devflags_prev != dev->flags) {
		/* interface flags changed: rebuild macopts and push the
		 * whole config to the card
		 */
		adapter->macopts = MAC_DIRECTED;
		if (dev->flags) {
			if (dev->flags & IFF_BROADCAST)
				adapter->macopts |= MAC_BCAST;
			if (dev->flags & IFF_PROMISC)
				adapter->macopts |= MAC_PROMISC;
			if (dev->flags & IFF_ALLMULTI)
				adapter->macopts |= MAC_ALLMCAST;
			if (dev->flags & IFF_MULTICAST)
				adapter->macopts |= MAC_MCAST;
		}
		adapter->devflags_prev = dev->flags;
		slic_config_set(adapter, true);
	} else {
		/* flags unchanged: only update the multicast hash mask */
		if (status == 0)
			slic_mcast_set_mask(adapter);
	}
	return;
}

/* reason codes handed to slic_xmit_fail() */
#define XMIT_FAIL_LINK_STATE 1
#define XMIT_FAIL_ZERO_LENGTH 2
#define XMIT_FAIL_HOSTCMD_FAIL 3

/*
 * Fill in a 64-bit transmit host command for a single-fragment skb:
 * DMA-map the packet data and record its bus address, then compute the
 * command size in 32-byte units for the doorbell write.
 */
static void slic_xmit_build_request(struct adapter *adapter,
		struct slic_hostcmd *hcmd, struct sk_buff *skb)
{
	struct slic_host64_cmd *ihcmd;
	ulong phys_addr;

	ihcmd = &hcmd->cmd64;

	ihcmd->flags = (adapter->port << IHFLG_IFSHFT);
	ihcmd->command = IHCMD_XMT_REQ;
	ihcmd->u.slic_buffers.totlen = skb->len;
	phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
			PCI_DMA_TODEVICE);
	ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
	ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
	ihcmd->u.slic_buffers.bufs[0].length = skb->len;
	/* size in 32-byte chunks, from start of hcmd to just past bufs[0] */
#if BITS_PER_LONG == 64
	hcmd->cmdsize = (u32) ((((u64)&ihcmd->u.slic_buffers.bufs[1] -
				 (u64) hcmd) + 31) >> 5);
#else
	hcmd->cmdsize = ((((u32) &ihcmd->u.slic_buffers.bufs[1] -
			   (u32) hcmd) + 31) >> 5);
#endif
}

static void
slic_xmit_fail(struct adapter *adapter, struct sk_buff *skb, void *cmd, u32 skbtype, u32 status) { if (adapter->xmitq_full) netif_stop_queue(adapter->netdev); if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) { switch (status) { case XMIT_FAIL_LINK_STATE: dev_err(&adapter->netdev->dev, "reject xmit skb[%p: %x] linkstate[%s] " "adapter[%s:%d] card[%s:%d]\n", skb, skb->pkt_type, SLIC_LINKSTATE(adapter->linkstate), SLIC_ADAPTER_STATE(adapter->state), adapter->state, SLIC_CARD_STATE(adapter->card->state), adapter->card->state); break; case XMIT_FAIL_ZERO_LENGTH: dev_err(&adapter->netdev->dev, "xmit_start skb->len == 0 skb[%p] type[%x]\n", skb, skb->pkt_type); break; case XMIT_FAIL_HOSTCMD_FAIL: dev_err(&adapter->netdev->dev, "xmit_start skb[%p] type[%x] No host commands " "available\n", skb, skb->pkt_type); break; default: ASSERT(0); } } dev_kfree_skb(skb); adapter->netdev->stats.tx_dropped++; } static void slic_rcv_handle_error(struct adapter *adapter, struct slic_rcvbuf *rcvbuf) { struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data; struct net_device *netdev = adapter->netdev; if (adapter->devid != SLIC_1GB_DEVICE_ID) { if (hdr->frame_status14 & VRHSTAT_802OE) adapter->if_events.oflow802++; if (hdr->frame_status14 & VRHSTAT_TPOFLO) adapter->if_events.Tprtoflow++; if (hdr->frame_status_b14 & VRHSTATB_802UE) adapter->if_events.uflow802++; if (hdr->frame_status_b14 & VRHSTATB_RCVE) { adapter->if_events.rcvearly++; netdev->stats.rx_fifo_errors++; } if (hdr->frame_status_b14 & VRHSTATB_BUFF) { adapter->if_events.Bufov++; netdev->stats.rx_over_errors++; } if (hdr->frame_status_b14 & VRHSTATB_CARRE) { adapter->if_events.Carre++; netdev->stats.tx_carrier_errors++; } if (hdr->frame_status_b14 & VRHSTATB_LONGE) adapter->if_events.Longe++; if (hdr->frame_status_b14 & VRHSTATB_PREA) adapter->if_events.Invp++; if (hdr->frame_status_b14 & VRHSTATB_CRC) { adapter->if_events.Crc++; netdev->stats.rx_crc_errors++; } if (hdr->frame_status_b14 & VRHSTATB_DRBL) 
adapter->if_events.Drbl++; if (hdr->frame_status_b14 & VRHSTATB_CODE) adapter->if_events.Code++; if (hdr->frame_status_b14 & VRHSTATB_TPCSUM) adapter->if_events.TpCsum++; if (hdr->frame_status_b14 & VRHSTATB_TPHLEN) adapter->if_events.TpHlen++; if (hdr->frame_status_b14 & VRHSTATB_IPCSUM) adapter->if_events.IpCsum++; if (hdr->frame_status_b14 & VRHSTATB_IPLERR) adapter->if_events.IpLen++; if (hdr->frame_status_b14 & VRHSTATB_IPHERR) adapter->if_events.IpHlen++; } else { if (hdr->frame_statusGB & VGBSTAT_XPERR) { u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT; if (xerr == VGBSTAT_XCSERR) adapter->if_events.TpCsum++; if (xerr == VGBSTAT_XUFLOW) adapter->if_events.Tprtoflow++; if (xerr == VGBSTAT_XHLEN) adapter->if_events.TpHlen++; } if (hdr->frame_statusGB & VGBSTAT_NETERR) { u32 nerr = (hdr-> frame_statusGB >> VGBSTAT_NERRSHFT) & VGBSTAT_NERRMSK; if (nerr == VGBSTAT_NCSERR) adapter->if_events.IpCsum++; if (nerr == VGBSTAT_NUFLOW) adapter->if_events.IpLen++; if (nerr == VGBSTAT_NHLEN) adapter->if_events.IpHlen++; } if (hdr->frame_statusGB & VGBSTAT_LNKERR) { u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK; if (lerr == VGBSTAT_LDEARLY) adapter->if_events.rcvearly++; if (lerr == VGBSTAT_LBOFLO) adapter->if_events.Bufov++; if (lerr == VGBSTAT_LCODERR) adapter->if_events.Code++; if (lerr == VGBSTAT_LDBLNBL) adapter->if_events.Drbl++; if (lerr == VGBSTAT_LCRCERR) adapter->if_events.Crc++; if (lerr == VGBSTAT_LOFLO) adapter->if_events.oflow802++; if (lerr == VGBSTAT_LUFLO) adapter->if_events.uflow802++; } } return; } #define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000 #define M_FAST_PATH 0x0040 static void slic_rcv_handler(struct adapter *adapter) { struct net_device *netdev = adapter->netdev; struct sk_buff *skb; struct slic_rcvbuf *rcvbuf; u32 frames = 0; while ((skb = slic_rcvqueue_getnext(adapter))) { u32 rx_bytes; ASSERT(skb->head); rcvbuf = (struct slic_rcvbuf *)skb->head; adapter->card->events++; if (rcvbuf->status & IRHDDR_ERR) { adapter->rx_errors++; 
slic_rcv_handle_error(adapter, rcvbuf); slic_rcvqueue_reinsert(adapter, skb); continue; } if (!slic_mac_filter(adapter, (struct ether_header *) rcvbuf->data)) { slic_rcvqueue_reinsert(adapter, skb); continue; } skb_pull(skb, SLIC_RCVBUF_HEADSIZE); rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK); skb_put(skb, rx_bytes); netdev->stats.rx_packets++; netdev->stats.rx_bytes += rx_bytes; #if SLIC_OFFLOAD_IP_CHECKSUM skb->ip_summed = CHECKSUM_UNNECESSARY; #endif skb->dev = adapter->netdev; skb->protocol = eth_type_trans(skb, skb->dev); netif_rx(skb); ++frames; #if SLIC_INTERRUPT_PROCESS_LIMIT if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) { adapter->rcv_interrupt_yields++; break; } #endif } adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames); } static void slic_xmit_complete(struct adapter *adapter) { struct slic_hostcmd *hcmd; struct slic_rspbuf *rspbuf; u32 frames = 0; struct slic_handle_word slic_handle_word; do { rspbuf = slic_rspqueue_getnext(adapter); if (!rspbuf) break; adapter->xmit_completes++; adapter->card->events++; /* Get the complete host command buffer */ slic_handle_word.handle_token = rspbuf->hosthandle; ASSERT(slic_handle_word.handle_index); ASSERT(slic_handle_word.handle_index <= SLIC_CMDQ_MAXCMDS); hcmd = (struct slic_hostcmd *) adapter->slic_handles[slic_handle_word.handle_index]. 
address; /* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */ ASSERT(hcmd); ASSERT(hcmd->pslic_handle == &adapter->slic_handles[slic_handle_word.handle_index]); if (hcmd->type == SLIC_CMD_DUMB) { if (hcmd->skb) dev_kfree_skb_irq(hcmd->skb); slic_cmdq_putdone_irq(adapter, hcmd); } rspbuf->status = 0; rspbuf->hosthandle = 0; frames++; } while (1); adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames); } static irqreturn_t slic_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct adapter *adapter = netdev_priv(dev); u32 isr; if ((adapter->pshmem) && (adapter->pshmem->isr)) { slic_reg32_write(&adapter->slic_regs->slic_icr, ICR_INT_MASK, FLUSH); isr = adapter->isrcopy = adapter->pshmem->isr; adapter->pshmem->isr = 0; adapter->num_isrs++; switch (adapter->card->state) { case CARD_UP: if (isr & ~ISR_IO) { if (isr & ISR_ERR) { adapter->error_interrupts++; if (isr & ISR_RMISS) { int count; int pre_count; int errors; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; adapter-> error_rmiss_interrupts++; if (!rcvq->errors) rcv_count = rcvq->count; pre_count = rcvq->count; errors = rcvq->errors; while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { count = slic_rcvqueue_fill (adapter); if (!count) break; } } else if (isr & ISR_XDROP) { dev_err(&dev->dev, "isr & ISR_ERR [%x] " "ISR_XDROP \n", isr); } else { dev_err(&dev->dev, "isr & ISR_ERR [%x]\n", isr); } } if (isr & ISR_LEVENT) { adapter->linkevent_interrupts++; slic_link_event_handler(adapter); } if ((isr & ISR_UPC) || (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { adapter->upr_interrupts++; slic_upr_request_complete(adapter, isr); } } if (isr & ISR_RCV) { adapter->rcv_interrupts++; slic_rcv_handler(adapter); } if (isr & ISR_CMD) { adapter->xmit_interrupts++; slic_xmit_complete(adapter); } break; case CARD_DOWN: if ((isr & ISR_UPC) || (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { adapter->upr_interrupts++; slic_upr_request_complete(adapter, isr); } break; default: break; } 
adapter->isrcopy = 0; adapter->all_reg_writes += 2; adapter->isr_reg_writes++; slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH); } else { adapter->false_interrupts++; } return IRQ_HANDLED; } #define NORMAL_ETHFRAME 0 static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct sliccard *card; struct adapter *adapter = netdev_priv(dev); struct slic_hostcmd *hcmd = NULL; u32 status = 0; u32 skbtype = NORMAL_ETHFRAME; void *offloadcmd = NULL; card = adapter->card; ASSERT(card); if ((adapter->linkstate != LINK_UP) || (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) { status = XMIT_FAIL_LINK_STATE; goto xmit_fail; } else if (skb->len == 0) { status = XMIT_FAIL_ZERO_LENGTH; goto xmit_fail; } if (skbtype == NORMAL_ETHFRAME) { hcmd = slic_cmdq_getfree(adapter); if (!hcmd) { adapter->xmitq_full = 1; status = XMIT_FAIL_HOSTCMD_FAIL; goto xmit_fail; } ASSERT(hcmd->pslic_handle); ASSERT(hcmd->cmd64.hosthandle == hcmd->pslic_handle->token.handle_token); hcmd->skb = skb; hcmd->busy = 1; hcmd->type = SLIC_CMD_DUMB; if (skbtype == NORMAL_ETHFRAME) slic_xmit_build_request(adapter, hcmd, skb); } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; #ifdef DEBUG_DUMP if (adapter->kill_card) { struct slic_host64_cmd ihcmd; ihcmd = &hcmd->cmd64; ihcmd->flags |= 0x40; adapter->kill_card = 0; /* only do this once */ } #endif if (hcmd->paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_cbar, (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64, (hcmd->paddrl | hcmd->cmdsize), &adapter->slic_regs->slic_addr_upper, hcmd->paddrh, DONT_FLUSH); } xmit_done: return NETDEV_TX_OK; xmit_fail: slic_xmit_fail(adapter, skb, offloadcmd, skbtype, status); goto xmit_done; } static void slic_adapter_freeresources(struct adapter *adapter) { slic_init_cleanup(adapter); adapter->error_interrupts = 0; adapter->rcv_interrupts = 0; adapter->xmit_interrupts = 0; adapter->linkevent_interrupts = 0; 
adapter->upr_interrupts = 0; adapter->num_isrs = 0; adapter->xmit_completes = 0; adapter->rcv_broadcasts = 0; adapter->rcv_multicasts = 0; adapter->rcv_unicasts = 0; } static int slic_adapter_allocresources(struct adapter *adapter) { if (!adapter->intrregistered) { int retval; spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); retval = request_irq(adapter->netdev->irq, &slic_interrupt, IRQF_SHARED, adapter->netdev->name, adapter->netdev); spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); if (retval) { dev_err(&adapter->netdev->dev, "request_irq (%s) FAILED [%x]\n", adapter->netdev->name, retval); return retval; } adapter->intrregistered = 1; } return 0; } /* * slic_if_init * * Perform initialization of our slic interface. * */ static int slic_if_init(struct adapter *adapter) { struct sliccard *card = adapter->card; struct net_device *dev = adapter->netdev; __iomem struct slic_regs *slic_regs = adapter->slic_regs; struct slic_shmem *pshmem; int rc; ASSERT(card); /* adapter should be down at this point */ if (adapter->state != ADAPT_DOWN) { dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n", __func__); rc = -EIO; goto err; } ASSERT(adapter->linkstate == LINK_DOWN); adapter->devflags_prev = dev->flags; adapter->macopts = MAC_DIRECTED; if (dev->flags) { if (dev->flags & IFF_BROADCAST) adapter->macopts |= MAC_BCAST; if (dev->flags & IFF_PROMISC) adapter->macopts |= MAC_PROMISC; if (dev->flags & IFF_ALLMULTI) adapter->macopts |= MAC_ALLMCAST; if (dev->flags & IFF_MULTICAST) adapter->macopts |= MAC_MCAST; } rc = slic_adapter_allocresources(adapter); if (rc) { dev_err(&dev->dev, "%s: slic_adapter_allocresources FAILED %x\n", __func__, rc); slic_adapter_freeresources(adapter); goto err; } if (!adapter->queues_initialized) { if ((rc = slic_rspqueue_init(adapter))) goto err; if ((rc = slic_cmdq_init(adapter))) goto err; if ((rc = slic_rcvqueue_init(adapter))) goto err; adapter->queues_initialized = 1; } 
slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH); mdelay(1); if (!adapter->isp_initialized) { pshmem = (struct slic_shmem *)adapter->phys_shmem; spin_lock_irqsave(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); #if BITS_PER_LONG == 64 slic_reg32_write(&slic_regs->slic_addr_upper, SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH); slic_reg32_write(&slic_regs->slic_isp, SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH); #else slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH); slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH); #endif spin_unlock_irqrestore(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); adapter->isp_initialized = 1; } adapter->state = ADAPT_UP; if (!card->loadtimerset) { init_timer(&card->loadtimer); card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ); card->loadtimer.data = (ulong) card; card->loadtimer.function = &slic_timer_load_check; add_timer(&card->loadtimer); card->loadtimerset = 1; } if (!adapter->pingtimerset) { init_timer(&adapter->pingtimer); adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ); adapter->pingtimer.data = (ulong) dev; adapter->pingtimer.function = &slic_timer_ping; add_timer(&adapter->pingtimer); adapter->pingtimerset = 1; adapter->card->pingstatus = ISR_PINGMASK; } /* * clear any pending events, then enable interrupts */ adapter->isrcopy = 0; adapter->pshmem->isr = 0; slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH); slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH); slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD); slic_link_event_handler(adapter); err: return rc; } static int slic_entry_open(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); struct sliccard *card = adapter->card; u32 locked = 0; int status; ASSERT(adapter); ASSERT(card); netif_stop_queue(adapter->netdev); spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); locked = 1; if (!adapter->activated) { card->adapters_activated++; 
slic_global.num_slic_ports_active++; adapter->activated = 1; } status = slic_if_init(adapter); if (status != 0) { if (adapter->activated) { card->adapters_activated--; slic_global.num_slic_ports_active--; adapter->activated = 0; } if (locked) { spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); locked = 0; } return status; } if (!card->master) card->master = adapter; if (locked) { spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); locked = 0; } return 0; } static void slic_card_cleanup(struct sliccard *card) { if (card->loadtimerset) { card->loadtimerset = 0; del_timer(&card->loadtimer); } slic_debug_card_destroy(card); kfree(card); } static void __devexit slic_entry_remove(struct pci_dev *pcidev) { struct net_device *dev = pci_get_drvdata(pcidev); u32 mmio_start = 0; uint mmio_len = 0; struct adapter *adapter = netdev_priv(dev); struct sliccard *card; struct mcast_address *mcaddr, *mlist; ASSERT(adapter); slic_adapter_freeresources(adapter); slic_unmap_mmio_space(adapter); unregister_netdev(dev); mmio_start = pci_resource_start(pcidev, 0); mmio_len = pci_resource_len(pcidev, 0); release_mem_region(mmio_start, mmio_len); iounmap((void __iomem *)dev->base_addr); /* free multicast addresses */ mlist = adapter->mcastaddrs; while (mlist) { mcaddr = mlist; mlist = mlist->next; kfree(mcaddr); } ASSERT(adapter->card); card = adapter->card; ASSERT(card->adapters_allocated); card->adapters_allocated--; adapter->allocated = 0; if (!card->adapters_allocated) { struct sliccard *curr_card = slic_global.slic_card; if (curr_card == card) { slic_global.slic_card = card->next; } else { while (curr_card->next != card) curr_card = curr_card->next; ASSERT(curr_card); curr_card->next = card->next; } ASSERT(slic_global.num_slic_cards); slic_global.num_slic_cards--; slic_card_cleanup(card); } free_netdev(dev); pci_release_regions(pcidev); } static int slic_entry_halt(struct net_device *dev) { struct adapter *adapter = 
netdev_priv(dev); struct sliccard *card = adapter->card; __iomem struct slic_regs *slic_regs = adapter->slic_regs; spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); ASSERT(card); netif_stop_queue(adapter->netdev); adapter->state = ADAPT_DOWN; adapter->linkstate = LINK_DOWN; adapter->upr_list = NULL; adapter->upr_busy = 0; adapter->devflags_prev = 0; ASSERT(card->adapter[adapter->cardindex] == adapter); slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH); adapter->all_reg_writes++; adapter->icr_reg_writes++; slic_config_clear(adapter); if (adapter->activated) { card->adapters_activated--; slic_global.num_slic_ports_active--; adapter->activated = 0; } #ifdef AUTOMATIC_RESET slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH); #endif /* * Reset the adapter's cmd queues */ slic_cmdq_reset(adapter); #ifdef AUTOMATIC_RESET if (!card->adapters_activated) slic_card_init(card, adapter); #endif spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); return 0; } static struct net_device_stats *slic_get_stats(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); ASSERT(adapter); dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions; dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors; dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors; dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards; dev->stats.tx_heartbeat_errors = 0; dev->stats.tx_aborted_errors = 0; dev->stats.tx_window_errors = 0; dev->stats.tx_fifo_errors = 0; dev->stats.rx_frame_errors = 0; dev->stats.rx_length_errors = 0; return &dev->stats; } static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct adapter *adapter = netdev_priv(dev); struct ethtool_cmd edata; struct ethtool_cmd ecmd; u32 data[7]; u32 intagg; ASSERT(rq); switch (cmd) { case SIOCSLICSETINTAGG: if (copy_from_user(data, rq->ifr_data, 28)) return -EFAULT; intagg = data[0]; dev_err(&dev->dev, 
"%s: set interrupt aggregation to %d\n", __func__, intagg); slic_intagg_set(adapter, intagg); return 0; #ifdef SLIC_TRACE_DUMP_ENABLED case SIOCSLICTRACEDUMP: { u32 value; DBG_IOCTL("slic_ioctl SIOCSLIC_TRACE_DUMP\n"); if (copy_from_user(data, rq->ifr_data, 28)) { PRINT_ERROR ("slic: copy_from_user FAILED getting initial simba param\n"); return -EFAULT; } value = data[0]; if (tracemon_request == SLIC_DUMP_DONE) { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested\n"); tracemon_request = SLIC_DUMP_REQUESTED; tracemon_request_type = value; tracemon_timestamp = jiffies; } else if ((tracemon_request == SLIC_DUMP_REQUESTED) || (tracemon_request == SLIC_DUMP_IN_PROGRESS)) { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested but already in progress... ignore\n"); } else { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested\n"); tracemon_request = SLIC_DUMP_REQUESTED; tracemon_request_type = value; tracemon_timestamp = jiffies; } return 0; } #endif case SIOCETHTOOL: ASSERT(adapter); if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd))) return -EFAULT; if (ecmd.cmd == ETHTOOL_GSET) { edata.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII); edata.port = PORT_MII; edata.transceiver = XCVR_INTERNAL; edata.phy_address = 0; if (adapter->linkspeed == LINK_100MB) edata.speed = SPEED_100; else if (adapter->linkspeed == LINK_10MB) edata.speed = SPEED_10; else edata.speed = 0; if (adapter->linkduplex == LINK_FULLD) edata.duplex = DUPLEX_FULL; else edata.duplex = DUPLEX_HALF; edata.autoneg = AUTONEG_ENABLE; edata.maxtxpkt = 1; edata.maxrxpkt = 1; if (copy_to_user(rq->ifr_data, &edata, sizeof(edata))) return -EFAULT; } else if (ecmd.cmd == ETHTOOL_SSET) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (adapter->linkspeed == LINK_100MB) edata.speed = SPEED_100; else if (adapter->linkspeed == LINK_10MB) edata.speed = SPEED_10; else edata.speed = 0; if (adapter->linkduplex == LINK_FULLD) 
edata.duplex = DUPLEX_FULL; else edata.duplex = DUPLEX_HALF; edata.autoneg = AUTONEG_ENABLE; edata.maxtxpkt = 1; edata.maxrxpkt = 1; if ((ecmd.speed != edata.speed) || (ecmd.duplex != edata.duplex)) { u32 speed; u32 duplex; if (ecmd.speed == SPEED_10) speed = 0; else speed = PCR_SPEED_100; if (ecmd.duplex == DUPLEX_FULL) duplex = PCR_DUPLEX_FULL; else duplex = 0; slic_link_config(adapter, speed, duplex); slic_link_event_handler(adapter); } } return 0; default: return -EOPNOTSUPP; } } static void slic_config_pci(struct pci_dev *pcidev) { u16 pci_command; u16 new_command; pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); new_command = pci_command | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK; if (pci_command != new_command) pci_write_config_word(pcidev, PCI_COMMAND, new_command); } static int slic_card_init(struct sliccard *card, struct adapter *adapter) { __iomem struct slic_regs *slic_regs = adapter->slic_regs; struct slic_eeprom *peeprom; struct oslic_eeprom *pOeeprom; dma_addr_t phys_config; u32 phys_configh; u32 phys_configl; u32 i = 0; struct slic_shmem *pshmem; int status; uint macaddrs = card->card_size; ushort eecodesize; ushort dramsize; ushort ee_chksum; ushort calc_chksum; struct slic_config_mac *pmac; unsigned char fruformat; unsigned char oemfruformat; struct atk_fru *patkfru; union oemfru *poemfru; /* Reset everything except PCI configuration space */ slic_soft_reset(adapter); /* Download the microcode */ status = slic_card_download(adapter); if (status != 0) { dev_err(&adapter->pcidev->dev, "download failed bus %d slot %d\n", adapter->busnumber, adapter->slotnumber); return status; } if (!card->config_set) { peeprom = pci_alloc_consistent(adapter->pcidev, sizeof(struct slic_eeprom), &phys_config); phys_configl = SLIC_GET_ADDR_LOW(phys_config); phys_configh = SLIC_GET_ADDR_HIGH(phys_config); if (!peeprom) { dev_err(&adapter->pcidev->dev, "eeprom read failed to 
get memory " "bus %d slot %d\n", adapter->busnumber, adapter->slotnumber); return -ENOMEM; } else { memset(peeprom, 0, sizeof(struct slic_eeprom)); } slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH); mdelay(1); pshmem = (struct slic_shmem *)adapter->phys_shmem; spin_lock_irqsave(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH); slic_reg32_write(&slic_regs->slic_isp, SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH); spin_unlock_irqrestore(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); slic_config_get(adapter, phys_configl, phys_configh); for (;;) { if (adapter->pshmem->isr) { if (adapter->pshmem->isr & ISR_UPC) { adapter->pshmem->isr = 0; slic_reg64_write(adapter, &slic_regs->slic_isp, 0, &slic_regs->slic_addr_upper, 0, FLUSH); slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH); slic_upr_request_complete(adapter, 0); break; } else { adapter->pshmem->isr = 0; slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH); } } else { mdelay(1); i++; if (i > 5000) { dev_err(&adapter->pcidev->dev, "%d config data fetch timed out!\n", adapter->port); slic_reg64_write(adapter, &slic_regs->slic_isp, 0, &slic_regs->slic_addr_upper, 0, FLUSH); return -EINVAL; } } } switch (adapter->devid) { /* Oasis card */ case SLIC_2GB_DEVICE_ID: /* extract EEPROM data and pointers to EEPROM data */ pOeeprom = (struct oslic_eeprom *) peeprom; eecodesize = pOeeprom->EecodeSize; dramsize = pOeeprom->DramSize; pmac = pOeeprom->MacInfo; fruformat = pOeeprom->FruFormat; patkfru = &pOeeprom->AtkFru; oemfruformat = pOeeprom->OemFruFormat; poemfru = &pOeeprom->OemFru; macaddrs = 2; /* Minor kludge for Oasis card get 2 MAC addresses from the EEPROM to ensure that function 1 gets the Port 1 MAC address */ break; default: /* extract EEPROM data and pointers to EEPROM data */ eecodesize = peeprom->EecodeSize; dramsize = peeprom->DramSize; pmac = peeprom->u2.mac.MacInfo; fruformat = peeprom->FruFormat; patkfru = &peeprom->AtkFru; 
oemfruformat = peeprom->OemFruFormat; poemfru = &peeprom->OemFru; break; } card->config.EepromValid = false; /* see if the EEPROM is valid by checking it's checksum */ if ((eecodesize <= MAX_EECODE_SIZE) && (eecodesize >= MIN_EECODE_SIZE)) { ee_chksum = *(u16 *) ((char *) peeprom + (eecodesize - 2)); /* calculate the EEPROM checksum */ calc_chksum = ~slic_eeprom_cksum((char *) peeprom, (eecodesize - 2)); /* if the ucdoe chksum flag bit worked, we wouldn't need this shit */ if (ee_chksum == calc_chksum) card->config.EepromValid = true; } /* copy in the DRAM size */ card->config.DramSize = dramsize; /* copy in the MAC address(es) */ for (i = 0; i < macaddrs; i++) { memcpy(&card->config.MacInfo[i], &pmac[i], sizeof(struct slic_config_mac)); } /* copy the Alacritech FRU information */ card->config.FruFormat = fruformat; memcpy(&card->config.AtkFru, patkfru, sizeof(struct atk_fru)); pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom), peeprom, phys_config); if ((!card->config.EepromValid) && (adapter->reg_params.fail_on_bad_eeprom)) { slic_reg64_write(adapter, &slic_regs->slic_isp, 0, &slic_regs->slic_addr_upper, 0, FLUSH); dev_err(&adapter->pcidev->dev, "unsupported CONFIGURATION EEPROM invalid\n"); return -EINVAL; } card->config_set = 1; } if (slic_card_download_gbrcv(adapter)) { dev_err(&adapter->pcidev->dev, "unable to download GB receive microcode\n"); return -EINVAL; } if (slic_global.dynamic_intagg) slic_intagg_set(adapter, 0); else slic_intagg_set(adapter, intagg_delay); /* * Initialize ping status to "ok" */ card->pingstatus = ISR_PINGMASK; /* * Lastly, mark our card state as up and return success */ card->state = CARD_UP; card->reset_in_progress = 0; return 0; } static void slic_init_driver(void) { if (slic_first_init) { slic_first_init = 0; spin_lock_init(&slic_global.driver_lock.lock); slic_debug_init(); } } static void slic_init_adapter(struct net_device *netdev, struct pci_dev *pcidev, const struct pci_device_id *pci_tbl_entry, void __iomem 
*memaddr, int chip_idx) { ushort index; struct slic_handle *pslic_handle; struct adapter *adapter = netdev_priv(netdev); /* adapter->pcidev = pcidev;*/ adapter->vendid = pci_tbl_entry->vendor; adapter->devid = pci_tbl_entry->device; adapter->subsysid = pci_tbl_entry->subdevice; adapter->busnumber = pcidev->bus->number; adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F); adapter->functionnumber = (pcidev->devfn & 0x7); adapter->memorylength = pci_resource_len(pcidev, 0); adapter->slic_regs = (__iomem struct slic_regs *)memaddr; adapter->irq = pcidev->irq; /* adapter->netdev = netdev;*/ adapter->next_netdevice = head_netdevice; head_netdevice = netdev; adapter->chipid = chip_idx; adapter->port = 0; /*adapter->functionnumber;*/ adapter->cardindex = adapter->port; adapter->memorybase = memaddr; spin_lock_init(&adapter->upr_lock.lock); spin_lock_init(&adapter->bit64reglock.lock); spin_lock_init(&adapter->adapter_lock.lock); spin_lock_init(&adapter->reset_lock.lock); spin_lock_init(&adapter->handle_lock.lock); adapter->card_size = 1; /* Initialize slic_handle array */ ASSERT(SLIC_CMDQ_MAXCMDS <= 0xFFFF); /* Start with 1. 0 is an invalid host handle. 
*/ for (index = 1, pslic_handle = &adapter->slic_handles[1]; index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) { pslic_handle->token.handle_index = index; pslic_handle->type = SLIC_HANDLE_FREE; pslic_handle->next = adapter->pfree_slic_handles; adapter->pfree_slic_handles = pslic_handle; } adapter->pshmem = (struct slic_shmem *) pci_alloc_consistent(adapter->pcidev, sizeof(struct slic_shmem), &adapter-> phys_shmem); ASSERT(adapter->pshmem); memset(adapter->pshmem, 0, sizeof(struct slic_shmem)); return; } static const struct net_device_ops slic_netdev_ops = { .ndo_open = slic_entry_open, .ndo_stop = slic_entry_halt, .ndo_start_xmit = slic_xmit_start, .ndo_do_ioctl = slic_ioctl, .ndo_set_mac_address = slic_mac_set_address, .ndo_get_stats = slic_get_stats, .ndo_set_rx_mode = slic_mcast_set_list, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static u32 slic_card_locate(struct adapter *adapter) { struct sliccard *card = slic_global.slic_card; struct physcard *physcard = slic_global.phys_card; ushort card_hostid; u16 __iomem *hostid_reg; uint i; uint rdhostid_offset = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: rdhostid_offset = SLIC_RDHOSTID_2GB; break; case SLIC_1GB_DEVICE_ID: rdhostid_offset = SLIC_RDHOSTID_1GB; break; default: ASSERT(0); break; } hostid_reg = (u16 __iomem *) (((u8 __iomem *) (adapter->slic_regs)) + rdhostid_offset); /* read the 16 bit hostid from SRAM */ card_hostid = (ushort) readw(hostid_reg); /* Initialize a new card structure if need be */ if (card_hostid == SLIC_HOSTID_DEFAULT) { card = kzalloc(sizeof(struct sliccard), GFP_KERNEL); if (card == NULL) return -ENOMEM; card->next = slic_global.slic_card; slic_global.slic_card = card; card->busnumber = adapter->busnumber; card->slotnumber = adapter->slotnumber; /* Find an available cardnum */ for (i = 0; i < SLIC_MAX_CARDS; i++) { if (slic_global.cardnuminuse[i] == 0) { slic_global.cardnuminuse[i] = 1; card->cardnum = i; break; } } 
slic_global.num_slic_cards++; slic_debug_card_create(card); } else { /* Card exists, find the card this adapter belongs to */ while (card) { if (card->cardnum == card_hostid) break; card = card->next; } } ASSERT(card); if (!card) return -ENXIO; /* Put the adapter in the card's adapter list */ ASSERT(card->adapter[adapter->port] == NULL); if (!card->adapter[adapter->port]) { card->adapter[adapter->port] = adapter; adapter->card = card; } card->card_size = 1; /* one port per *logical* card */ while (physcard) { for (i = 0; i < SLIC_MAX_PORTS; i++) { if (!physcard->adapter[i]) continue; else break; } ASSERT(i != SLIC_MAX_PORTS); if (physcard->adapter[i]->slotnumber == adapter->slotnumber) break; physcard = physcard->next; } if (!physcard) { /* no structure allocated for this physical card yet */ physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC); ASSERT(physcard); physcard->next = slic_global.phys_card; slic_global.phys_card = physcard; physcard->adapters_allocd = 1; } else { physcard->adapters_allocd++; } /* Note - this is ZERO relative */ adapter->physport = physcard->adapters_allocd - 1; ASSERT(physcard->adapter[adapter->physport] == NULL); physcard->adapter[adapter->physport] = adapter; adapter->physcard = physcard; return 0; } static int __devinit slic_entry_probe(struct pci_dev *pcidev, const struct pci_device_id *pci_tbl_entry) { static int cards_found; static int did_version; int err = -ENODEV; struct net_device *netdev; struct adapter *adapter; void __iomem *memmapped_ioaddr = NULL; u32 status = 0; ulong mmio_start = 0; ulong mmio_len = 0; struct sliccard *card = NULL; int pci_using_dac = 0; slic_global.dynamic_intagg = dynamic_intagg; err = pci_enable_device(pcidev); if (err) return err; if (slic_debug > 0 && did_version++ == 0) { printk(KERN_DEBUG "%s\n", slic_banner); printk(KERN_DEBUG "%s\n", slic_proc_version); } if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { pci_using_dac = 1; if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { 
dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for " "consistent allocations\n"); goto err_out_disable_pci; } } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { pci_using_dac = 0; pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); } else { dev_err(&pcidev->dev, "no usable DMA configuration\n"); goto err_out_disable_pci; } err = pci_request_regions(pcidev, DRV_NAME); if (err) { dev_err(&pcidev->dev, "can't obtain PCI resources\n"); goto err_out_disable_pci; } pci_set_master(pcidev); netdev = alloc_etherdev(sizeof(struct adapter)); if (!netdev) { err = -ENOMEM; goto err_out_exit_slic_probe; } SET_NETDEV_DEV(netdev, &pcidev->dev); pci_set_drvdata(pcidev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pcidev = pcidev; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; mmio_start = pci_resource_start(pcidev, 0); mmio_len = pci_resource_len(pcidev, 0); /* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/ memmapped_ioaddr = ioremap(mmio_start, mmio_len); if (!memmapped_ioaddr) { dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n", mmio_len, mmio_start); goto err_out_free_netdev; } slic_config_pci(pcidev); slic_init_driver(); slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found); status = slic_card_locate(adapter); if (status) { dev_err(&pcidev->dev, "cannot locate card\n"); goto err_out_free_mmio_region; } card = adapter->card; if (!adapter->allocated) { card->adapters_allocated++; adapter->allocated = 1; } status = slic_card_init(card, adapter); if (status != 0) { card->state = CARD_FAIL; adapter->state = ADAPT_FAIL; adapter->linkstate = LINK_DOWN; dev_err(&pcidev->dev, "FAILED status[%x]\n", status); } else { slic_adapter_set_hwaddr(adapter); } netdev->base_addr = (unsigned long)adapter->memorybase; netdev->irq = adapter->irq; netdev->netdev_ops = &slic_netdev_ops; slic_debug_adapter_create(adapter); strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) { 
dev_err(&pcidev->dev, "Cannot register net device, aborting.\n"); goto err_out_unmap; } cards_found++; return status; err_out_unmap: iounmap(memmapped_ioaddr); err_out_free_mmio_region: release_mem_region(mmio_start, mmio_len); err_out_free_netdev: free_netdev(netdev); err_out_exit_slic_probe: pci_release_regions(pcidev); err_out_disable_pci: pci_disable_device(pcidev); return err; } static struct pci_driver slic_driver = { .name = DRV_NAME, .id_table = slic_pci_tbl, .probe = slic_entry_probe, .remove = __devexit_p(slic_entry_remove), }; static int __init slic_module_init(void) { slic_init_driver(); if (debug >= 0 && slic_debug != debug) printk(KERN_DEBUG KBUILD_MODNAME ": debug level is %d.\n", debug); if (debug >= 0) slic_debug = debug; return pci_register_driver(&slic_driver); } static void __exit slic_module_cleanup(void) { pci_unregister_driver(&slic_driver); slic_debug_cleanup(); } module_init(slic_module_init); module_exit(slic_module_cleanup);
gpl-2.0
markbencze/android_kernel_lge_hammerhead
arch/x86/mm/pageattr-test.c
5630
5387
/* * self test for change_page_attr. * * Clears the a test pte bit on random pages in the direct mapping, * then reverts and compares page tables forwards and afterwards. */ #include <linux/bootmem.h> #include <linux/kthread.h> #include <linux/random.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/kdebug.h> /* * Only print the results of the first pass: */ static __read_mostly int print = 1; enum { NTEST = 400, #ifdef CONFIG_X86_64 LPS = (1 << PMD_SHIFT), #elif defined(CONFIG_X86_PAE) LPS = (1 << PMD_SHIFT), #else LPS = (1 << 22), #endif GPS = (1<<30) }; #define PAGE_CPA_TEST __pgprot(_PAGE_CPA_TEST) static int pte_testbit(pte_t pte) { return pte_flags(pte) & _PAGE_UNUSED1; } struct split_state { long lpg, gpg, spg, exec; long min_exec, max_exec; }; static int print_split(struct split_state *s) { long i, expected, missed = 0; int err = 0; s->lpg = s->gpg = s->spg = s->exec = 0; s->min_exec = ~0UL; s->max_exec = 0; for (i = 0; i < max_pfn_mapped; ) { unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT); unsigned int level; pte_t *pte; pte = lookup_address(addr, &level); if (!pte) { missed++; i++; continue; } if (level == PG_LEVEL_1G && sizeof(long) == 8) { s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { if (!(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); err = 1; } s->lpg++; i += LPS/PAGE_SIZE; } else { s->spg++; i++; } if (!(pte_val(*pte) & _PAGE_NX)) { s->exec++; if (addr < s->min_exec) s->min_exec = addr; if (addr > s->max_exec) s->max_exec = addr; } } if (print) { printk(KERN_INFO " 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n", s->spg, s->lpg, s->gpg, s->exec, s->min_exec != ~0UL ? 
s->min_exec : 0, s->max_exec, missed); } expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed; if (expected != i) { printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n", max_pfn_mapped, expected); return 1; } return err; } static unsigned long addr[NTEST]; static unsigned int len[NTEST]; /* Change the global bit on random pages in the direct mapping */ static int pageattr_test(void) { struct split_state sa, sb, sc; unsigned long *bm; pte_t *pte, pte0; int failed = 0; unsigned int level; int i, k; int err; unsigned long test_addr; if (print) printk(KERN_INFO "CPA self-test:\n"); bm = vzalloc((max_pfn_mapped + 7) / 8); if (!bm) { printk(KERN_ERR "CPA Cannot vmalloc bitmap\n"); return -ENOMEM; } failed += print_split(&sa); srandom32(100); for (i = 0; i < NTEST; i++) { unsigned long pfn = random32() % max_pfn_mapped; addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); len[i] = random32() % 100; len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); if (len[i] == 0) len[i] = 1; pte = NULL; pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */ for (k = 0; k < len[i]; k++) { pte = lookup_address(addr[i] + k*PAGE_SIZE, &level); if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 || !(pte_val(*pte) & _PAGE_PRESENT)) { addr[i] = 0; break; } if (k == 0) { pte0 = *pte; } else { if (pgprot_val(pte_pgprot(*pte)) != pgprot_val(pte_pgprot(pte0))) { len[i] = k; break; } } if (test_bit(pfn + k, bm)) { len[i] = k; break; } __set_bit(pfn + k, bm); } if (!addr[i] || !pte || !k) { addr[i] = 0; continue; } test_addr = addr[i]; err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0); if (err < 0) { printk(KERN_ERR "CPA %d failed %d\n", i, err); failed++; } pte = lookup_address(addr[i], &level); if (!pte || !pte_testbit(*pte) || pte_huge(*pte)) { printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i], pte ? 
(u64)pte_val(*pte) : 0ULL); failed++; } if (level != PG_LEVEL_4K) { printk(KERN_ERR "CPA %lx: unexpected level %d\n", addr[i], level); failed++; } } vfree(bm); failed += print_split(&sb); for (i = 0; i < NTEST; i++) { if (!addr[i]) continue; pte = lookup_address(addr[i], &level); if (!pte) { printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]); failed++; continue; } test_addr = addr[i]; err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0); if (err < 0) { printk(KERN_ERR "CPA reverting failed: %d\n", err); failed++; } pte = lookup_address(addr[i], &level); if (!pte || pte_testbit(*pte)) { printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n", addr[i], pte ? (u64)pte_val(*pte) : 0ULL); failed++; } } failed += print_split(&sc); if (failed) { WARN(1, KERN_ERR "NOT PASSED. Please report.\n"); return -EINVAL; } else { if (print) printk(KERN_INFO "ok.\n"); } return 0; } static int do_pageattr_test(void *__unused) { while (!kthread_should_stop()) { schedule_timeout_interruptible(HZ*30); if (pageattr_test() < 0) break; if (print) print--; } return 0; } static int start_pageattr_test(void) { struct task_struct *p; p = kthread_create(do_pageattr_test, NULL, "pageattr-test"); if (!IS_ERR(p)) wake_up_process(p); else WARN_ON(1); return 0; } module_init(start_pageattr_test);
gpl-2.0
AOKP/kernel_asus_tf101
drivers/hid/hid-monterey.c
7422
2139
/* * HID driver for some monterey "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; } #define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR); break; case 0x157: mr_map_key_clear(KEY_SPREADSHEET); break; case 0x158: mr_map_key_clear(KEY_PRESENTATION); break; case 0x15c: mr_map_key_clear(KEY_STOP); break; default: return 0; } return 1; } static const struct hid_device_id mr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { } }; MODULE_DEVICE_TABLE(hid, mr_devices); static struct hid_driver mr_driver = { .name = "monterey", .id_table = mr_devices, .report_fixup = mr_report_fixup, .input_mapping = mr_input_mapping, }; static int __init mr_init(void) { return hid_register_driver(&mr_driver); } static void __exit mr_exit(void) { hid_unregister_driver(&mr_driver); } 
module_init(mr_init); module_exit(mr_exit); MODULE_LICENSE("GPL");
gpl-2.0
playfulgod/kernel_lge_dory
drivers/net/wireless/b43/phy_g.c
7934
83156
/* Broadcom B43 wireless driver IEEE 802.11g PHY driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2008 Michael Buesch <m@bues.ch> Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "phy_g.h" #include "phy_common.h" #include "lo.h" #include "main.h" #include <linux/bitrev.h> #include <linux/slab.h> static const s8 b43_tssi2dbm_g_table[] = { 77, 77, 77, 76, 76, 76, 75, 75, 74, 74, 73, 73, 73, 72, 72, 71, 71, 70, 70, 69, 68, 68, 67, 67, 66, 65, 65, 64, 63, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 50, 49, 47, 45, 43, 40, 37, 33, 28, 22, 14, 5, -7, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, }; static const u8 b43_radio_channel_codes_bg[] = { 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 84, }; static void b43_calc_nrssi_threshold(struct b43_wldev *dev); #define bitrev4(tmp) (bitrev8(tmp) >> 4) /* Get the freq, as it has to be written to the device. 
*/ static inline u16 channel2freq_bg(u8 channel) { B43_WARN_ON(!(channel >= 1 && channel <= 14)); return b43_radio_channel_codes_bg[channel - 1]; } static void generate_rfatt_list(struct b43_wldev *dev, struct b43_rfatt_list *list) { struct b43_phy *phy = &dev->phy; /* APHY.rev < 5 || GPHY.rev < 6 */ static const struct b43_rfatt rfatt_0[] = { {.att = 3,.with_padmix = 0,}, {.att = 1,.with_padmix = 0,}, {.att = 5,.with_padmix = 0,}, {.att = 7,.with_padmix = 0,}, {.att = 9,.with_padmix = 0,}, {.att = 2,.with_padmix = 0,}, {.att = 0,.with_padmix = 0,}, {.att = 4,.with_padmix = 0,}, {.att = 6,.with_padmix = 0,}, {.att = 8,.with_padmix = 0,}, {.att = 1,.with_padmix = 1,}, {.att = 2,.with_padmix = 1,}, {.att = 3,.with_padmix = 1,}, {.att = 4,.with_padmix = 1,}, }; /* Radio.rev == 8 && Radio.version == 0x2050 */ static const struct b43_rfatt rfatt_1[] = { {.att = 2,.with_padmix = 1,}, {.att = 4,.with_padmix = 1,}, {.att = 6,.with_padmix = 1,}, {.att = 8,.with_padmix = 1,}, {.att = 10,.with_padmix = 1,}, {.att = 12,.with_padmix = 1,}, {.att = 14,.with_padmix = 1,}, }; /* Otherwise */ static const struct b43_rfatt rfatt_2[] = { {.att = 0,.with_padmix = 1,}, {.att = 2,.with_padmix = 1,}, {.att = 4,.with_padmix = 1,}, {.att = 6,.with_padmix = 1,}, {.att = 8,.with_padmix = 1,}, {.att = 9,.with_padmix = 1,}, {.att = 9,.with_padmix = 1,}, }; if (!b43_has_hardware_pctl(dev)) { /* Software pctl */ list->list = rfatt_0; list->len = ARRAY_SIZE(rfatt_0); list->min_val = 0; list->max_val = 9; return; } if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { /* Hardware pctl */ list->list = rfatt_1; list->len = ARRAY_SIZE(rfatt_1); list->min_val = 0; list->max_val = 14; return; } /* Hardware pctl */ list->list = rfatt_2; list->len = ARRAY_SIZE(rfatt_2); list->min_val = 0; list->max_val = 9; } static void generate_bbatt_list(struct b43_wldev *dev, struct b43_bbatt_list *list) { static const struct b43_bbatt bbatt_0[] = { {.att = 0,}, {.att = 1,}, {.att = 2,}, {.att = 3,}, {.att = 4,}, 
{.att = 5,}, {.att = 6,}, {.att = 7,}, {.att = 8,}, }; list->list = bbatt_0; list->len = ARRAY_SIZE(bbatt_0); list->min_val = 0; list->max_val = 8; } static void b43_shm_clear_tssi(struct b43_wldev *dev) { b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F); b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F); b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F); b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F); } /* Synthetic PU workaround */ static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel) { struct b43_phy *phy = &dev->phy; might_sleep(); if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) { /* We do not need the workaround. */ return; } if (channel <= 10) { b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel + 4)); } else { b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1)); } msleep(1); b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); } /* Set the baseband attenuation value on chip. */ void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev, u16 baseband_attenuation) { struct b43_phy *phy = &dev->phy; if (phy->analog == 0) { b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0) & 0xFFF0) | baseband_attenuation); } else if (phy->analog > 1) { b43_phy_maskset(dev, B43_PHY_DACCTL, 0xFFC3, (baseband_attenuation << 2)); } else { b43_phy_maskset(dev, B43_PHY_DACCTL, 0xFF87, (baseband_attenuation << 3)); } } /* Adjust the transmission power output (G-PHY) */ static void b43_set_txpower_g(struct b43_wldev *dev, const struct b43_bbatt *bbatt, const struct b43_rfatt *rfatt, u8 tx_control) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; struct b43_txpower_lo_control *lo = gphy->lo_control; u16 bb, rf; u16 tx_bias, tx_magn; bb = bbatt->att; rf = rfatt->att; tx_bias = lo->tx_bias; tx_magn = lo->tx_magn; if (unlikely(tx_bias == 0xFF)) tx_bias = 0; /* Save the values for later. Use memmove, because it's valid * to pass &gphy->rfatt as rfatt pointer argument. Same for bbatt. 
*/ gphy->tx_control = tx_control; memmove(&gphy->rfatt, rfatt, sizeof(*rfatt)); gphy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX); memmove(&gphy->bbatt, bbatt, sizeof(*bbatt)); if (b43_debug(dev, B43_DBG_XMITPOWER)) { b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " "rfatt(%u), tx_control(0x%02X), " "tx_bias(0x%02X), tx_magn(0x%02X)\n", bb, rf, tx_control, tx_bias, tx_magn); } b43_gphy_set_baseband_attenuation(dev, bb); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { b43_radio_write16(dev, 0x43, (rf & 0x000F) | (tx_control & 0x0070)); } else { b43_radio_maskset(dev, 0x43, 0xFFF0, (rf & 0x000F)); b43_radio_maskset(dev, 0x52, ~0x0070, (tx_control & 0x0070)); } if (has_tx_magnification(phy)) { b43_radio_write16(dev, 0x52, tx_magn | tx_bias); } else { b43_radio_maskset(dev, 0x52, 0xFFF0, (tx_bias & 0x000F)); } b43_lo_g_adjust(dev); } /* GPHY_TSSI_Power_Lookup_Table_Init */ static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev) { struct b43_phy_g *gphy = dev->phy.g; int i; u16 value; for (i = 0; i < 32; i++) b43_ofdmtab_write16(dev, 0x3C20, i, gphy->tssi2dbm[i]); for (i = 32; i < 64; i++) b43_ofdmtab_write16(dev, 0x3C00, i - 32, gphy->tssi2dbm[i]); for (i = 0; i < 64; i += 2) { value = (u16) gphy->tssi2dbm[i]; value |= ((u16) gphy->tssi2dbm[i + 1]) << 8; b43_phy_write(dev, 0x380 + (i / 2), value); } } /* GPHY_Gain_Lookup_Table_Init */ static void b43_gphy_gain_lt_init(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; struct b43_txpower_lo_control *lo = gphy->lo_control; u16 nr_written = 0; u16 tmp; u8 rf, bb; for (rf = 0; rf < lo->rfatt_list.len; rf++) { for (bb = 0; bb < lo->bbatt_list.len; bb++) { if (nr_written >= 0x40) return; tmp = lo->bbatt_list.list[bb].att; tmp <<= 8; if (phy->radio_rev == 8) tmp |= 0x50; else tmp |= 0x40; tmp |= lo->rfatt_list.list[rf].att; b43_phy_write(dev, 0x3C0 + nr_written, tmp); nr_written++; } } } static void 
b43_set_all_gains(struct b43_wldev *dev, s16 first, s16 second, s16 third) { struct b43_phy *phy = &dev->phy; u16 i; u16 start = 0x08, end = 0x18; u16 tmp; u16 table; if (phy->rev <= 1) { start = 0x10; end = 0x20; } table = B43_OFDMTAB_GAINX; if (phy->rev <= 1) table = B43_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) b43_ofdmtab_write16(dev, table, i, first); for (i = start; i < end; i++) b43_ofdmtab_write16(dev, table, i, second); if (third != -1) { tmp = ((u16) third << 14) | ((u16) third << 6); b43_phy_maskset(dev, 0x04A0, 0xBFBF, tmp); b43_phy_maskset(dev, 0x04A1, 0xBFBF, tmp); b43_phy_maskset(dev, 0x04A2, 0xBFBF, tmp); } b43_dummy_transmission(dev, false, true); } static void b43_set_original_gains(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u16 i, tmp; u16 table; u16 start = 0x0008, end = 0x0018; if (phy->rev <= 1) { start = 0x0010; end = 0x0020; } table = B43_OFDMTAB_GAINX; if (phy->rev <= 1) table = B43_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) { tmp = (i & 0xFFFC); tmp |= (i & 0x0001) << 1; tmp |= (i & 0x0002) >> 1; b43_ofdmtab_write16(dev, table, i, tmp); } for (i = start; i < end; i++) b43_ofdmtab_write16(dev, table, i, i - start); b43_phy_maskset(dev, 0x04A0, 0xBFBF, 0x4040); b43_phy_maskset(dev, 0x04A1, 0xBFBF, 0x4040); b43_phy_maskset(dev, 0x04A2, 0xBFBF, 0x4000); b43_dummy_transmission(dev, false, true); } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) { b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) { u16 val; b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); val = b43_phy_read(dev, B43_PHY_NRSSILT_DATA); return (s16) val; } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) { u16 i; s16 tmp; for (i = 0; i < 
64; i++) { tmp = b43_nrssi_hw_read(dev, i); tmp -= val; tmp = clamp_val(tmp, -32, 31); b43_nrssi_hw_write(dev, i, tmp); } } /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_mem_update(struct b43_wldev *dev) { struct b43_phy_g *gphy = dev->phy.g; s16 i, delta; s32 tmp; delta = 0x1F - gphy->nrssi[0]; for (i = 0; i < 64; i++) { tmp = (i - delta) * gphy->nrssislope; tmp /= 0x10000; tmp += 0x3A; tmp = clamp_val(tmp, 0, 0x3F); gphy->nrssi_lt[i] = tmp; } } static void b43_calc_nrssi_offset(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u16 backup[20] = { 0 }; s16 v47F; u16 i; u16 saved = 0xFFFF; backup[0] = b43_phy_read(dev, 0x0001); backup[1] = b43_phy_read(dev, 0x0811); backup[2] = b43_phy_read(dev, 0x0812); if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ backup[3] = b43_phy_read(dev, 0x0814); backup[4] = b43_phy_read(dev, 0x0815); } backup[5] = b43_phy_read(dev, 0x005A); backup[6] = b43_phy_read(dev, 0x0059); backup[7] = b43_phy_read(dev, 0x0058); backup[8] = b43_phy_read(dev, 0x000A); backup[9] = b43_phy_read(dev, 0x0003); backup[10] = b43_radio_read16(dev, 0x007A); backup[11] = b43_radio_read16(dev, 0x0043); b43_phy_mask(dev, 0x0429, 0x7FFF); b43_phy_maskset(dev, 0x0001, 0x3FFF, 0x4000); b43_phy_set(dev, 0x0811, 0x000C); b43_phy_maskset(dev, 0x0812, 0xFFF3, 0x0004); b43_phy_mask(dev, 0x0802, ~(0x1 | 0x2)); if (phy->rev >= 6) { backup[12] = b43_phy_read(dev, 0x002E); backup[13] = b43_phy_read(dev, 0x002F); backup[14] = b43_phy_read(dev, 0x080F); backup[15] = b43_phy_read(dev, 0x0810); backup[16] = b43_phy_read(dev, 0x0801); backup[17] = b43_phy_read(dev, 0x0060); backup[18] = b43_phy_read(dev, 0x0014); backup[19] = b43_phy_read(dev, 0x0478); b43_phy_write(dev, 0x002E, 0); b43_phy_write(dev, 0x002F, 0); b43_phy_write(dev, 0x080F, 0); b43_phy_write(dev, 0x0810, 0); b43_phy_set(dev, 0x0478, 0x0100); b43_phy_set(dev, 0x0801, 0x0040); b43_phy_set(dev, 0x0060, 0x0040); b43_phy_set(dev, 0x0014, 
0x0200); } b43_radio_set(dev, 0x007A, 0x0070); b43_radio_set(dev, 0x007A, 0x0080); udelay(30); v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (v47F >= 0x20) v47F -= 0x40; if (v47F == 31) { for (i = 7; i >= 4; i--) { b43_radio_write16(dev, 0x007B, i); udelay(20); v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (v47F >= 0x20) v47F -= 0x40; if (v47F < 31 && saved == 0xFFFF) saved = i; } if (saved == 0xFFFF) saved = 4; } else { b43_radio_mask(dev, 0x007A, 0x007F); if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ b43_phy_set(dev, 0x0814, 0x0001); b43_phy_mask(dev, 0x0815, 0xFFFE); } b43_phy_set(dev, 0x0811, 0x000C); b43_phy_set(dev, 0x0812, 0x000C); b43_phy_set(dev, 0x0811, 0x0030); b43_phy_set(dev, 0x0812, 0x0030); b43_phy_write(dev, 0x005A, 0x0480); b43_phy_write(dev, 0x0059, 0x0810); b43_phy_write(dev, 0x0058, 0x000D); if (phy->rev == 0) { b43_phy_write(dev, 0x0003, 0x0122); } else { b43_phy_set(dev, 0x000A, 0x2000); } if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ b43_phy_set(dev, 0x0814, 0x0004); b43_phy_mask(dev, 0x0815, 0xFFFB); } b43_phy_maskset(dev, 0x0003, 0xFF9F, 0x0040); b43_radio_set(dev, 0x007A, 0x000F); b43_set_all_gains(dev, 3, 0, 1); b43_radio_maskset(dev, 0x0043, 0x00F0, 0x000F); udelay(30); v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (v47F >= 0x20) v47F -= 0x40; if (v47F == -32) { for (i = 0; i < 4; i++) { b43_radio_write16(dev, 0x007B, i); udelay(20); v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (v47F >= 0x20) v47F -= 0x40; if (v47F > -31 && saved == 0xFFFF) saved = i; } if (saved == 0xFFFF) saved = 3; } else saved = 0; } b43_radio_write16(dev, 0x007B, saved); if (phy->rev >= 6) { b43_phy_write(dev, 0x002E, backup[12]); b43_phy_write(dev, 0x002F, backup[13]); b43_phy_write(dev, 0x080F, backup[14]); b43_phy_write(dev, 0x0810, backup[15]); } if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ 
b43_phy_write(dev, 0x0814, backup[3]); b43_phy_write(dev, 0x0815, backup[4]); } b43_phy_write(dev, 0x005A, backup[5]); b43_phy_write(dev, 0x0059, backup[6]); b43_phy_write(dev, 0x0058, backup[7]); b43_phy_write(dev, 0x000A, backup[8]); b43_phy_write(dev, 0x0003, backup[9]); b43_radio_write16(dev, 0x0043, backup[11]); b43_radio_write16(dev, 0x007A, backup[10]); b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x1 | 0x2); b43_phy_set(dev, 0x0429, 0x8000); b43_set_original_gains(dev); if (phy->rev >= 6) { b43_phy_write(dev, 0x0801, backup[16]); b43_phy_write(dev, 0x0060, backup[17]); b43_phy_write(dev, 0x0014, backup[18]); b43_phy_write(dev, 0x0478, backup[19]); } b43_phy_write(dev, 0x0001, backup[0]); b43_phy_write(dev, 0x0812, backup[2]); b43_phy_write(dev, 0x0811, backup[1]); } static void b43_calc_nrssi_slope(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 backup[18] = { 0 }; u16 tmp; s16 nrssi0, nrssi1; B43_WARN_ON(phy->type != B43_PHYTYPE_G); if (phy->radio_rev >= 9) return; if (phy->radio_rev == 8) b43_calc_nrssi_offset(dev); b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF); b43_phy_mask(dev, 0x0802, 0xFFFC); backup[7] = b43_read16(dev, 0x03E2); b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); backup[0] = b43_radio_read16(dev, 0x007A); backup[1] = b43_radio_read16(dev, 0x0052); backup[2] = b43_radio_read16(dev, 0x0043); backup[3] = b43_phy_read(dev, 0x0015); backup[4] = b43_phy_read(dev, 0x005A); backup[5] = b43_phy_read(dev, 0x0059); backup[6] = b43_phy_read(dev, 0x0058); backup[8] = b43_read16(dev, 0x03E6); backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); if (phy->rev >= 3) { backup[10] = b43_phy_read(dev, 0x002E); backup[11] = b43_phy_read(dev, 0x002F); backup[12] = b43_phy_read(dev, 0x080F); backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); backup[14] = b43_phy_read(dev, 0x0801); backup[15] = b43_phy_read(dev, 0x0060); backup[16] = b43_phy_read(dev, 0x0014); backup[17] = b43_phy_read(dev, 
0x0478); b43_phy_write(dev, 0x002E, 0); b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); switch (phy->rev) { case 4: case 6: case 7: b43_phy_set(dev, 0x0478, 0x0100); b43_phy_set(dev, 0x0801, 0x0040); break; case 3: case 5: b43_phy_mask(dev, 0x0801, 0xFFBF); break; } b43_phy_set(dev, 0x0060, 0x0040); b43_phy_set(dev, 0x0014, 0x0200); } b43_radio_set(dev, 0x007A, 0x0070); b43_set_all_gains(dev, 0, 8, 0); b43_radio_mask(dev, 0x007A, 0x00F7); if (phy->rev >= 2) { b43_phy_maskset(dev, 0x0811, 0xFFCF, 0x0030); b43_phy_maskset(dev, 0x0812, 0xFFCF, 0x0010); } b43_radio_set(dev, 0x007A, 0x0080); udelay(20); nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (nrssi0 >= 0x0020) nrssi0 -= 0x0040; b43_radio_mask(dev, 0x007A, 0x007F); if (phy->rev >= 2) { b43_phy_maskset(dev, 0x0003, 0xFF9F, 0x0040); } b43_write16(dev, B43_MMIO_CHANNEL_EXT, b43_read16(dev, B43_MMIO_CHANNEL_EXT) | 0x2000); b43_radio_set(dev, 0x007A, 0x000F); b43_phy_write(dev, 0x0015, 0xF330); if (phy->rev >= 2) { b43_phy_maskset(dev, 0x0812, 0xFFCF, 0x0020); b43_phy_maskset(dev, 0x0811, 0xFFCF, 0x0020); } b43_set_all_gains(dev, 3, 0, 1); if (phy->radio_rev == 8) { b43_radio_write16(dev, 0x0043, 0x001F); } else { tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; b43_radio_write16(dev, 0x0052, tmp | 0x0060); tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; b43_radio_write16(dev, 0x0043, tmp | 0x0009); } b43_phy_write(dev, 0x005A, 0x0480); b43_phy_write(dev, 0x0059, 0x0810); b43_phy_write(dev, 0x0058, 0x000D); udelay(20); nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); if (nrssi1 >= 0x0020) nrssi1 -= 0x0040; if (nrssi0 == nrssi1) gphy->nrssislope = 0x00010000; else gphy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); if (nrssi0 >= -4) { gphy->nrssi[0] = nrssi1; gphy->nrssi[1] = nrssi0; } if (phy->rev >= 3) { b43_phy_write(dev, 0x002E, backup[10]); b43_phy_write(dev, 0x002F, backup[11]); b43_phy_write(dev, 0x080F, backup[12]); b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]); } if (phy->rev >= 2) { 
b43_phy_mask(dev, 0x0812, 0xFFCF); b43_phy_mask(dev, 0x0811, 0xFFCF); } b43_radio_write16(dev, 0x007A, backup[0]); b43_radio_write16(dev, 0x0052, backup[1]); b43_radio_write16(dev, 0x0043, backup[2]); b43_write16(dev, 0x03E2, backup[7]); b43_write16(dev, 0x03E6, backup[8]); b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); b43_phy_write(dev, 0x0015, backup[3]); b43_phy_write(dev, 0x005A, backup[4]); b43_phy_write(dev, 0x0059, backup[5]); b43_phy_write(dev, 0x0058, backup[6]); b43_synth_pu_workaround(dev, phy->channel); b43_phy_set(dev, 0x0802, (0x0001 | 0x0002)); b43_set_original_gains(dev); b43_phy_set(dev, B43_PHY_G_CRS, 0x8000); if (phy->rev >= 3) { b43_phy_write(dev, 0x0801, backup[14]); b43_phy_write(dev, 0x0060, backup[15]); b43_phy_write(dev, 0x0014, backup[16]); b43_phy_write(dev, 0x0478, backup[17]); } b43_nrssi_mem_update(dev); b43_calc_nrssi_threshold(dev); } static void b43_calc_nrssi_threshold(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; s32 a, b; s16 tmp16; u16 tmp_u16; B43_WARN_ON(phy->type != B43_PHYTYPE_G); if (!phy->gmode || !(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) { tmp16 = b43_nrssi_hw_read(dev, 0x20); if (tmp16 >= 0x20) tmp16 -= 0x40; if (tmp16 < 3) { b43_phy_maskset(dev, 0x048A, 0xF000, 0x09EB); } else { b43_phy_maskset(dev, 0x048A, 0xF000, 0x0AED); } } else { if (gphy->interfmode == B43_INTERFMODE_NONWLAN) { a = 0xE; b = 0xA; } else if (!gphy->aci_wlan_automatic && gphy->aci_enable) { a = 0x13; b = 0x12; } else { a = 0xE; b = 0x11; } a = a * (gphy->nrssi[1] - gphy->nrssi[0]); a += (gphy->nrssi[0] << 6); if (a < 32) a += 31; else a += 32; a = a >> 6; a = clamp_val(a, -31, 31); b = b * (gphy->nrssi[1] - gphy->nrssi[0]); b += (gphy->nrssi[0] << 6); if (b < 32) b += 31; else b += 32; b = b >> 6; b = clamp_val(b, -31, 31); tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; tmp_u16 |= ((u32) b & 0x0000003F); tmp_u16 |= (((u32) a & 0x0000003F) << 6); b43_phy_write(dev, 0x048A, tmp_u16); } } /* 
   Stack implementation to save/restore values from the
 * interference mitigation code.
 * It is safe to restore values in random order.
 */

/*
 * Pack one (id, register offset, register value) triple into a single
 * u32 slot of the caller-supplied stack array and advance the index.
 * Slot layout: bits 0-11 = offset, bits 12-15 = id, bits 16-31 = value.
 */
static void _stack_save(u32 *_stackptr, size_t *stackidx,
			u8 id, u16 offset, u16 value)
{
	u32 *stackptr = &(_stackptr[*stackidx]);

	/* offset must fit in 12 bits and id in 4 bits, otherwise the
	 * packed encoding below would corrupt neighbouring fields. */
	B43_WARN_ON(offset & 0xF000);
	B43_WARN_ON(id & 0xF0);
	*stackptr = offset;
	*stackptr |= ((u32) id) << 12;
	*stackptr |= ((u32) value) << 16;
	(*stackidx)++;
	/* NOTE(review): this fires once the stack has become full, i.e.
	 * it warns *before* the next save could overflow, not after. */
	B43_WARN_ON(*stackidx >= B43_INTERFSTACK_SIZE);
}

/*
 * Linear scan of the stack for the slot saved with the same (id, offset)
 * pair; returns the 16-bit value stored in it.  Warns and returns 0 if
 * no matching slot is found.
 */
static u16 _stack_restore(u32 *stackptr, u8 id, u16 offset)
{
	size_t i;

	B43_WARN_ON(offset & 0xF000);
	B43_WARN_ON(id & 0xF0);
	for (i = 0; i < B43_INTERFSTACK_SIZE; i++, stackptr++) {
		if ((*stackptr & 0x00000FFF) != offset)
			continue;
		if (((*stackptr & 0x0000F000) >> 12) != id)
			continue;
		return ((*stackptr & 0xFFFF0000) >> 16);
	}
	B43_WARN_ON(1);

	return 0;
}

/*
 * Convenience wrappers around _stack_save()/_stack_restore() for PHY
 * registers (id 0x1), radio registers (id 0x2) and OFDM table entries
 * (id 0x3).  All of them rely on local variables named "stack" and
 * "stackidx" being in scope at the call site.
 */
#define phy_stacksave(offset)					\
	do {							\
		_stack_save(stack, &stackidx, 0x1, (offset),	\
			    b43_phy_read(dev, (offset)));	\
	} while (0)
#define phy_stackrestore(offset)				\
	do {							\
		b43_phy_write(dev, (offset),			\
			      _stack_restore(stack, 0x1,	\
					     (offset)));	\
	} while (0)
#define radio_stacksave(offset)						\
	do {								\
		_stack_save(stack, &stackidx, 0x2, (offset),		\
			    b43_radio_read16(dev, (offset)));		\
	} while (0)
#define radio_stackrestore(offset)					\
	do {								\
		b43_radio_write16(dev, (offset),			\
				  _stack_restore(stack, 0x2,		\
						 (offset)));		\
	} while (0)
/* For the OFDM-table entries the table base and the entry offset are
 * OR-ed together to form the 12-bit "offset" key of the stack slot. */
#define ofdmtab_stacksave(table, offset)				\
	do {								\
		_stack_save(stack, &stackidx, 0x3, (offset)|(table),	\
			    b43_ofdmtab_read16(dev, (table), (offset)));	\
	} while (0)
#define ofdmtab_stackrestore(table, offset)				\
	do {								\
		b43_ofdmtab_write16(dev, (table), (offset),		\
				    _stack_restore(stack, 0x3,		\
						   (offset)|(table)));	\
	} while (0)

/*
 * Program the radio/PHY for interference mitigation.  Every register
 * that gets modified is first pushed onto the interference stack so
 * that b43_radio_interference_mitigation_disable() can undo the change.
 */
static void
b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	u16 tmp, flipped;
	size_t stackidx = 0;
	u32 *stack = gphy->interfstack;

	switch (mode) {
	case B43_INTERFMODE_NONWLAN:
		if (phy->rev != 1) {
			b43_phy_set(dev, 0x042B,
0x0800); b43_phy_mask(dev, B43_PHY_G_CRS, ~0x4000); break; } radio_stacksave(0x0078); tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); B43_WARN_ON(tmp > 15); flipped = bitrev4(tmp); if (flipped < 10 && flipped >= 8) flipped = 7; else if (flipped >= 10) flipped -= 3; flipped = (bitrev4(flipped) << 1) | 0x0020; b43_radio_write16(dev, 0x0078, flipped); b43_calc_nrssi_threshold(dev); phy_stacksave(0x0406); b43_phy_write(dev, 0x0406, 0x7E28); b43_phy_set(dev, 0x042B, 0x0800); b43_phy_set(dev, B43_PHY_RADIO_BITFIELD, 0x1000); phy_stacksave(0x04A0); b43_phy_maskset(dev, 0x04A0, 0xC0C0, 0x0008); phy_stacksave(0x04A1); b43_phy_maskset(dev, 0x04A1, 0xC0C0, 0x0605); phy_stacksave(0x04A2); b43_phy_maskset(dev, 0x04A2, 0xC0C0, 0x0204); phy_stacksave(0x04A8); b43_phy_maskset(dev, 0x04A8, 0xC0C0, 0x0803); phy_stacksave(0x04AB); b43_phy_maskset(dev, 0x04AB, 0xC0C0, 0x0605); phy_stacksave(0x04A7); b43_phy_write(dev, 0x04A7, 0x0002); phy_stacksave(0x04A3); b43_phy_write(dev, 0x04A3, 0x287A); phy_stacksave(0x04A9); b43_phy_write(dev, 0x04A9, 0x2027); phy_stacksave(0x0493); b43_phy_write(dev, 0x0493, 0x32F5); phy_stacksave(0x04AA); b43_phy_write(dev, 0x04AA, 0x2027); phy_stacksave(0x04AC); b43_phy_write(dev, 0x04AC, 0x32F5); break; case B43_INTERFMODE_MANUALWLAN: if (b43_phy_read(dev, 0x0033) & 0x0800) break; gphy->aci_enable = true; phy_stacksave(B43_PHY_RADIO_BITFIELD); phy_stacksave(B43_PHY_G_CRS); if (phy->rev < 2) { phy_stacksave(0x0406); } else { phy_stacksave(0x04C0); phy_stacksave(0x04C1); } phy_stacksave(0x0033); phy_stacksave(0x04A7); phy_stacksave(0x04A3); phy_stacksave(0x04A9); phy_stacksave(0x04AA); phy_stacksave(0x04AC); phy_stacksave(0x0493); phy_stacksave(0x04A1); phy_stacksave(0x04A0); phy_stacksave(0x04A2); phy_stacksave(0x048A); phy_stacksave(0x04A8); phy_stacksave(0x04AB); if (phy->rev == 2) { phy_stacksave(0x04AD); phy_stacksave(0x04AE); } else if (phy->rev >= 3) { phy_stacksave(0x04AD); phy_stacksave(0x0415); phy_stacksave(0x0416); phy_stacksave(0x0417); 
ofdmtab_stacksave(0x1A00, 0x2); ofdmtab_stacksave(0x1A00, 0x3); } phy_stacksave(0x042B); phy_stacksave(0x048C); b43_phy_mask(dev, B43_PHY_RADIO_BITFIELD, ~0x1000); b43_phy_maskset(dev, B43_PHY_G_CRS, 0xFFFC, 0x0002); b43_phy_write(dev, 0x0033, 0x0800); b43_phy_write(dev, 0x04A3, 0x2027); b43_phy_write(dev, 0x04A9, 0x1CA8); b43_phy_write(dev, 0x0493, 0x287A); b43_phy_write(dev, 0x04AA, 0x1CA8); b43_phy_write(dev, 0x04AC, 0x287A); b43_phy_maskset(dev, 0x04A0, 0xFFC0, 0x001A); b43_phy_write(dev, 0x04A7, 0x000D); if (phy->rev < 2) { b43_phy_write(dev, 0x0406, 0xFF0D); } else if (phy->rev == 2) { b43_phy_write(dev, 0x04C0, 0xFFFF); b43_phy_write(dev, 0x04C1, 0x00A9); } else { b43_phy_write(dev, 0x04C0, 0x00C1); b43_phy_write(dev, 0x04C1, 0x0059); } b43_phy_maskset(dev, 0x04A1, 0xC0FF, 0x1800); b43_phy_maskset(dev, 0x04A1, 0xFFC0, 0x0015); b43_phy_maskset(dev, 0x04A8, 0xCFFF, 0x1000); b43_phy_maskset(dev, 0x04A8, 0xF0FF, 0x0A00); b43_phy_maskset(dev, 0x04AB, 0xCFFF, 0x1000); b43_phy_maskset(dev, 0x04AB, 0xF0FF, 0x0800); b43_phy_maskset(dev, 0x04AB, 0xFFCF, 0x0010); b43_phy_maskset(dev, 0x04AB, 0xFFF0, 0x0005); b43_phy_maskset(dev, 0x04A8, 0xFFCF, 0x0010); b43_phy_maskset(dev, 0x04A8, 0xFFF0, 0x0006); b43_phy_maskset(dev, 0x04A2, 0xF0FF, 0x0800); b43_phy_maskset(dev, 0x04A0, 0xF0FF, 0x0500); b43_phy_maskset(dev, 0x04A2, 0xFFF0, 0x000B); if (phy->rev >= 3) { b43_phy_mask(dev, 0x048A, 0x7FFF); b43_phy_maskset(dev, 0x0415, 0x8000, 0x36D8); b43_phy_maskset(dev, 0x0416, 0x8000, 0x36D8); b43_phy_maskset(dev, 0x0417, 0xFE00, 0x016D); } else { b43_phy_set(dev, 0x048A, 0x1000); b43_phy_maskset(dev, 0x048A, 0x9FFF, 0x2000); b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ACIW); } if (phy->rev >= 2) { b43_phy_set(dev, 0x042B, 0x0800); } b43_phy_maskset(dev, 0x048C, 0xF0FF, 0x0200); if (phy->rev == 2) { b43_phy_maskset(dev, 0x04AE, 0xFF00, 0x007F); b43_phy_maskset(dev, 0x04AD, 0x00FF, 0x1300); } else if (phy->rev >= 6) { b43_ofdmtab_write16(dev, 0x1A00, 0x3, 0x007F); 
b43_ofdmtab_write16(dev, 0x1A00, 0x2, 0x007F); b43_phy_mask(dev, 0x04AD, 0x00FF); } b43_calc_nrssi_slope(dev); break; default: B43_WARN_ON(1); } } static void b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u32 *stack = gphy->interfstack; switch (mode) { case B43_INTERFMODE_NONWLAN: if (phy->rev != 1) { b43_phy_mask(dev, 0x042B, ~0x0800); b43_phy_set(dev, B43_PHY_G_CRS, 0x4000); break; } radio_stackrestore(0x0078); b43_calc_nrssi_threshold(dev); phy_stackrestore(0x0406); b43_phy_mask(dev, 0x042B, ~0x0800); if (!dev->bad_frames_preempt) { b43_phy_mask(dev, B43_PHY_RADIO_BITFIELD, ~(1 << 11)); } b43_phy_set(dev, B43_PHY_G_CRS, 0x4000); phy_stackrestore(0x04A0); phy_stackrestore(0x04A1); phy_stackrestore(0x04A2); phy_stackrestore(0x04A8); phy_stackrestore(0x04AB); phy_stackrestore(0x04A7); phy_stackrestore(0x04A3); phy_stackrestore(0x04A9); phy_stackrestore(0x0493); phy_stackrestore(0x04AA); phy_stackrestore(0x04AC); break; case B43_INTERFMODE_MANUALWLAN: if (!(b43_phy_read(dev, 0x0033) & 0x0800)) break; gphy->aci_enable = false; phy_stackrestore(B43_PHY_RADIO_BITFIELD); phy_stackrestore(B43_PHY_G_CRS); phy_stackrestore(0x0033); phy_stackrestore(0x04A3); phy_stackrestore(0x04A9); phy_stackrestore(0x0493); phy_stackrestore(0x04AA); phy_stackrestore(0x04AC); phy_stackrestore(0x04A0); phy_stackrestore(0x04A7); if (phy->rev >= 2) { phy_stackrestore(0x04C0); phy_stackrestore(0x04C1); } else phy_stackrestore(0x0406); phy_stackrestore(0x04A1); phy_stackrestore(0x04AB); phy_stackrestore(0x04A8); if (phy->rev == 2) { phy_stackrestore(0x04AD); phy_stackrestore(0x04AE); } else if (phy->rev >= 3) { phy_stackrestore(0x04AD); phy_stackrestore(0x0415); phy_stackrestore(0x0416); phy_stackrestore(0x0417); ofdmtab_stackrestore(0x1A00, 0x2); ofdmtab_stackrestore(0x1A00, 0x3); } phy_stackrestore(0x04A2); phy_stackrestore(0x048A); phy_stackrestore(0x042B); phy_stackrestore(0x048C); 
		b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACIW);
		/* Restoring the registers changed the RSSI environment,
		 * so recalibrate the NRSSI slope. */
		b43_calc_nrssi_slope(dev);
		break;
	default:
		B43_WARN_ON(1);
	}
}

#undef phy_stacksave
#undef phy_stackrestore
#undef radio_stacksave
#undef radio_stackrestore
#undef ofdmtab_stacksave
#undef ofdmtab_stackrestore

/*
 * Derive the radio core calibration value from radio register 0x60:
 * bits 1-4 select an entry of rcc_table, which is shifted up by one and
 * recombined with bit 0 of the register; bit 5 is always set.
 */
static u16 b43_radio_core_calibration_value(struct b43_wldev *dev)
{
	u16 reg, index, ret;
	static const u8 rcc_table[] = {
		0x02, 0x03, 0x01, 0x0F,
		0x06, 0x07, 0x05, 0x0F,
		0x0A, 0x0B, 0x09, 0x0F,
		0x0E, 0x0F, 0x0D, 0x0F,
	};

	reg = b43_radio_read16(dev, 0x60);
	index = (reg & 0x001E) >> 1;
	ret = rcc_table[index] << 1;
	ret |= (reg & 0x0001);
	ret |= 0x0020;

	return ret;
}

/* Combine three single-bit flags into the 3-bit "lpd" selector used by
 * radio2050_rfover_val().  NOTE(review): the exact hardware meaning of
 * the L/P/D bits is not visible here — presumably a low-power/PGA/DAC
 * control triple; confirm against the b43 specs. */
#define LPD(L, P, D)	(((L) << 2) | ((P) << 1) | ((D) << 0))

/*
 * Compute the value to write into B43_PHY_RFOVER or B43_PHY_RFOVERVAL
 * for the 2050 radio.  The result depends on whether the PHY supports
 * loopback-gain measurement, on the external-LNA board flag, and on
 * the lpd selector (built with the LPD() macro).  Returns 0 when not
 * in G-mode.
 */
static u16 radio2050_rfover_val(struct b43_wldev *dev,
				u16 phy_register, unsigned int lpd)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	if (!phy->gmode)
		return 0;

	if (has_loopback_gain(phy)) {
		int max_lb_gain = gphy->max_lb_gain;
		u16 extlna;
		u16 i;

		/* Bias the measured loopback gain by a radio-revision
		 * dependent constant, then map it onto an external-LNA
		 * control field and a residual gain. */
		if (phy->radio_rev == 8)
			max_lb_gain += 0x3E;
		else
			max_lb_gain += 0x26;
		if (max_lb_gain >= 0x46) {
			extlna = 0x3000;
			max_lb_gain -= 0x46;
		} else if (max_lb_gain >= 0x3A) {
			extlna = 0x1000;
			max_lb_gain -= 0x3A;
		} else if (max_lb_gain >= 0x2E) {
			extlna = 0x2000;
			max_lb_gain -= 0x2E;
		} else {
			extlna = 0;
			max_lb_gain -= 0x10;
		}
		/* Find the smallest step index i at which the remaining
		 * gain drops below one 6-unit step. */
		for (i = 0; i < 16; i++) {
			max_lb_gain -= (i * 6);
			if (max_lb_gain < 6)
				break;
		}

		if ((phy->rev < 7) ||
		    !(sprom->boardflags_lo & B43_BFL_EXTLNA)) {
			/* No external LNA fitted (or PHY too old). */
			if (phy_register == B43_PHY_RFOVER) {
				return 0x1B3;
			} else if (phy_register == B43_PHY_RFOVERVAL) {
				extlna |= (i << 8);
				switch (lpd) {
				case LPD(0, 1, 1):
					return 0x0F92;
				case LPD(0, 0, 1):
				case LPD(1, 0, 1):
					return (0x0092 | extlna);
				case LPD(1, 0, 0):
					return (0x0093 | extlna);
				}
				B43_WARN_ON(1);
			}
			B43_WARN_ON(1);
		} else {
			/* Board with external LNA. */
			if (phy_register == B43_PHY_RFOVER) {
				return 0x9B3;
			} else if (phy_register == B43_PHY_RFOVERVAL) {
				if (extlna)
					extlna |= 0x8000;
				extlna |= (i << 8);
				switch (lpd) {
				case LPD(0, 1, 1):
					return
0x8F92; case LPD(0, 0, 1): return (0x8092 | extlna); case LPD(1, 0, 1): return (0x2092 | extlna); case LPD(1, 0, 0): return (0x2093 | extlna); } B43_WARN_ON(1); } B43_WARN_ON(1); } } else { if ((phy->rev < 7) || !(sprom->boardflags_lo & B43_BFL_EXTLNA)) { if (phy_register == B43_PHY_RFOVER) { return 0x1B3; } else if (phy_register == B43_PHY_RFOVERVAL) { switch (lpd) { case LPD(0, 1, 1): return 0x0FB2; case LPD(0, 0, 1): return 0x00B2; case LPD(1, 0, 1): return 0x30B2; case LPD(1, 0, 0): return 0x30B3; } B43_WARN_ON(1); } B43_WARN_ON(1); } else { if (phy_register == B43_PHY_RFOVER) { return 0x9B3; } else if (phy_register == B43_PHY_RFOVERVAL) { switch (lpd) { case LPD(0, 1, 1): return 0x8FB2; case LPD(0, 0, 1): return 0x80B2; case LPD(1, 0, 1): return 0x20B2; case LPD(1, 0, 0): return 0x20B3; } B43_WARN_ON(1); } B43_WARN_ON(1); } } return 0; } struct init2050_saved_values { /* Core registers */ u16 reg_3EC; u16 reg_3E6; u16 reg_3F4; /* Radio registers */ u16 radio_43; u16 radio_51; u16 radio_52; /* PHY registers */ u16 phy_pgactl; u16 phy_cck_5A; u16 phy_cck_59; u16 phy_cck_58; u16 phy_cck_30; u16 phy_rfover; u16 phy_rfoverval; u16 phy_analogover; u16 phy_analogoverval; u16 phy_crs0; u16 phy_classctl; u16 phy_lo_mask; u16 phy_lo_ctl; u16 phy_syncctl; }; static u16 b43_radio_init2050(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct init2050_saved_values sav; u16 rcc; u16 radio78; u16 ret; u16 i, j; u32 tmp1 = 0, tmp2 = 0; memset(&sav, 0, sizeof(sav)); /* get rid of "may be used uninitialized..." 
*/ sav.radio_43 = b43_radio_read16(dev, 0x43); sav.radio_51 = b43_radio_read16(dev, 0x51); sav.radio_52 = b43_radio_read16(dev, 0x52); sav.phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); sav.phy_cck_5A = b43_phy_read(dev, B43_PHY_CCK(0x5A)); sav.phy_cck_59 = b43_phy_read(dev, B43_PHY_CCK(0x59)); sav.phy_cck_58 = b43_phy_read(dev, B43_PHY_CCK(0x58)); if (phy->type == B43_PHYTYPE_B) { sav.phy_cck_30 = b43_phy_read(dev, B43_PHY_CCK(0x30)); sav.reg_3EC = b43_read16(dev, 0x3EC); b43_phy_write(dev, B43_PHY_CCK(0x30), 0xFF); b43_write16(dev, 0x3EC, 0x3F3F); } else if (phy->gmode || phy->rev >= 2) { sav.phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); sav.phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); sav.phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); sav.phy_analogoverval = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); sav.phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); sav.phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0003); b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFC); b43_phy_mask(dev, B43_PHY_CRS0, 0x7FFF); b43_phy_mask(dev, B43_PHY_CLASSCTL, 0xFFFC); if (has_loopback_gain(phy)) { sav.phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); sav.phy_lo_ctl = b43_phy_read(dev, B43_PHY_LO_CTL); if (phy->rev >= 3) b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); else b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); b43_phy_write(dev, B43_PHY_LO_CTL, 0); } b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(0, 1, 1))); b43_phy_write(dev, B43_PHY_RFOVER, radio2050_rfover_val(dev, B43_PHY_RFOVER, 0)); } b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) | 0x8000); sav.phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); b43_phy_mask(dev, B43_PHY_SYNCCTL, 0xFF7F); sav.reg_3E6 = b43_read16(dev, 0x3E6); sav.reg_3F4 = b43_read16(dev, 0x3F4); if (phy->analog == 0) { b43_write16(dev, 0x03E6, 0x0122); } else { if (phy->analog >= 2) { b43_phy_maskset(dev, B43_PHY_CCK(0x03), 0xFFBF, 0x40); } b43_write16(dev, 
B43_MMIO_CHANNEL_EXT, (b43_read16(dev, B43_MMIO_CHANNEL_EXT) | 0x2000)); } rcc = b43_radio_core_calibration_value(dev); if (phy->type == B43_PHYTYPE_B) b43_radio_write16(dev, 0x78, 0x26); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(0, 1, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xBFAF); b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x1403); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(0, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xBFA0); b43_radio_set(dev, 0x51, 0x0004); if (phy->radio_rev == 8) { b43_radio_write16(dev, 0x43, 0x1F); } else { b43_radio_write16(dev, 0x52, 0); b43_radio_maskset(dev, 0x43, 0xFFF0, 0x0009); } b43_phy_write(dev, B43_PHY_CCK(0x58), 0); for (i = 0; i < 16; i++) { b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0480); b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); udelay(10); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); udelay(10); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 0))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); udelay(20); tmp1 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); b43_phy_write(dev, B43_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); } udelay(10); b43_phy_write(dev, B43_PHY_CCK(0x58), 0); tmp1++; tmp1 >>= 9; for (i = 0; i < 16; i++) { radio78 = (bitrev4(i) << 1) | 0x0020; 
b43_radio_write16(dev, 0x78, radio78); udelay(10); for (j = 0; j < 16; j++) { b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0D80); b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); udelay(10); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); udelay(10); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 0))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); udelay(10); tmp2 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); b43_phy_write(dev, B43_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { b43_phy_write(dev, B43_PHY_RFOVERVAL, radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, LPD(1, 0, 1))); } b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); } tmp2++; tmp2 >>= 8; if (tmp1 < tmp2) break; } /* Restore the registers */ b43_phy_write(dev, B43_PHY_PGACTL, sav.phy_pgactl); b43_radio_write16(dev, 0x51, sav.radio_51); b43_radio_write16(dev, 0x52, sav.radio_52); b43_radio_write16(dev, 0x43, sav.radio_43); b43_phy_write(dev, B43_PHY_CCK(0x5A), sav.phy_cck_5A); b43_phy_write(dev, B43_PHY_CCK(0x59), sav.phy_cck_59); b43_phy_write(dev, B43_PHY_CCK(0x58), sav.phy_cck_58); b43_write16(dev, 0x3E6, sav.reg_3E6); if (phy->analog != 0) b43_write16(dev, 0x3F4, sav.reg_3F4); b43_phy_write(dev, B43_PHY_SYNCCTL, sav.phy_syncctl); b43_synth_pu_workaround(dev, phy->channel); if (phy->type == B43_PHYTYPE_B) { b43_phy_write(dev, B43_PHY_CCK(0x30), sav.phy_cck_30); b43_write16(dev, 0x3EC, sav.reg_3EC); } else if (phy->gmode) { b43_write16(dev, B43_MMIO_PHY_RADIO, b43_read16(dev, B43_MMIO_PHY_RADIO) & 0x7FFF); b43_phy_write(dev, B43_PHY_RFOVER, sav.phy_rfover); b43_phy_write(dev, 
B43_PHY_RFOVERVAL, sav.phy_rfoverval); b43_phy_write(dev, B43_PHY_ANALOGOVER, sav.phy_analogover); b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, sav.phy_analogoverval); b43_phy_write(dev, B43_PHY_CRS0, sav.phy_crs0); b43_phy_write(dev, B43_PHY_CLASSCTL, sav.phy_classctl); if (has_loopback_gain(phy)) { b43_phy_write(dev, B43_PHY_LO_MASK, sav.phy_lo_mask); b43_phy_write(dev, B43_PHY_LO_CTL, sav.phy_lo_ctl); } } if (i > 15) ret = radio78; else ret = rcc; return ret; } static void b43_phy_initb5(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 offset, value; u8 old_channel; if (phy->analog == 1) { b43_radio_set(dev, 0x007A, 0x0050); } if ((dev->dev->board_vendor != SSB_BOARDVENDOR_BCM) && (dev->dev->board_type != SSB_BOARD_BU4306)) { value = 0x2120; for (offset = 0x00A8; offset < 0x00C7; offset++) { b43_phy_write(dev, offset, value); value += 0x202; } } b43_phy_maskset(dev, 0x0035, 0xF0FF, 0x0700); if (phy->radio_ver == 0x2050) b43_phy_write(dev, 0x0038, 0x0667); if (phy->gmode || phy->rev >= 2) { if (phy->radio_ver == 0x2050) { b43_radio_set(dev, 0x007A, 0x0020); b43_radio_set(dev, 0x0051, 0x0004); } b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000); b43_phy_set(dev, 0x0802, 0x0100); b43_phy_set(dev, 0x042B, 0x2000); b43_phy_write(dev, 0x001C, 0x186A); b43_phy_maskset(dev, 0x0013, 0x00FF, 0x1900); b43_phy_maskset(dev, 0x0035, 0xFFC0, 0x0064); b43_phy_maskset(dev, 0x005D, 0xFF80, 0x000A); } if (dev->bad_frames_preempt) { b43_phy_set(dev, B43_PHY_RADIO_BITFIELD, (1 << 11)); } if (phy->analog == 1) { b43_phy_write(dev, 0x0026, 0xCE00); b43_phy_write(dev, 0x0021, 0x3763); b43_phy_write(dev, 0x0022, 0x1BC3); b43_phy_write(dev, 0x0023, 0x06F9); b43_phy_write(dev, 0x0024, 0x037E); } else b43_phy_write(dev, 0x0026, 0xCC00); b43_phy_write(dev, 0x0030, 0x00C6); b43_write16(dev, 0x03EC, 0x3F22); if (phy->analog == 1) b43_phy_write(dev, 0x0020, 0x3E1C); else b43_phy_write(dev, 0x0020, 0x301C); if (phy->analog == 0) b43_write16(dev, 0x03E4, 
0x3000); old_channel = phy->channel; /* Force to channel 7, even if not supported. */ b43_gphy_channel_switch(dev, 7, 0); if (phy->radio_ver != 0x2050) { b43_radio_write16(dev, 0x0075, 0x0080); b43_radio_write16(dev, 0x0079, 0x0081); } b43_radio_write16(dev, 0x0050, 0x0020); b43_radio_write16(dev, 0x0050, 0x0023); if (phy->radio_ver == 0x2050) { b43_radio_write16(dev, 0x0050, 0x0020); b43_radio_write16(dev, 0x005A, 0x0070); } b43_radio_write16(dev, 0x005B, 0x007B); b43_radio_write16(dev, 0x005C, 0x00B0); b43_radio_set(dev, 0x007A, 0x0007); b43_gphy_channel_switch(dev, old_channel, 0); b43_phy_write(dev, 0x0014, 0x0080); b43_phy_write(dev, 0x0032, 0x00CA); b43_phy_write(dev, 0x002A, 0x88A3); b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control); if (phy->radio_ver == 0x2050) b43_radio_write16(dev, 0x005D, 0x000D); b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); } static void b43_phy_initb6(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 offset, val; u8 old_channel; b43_phy_write(dev, 0x003E, 0x817A); b43_radio_write16(dev, 0x007A, (b43_radio_read16(dev, 0x007A) | 0x0058)); if (phy->radio_rev == 4 || phy->radio_rev == 5) { b43_radio_write16(dev, 0x51, 0x37); b43_radio_write16(dev, 0x52, 0x70); b43_radio_write16(dev, 0x53, 0xB3); b43_radio_write16(dev, 0x54, 0x9B); b43_radio_write16(dev, 0x5A, 0x88); b43_radio_write16(dev, 0x5B, 0x88); b43_radio_write16(dev, 0x5D, 0x88); b43_radio_write16(dev, 0x5E, 0x88); b43_radio_write16(dev, 0x7D, 0x88); b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW); } B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... 
*/ if (phy->radio_rev == 8) { b43_radio_write16(dev, 0x51, 0); b43_radio_write16(dev, 0x52, 0x40); b43_radio_write16(dev, 0x53, 0xB7); b43_radio_write16(dev, 0x54, 0x98); b43_radio_write16(dev, 0x5A, 0x88); b43_radio_write16(dev, 0x5B, 0x6B); b43_radio_write16(dev, 0x5C, 0x0F); if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_ALTIQ) { b43_radio_write16(dev, 0x5D, 0xFA); b43_radio_write16(dev, 0x5E, 0xD8); } else { b43_radio_write16(dev, 0x5D, 0xF5); b43_radio_write16(dev, 0x5E, 0xB8); } b43_radio_write16(dev, 0x0073, 0x0003); b43_radio_write16(dev, 0x007D, 0x00A8); b43_radio_write16(dev, 0x007C, 0x0001); b43_radio_write16(dev, 0x007E, 0x0008); } val = 0x1E1F; for (offset = 0x0088; offset < 0x0098; offset++) { b43_phy_write(dev, offset, val); val -= 0x0202; } val = 0x3E3F; for (offset = 0x0098; offset < 0x00A8; offset++) { b43_phy_write(dev, offset, val); val -= 0x0202; } val = 0x2120; for (offset = 0x00A8; offset < 0x00C8; offset++) { b43_phy_write(dev, offset, (val & 0x3F3F)); val += 0x0202; } if (phy->type == B43_PHYTYPE_G) { b43_radio_set(dev, 0x007A, 0x0020); b43_radio_set(dev, 0x0051, 0x0004); b43_phy_set(dev, 0x0802, 0x0100); b43_phy_set(dev, 0x042B, 0x2000); b43_phy_write(dev, 0x5B, 0); b43_phy_write(dev, 0x5C, 0); } old_channel = phy->channel; if (old_channel >= 8) b43_gphy_channel_switch(dev, 1, 0); else b43_gphy_channel_switch(dev, 13, 0); b43_radio_write16(dev, 0x0050, 0x0020); b43_radio_write16(dev, 0x0050, 0x0023); udelay(40); if (phy->radio_rev < 6 || phy->radio_rev == 8) { b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C) | 0x0002)); b43_radio_write16(dev, 0x50, 0x20); } if (phy->radio_rev <= 2) { b43_radio_write16(dev, 0x7C, 0x20); b43_radio_write16(dev, 0x5A, 0x70); b43_radio_write16(dev, 0x5B, 0x7B); b43_radio_write16(dev, 0x5C, 0xB0); } b43_radio_maskset(dev, 0x007A, 0x00F8, 0x0007); b43_gphy_channel_switch(dev, old_channel, 0); b43_phy_write(dev, 0x0014, 0x0200); if (phy->radio_rev >= 6) b43_phy_write(dev, 0x2A, 0x88C2); else 
b43_phy_write(dev, 0x2A, 0x8AC0); b43_phy_write(dev, 0x0038, 0x0668); b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control); if (phy->radio_rev <= 5) { b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003); } if (phy->radio_rev <= 2) b43_radio_write16(dev, 0x005D, 0x000D); if (phy->analog == 4) { b43_write16(dev, 0x3E4, 9); b43_phy_mask(dev, 0x61, 0x0FFF); } else { b43_phy_maskset(dev, 0x0002, 0xFFC0, 0x0004); } if (phy->type == B43_PHYTYPE_B) B43_WARN_ON(1); else if (phy->type == B43_PHYTYPE_G) b43_write16(dev, 0x03E6, 0x0); } static void b43_calc_loopback_gain(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 backup_phy[16] = { 0 }; u16 backup_radio[3]; u16 backup_bband; u16 i, j, loop_i_max; u16 trsw_rx; u16 loop1_outer_done, loop1_inner_done; backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0); backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER); backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL); if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER); backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); } backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A)); backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59)); backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58)); backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A)); backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03)); backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK); backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL); backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B)); backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL); backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); backup_bband = gphy->bbatt.att; backup_radio[0] = b43_radio_read16(dev, 0x52); backup_radio[1] = b43_radio_read16(dev, 0x43); backup_radio[2] = b43_radio_read16(dev, 0x7A); b43_phy_mask(dev, B43_PHY_CRS0, 0x3FFF); b43_phy_set(dev, 
B43_PHY_CCKBBANDCFG, 0x8000); b43_phy_set(dev, B43_PHY_RFOVER, 0x0002); b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xFFFD); b43_phy_set(dev, B43_PHY_RFOVER, 0x0001); b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xFFFE); if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0001); b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFE); b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0002); b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFD); } b43_phy_set(dev, B43_PHY_RFOVER, 0x000C); b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x000C); b43_phy_set(dev, B43_PHY_RFOVER, 0x0030); b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xFFCF, 0x10); b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780); b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); b43_phy_set(dev, B43_PHY_CCK(0x0A), 0x2000); if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0004); b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFB); } b43_phy_maskset(dev, B43_PHY_CCK(0x03), 0xFF9F, 0x40); if (phy->radio_rev == 8) { b43_radio_write16(dev, 0x43, 0x000F); } else { b43_radio_write16(dev, 0x52, 0); b43_radio_maskset(dev, 0x43, 0xFFF0, 0x9); } b43_gphy_set_baseband_attenuation(dev, 11); if (phy->rev >= 3) b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); else b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); b43_phy_write(dev, B43_PHY_LO_CTL, 0); b43_phy_maskset(dev, B43_PHY_CCK(0x2B), 0xFFC0, 0x01); b43_phy_maskset(dev, B43_PHY_CCK(0x2B), 0xC0FF, 0x800); b43_phy_set(dev, B43_PHY_RFOVER, 0x0100); b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF); if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_EXTLNA) { if (phy->rev >= 7) { b43_phy_set(dev, B43_PHY_RFOVER, 0x0800); b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000); } } b43_radio_mask(dev, 0x7A, 0x00F7); j = 0; loop_i_max = (phy->radio_rev == 8) ? 
15 : 9; for (i = 0; i < loop_i_max; i++) { for (j = 0; j < 16; j++) { b43_radio_write16(dev, 0x43, i); b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xF0FF, (j << 8)); b43_phy_maskset(dev, B43_PHY_PGACTL, 0x0FFF, 0xA000); b43_phy_set(dev, B43_PHY_PGACTL, 0xF000); udelay(20); if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) goto exit_loop1; } } exit_loop1: loop1_outer_done = i; loop1_inner_done = j; if (j >= 8) { b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x30); trsw_rx = 0x1B; for (j = j - 8; j < 16; j++) { b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xF0FF, (j << 8)); b43_phy_maskset(dev, B43_PHY_PGACTL, 0x0FFF, 0xA000); b43_phy_set(dev, B43_PHY_PGACTL, 0xF000); udelay(20); trsw_rx -= 3; if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) goto exit_loop2; } } else trsw_rx = 0x18; exit_loop2: if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]); b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]); } b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]); b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]); b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]); b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]); b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]); b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]); b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]); b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]); b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]); b43_gphy_set_baseband_attenuation(dev, backup_bband); b43_radio_write16(dev, 0x52, backup_radio[0]); b43_radio_write16(dev, 0x43, backup_radio[1]); b43_radio_write16(dev, 0x7A, backup_radio[2]); b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003); udelay(10); b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]); b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]); b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]); b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]); gphy->max_lb_gain = ((loop1_inner_done * 6) - 
(loop1_outer_done * 4)) - 11; gphy->trsw_rx_gain = trsw_rx * 2; } static void b43_hardware_pctl_early_init(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (!b43_has_hardware_pctl(dev)) { b43_phy_write(dev, 0x047A, 0xC111); return; } b43_phy_mask(dev, 0x0036, 0xFEFF); b43_phy_write(dev, 0x002F, 0x0202); b43_phy_set(dev, 0x047C, 0x0002); b43_phy_set(dev, 0x047A, 0xF000); if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { b43_phy_maskset(dev, 0x047A, 0xFF0F, 0x0010); b43_phy_set(dev, 0x005D, 0x8000); b43_phy_maskset(dev, 0x004E, 0xFFC0, 0x0010); b43_phy_write(dev, 0x002E, 0xC07F); b43_phy_set(dev, 0x0036, 0x0400); } else { b43_phy_set(dev, 0x0036, 0x0200); b43_phy_set(dev, 0x0036, 0x0400); b43_phy_mask(dev, 0x005D, 0x7FFF); b43_phy_mask(dev, 0x004F, 0xFFFE); b43_phy_maskset(dev, 0x004E, 0xFFC0, 0x0010); b43_phy_write(dev, 0x002E, 0xC07F); b43_phy_maskset(dev, 0x047A, 0xFF0F, 0x0010); } } /* Hardware power control for G-PHY */ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; if (!b43_has_hardware_pctl(dev)) { /* No hardware power control */ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL); return; } b43_phy_maskset(dev, 0x0036, 0xFFC0, (gphy->tgt_idle_tssi - gphy->cur_idle_tssi)); b43_phy_maskset(dev, 0x0478, 0xFF00, (gphy->tgt_idle_tssi - gphy->cur_idle_tssi)); b43_gphy_tssi_power_lt_init(dev); b43_gphy_gain_lt_init(dev); b43_phy_mask(dev, 0x0060, 0xFFBF); b43_phy_write(dev, 0x0014, 0x0000); B43_WARN_ON(phy->rev < 6); b43_phy_set(dev, 0x0478, 0x0800); b43_phy_mask(dev, 0x0478, 0xFEFF); b43_phy_mask(dev, 0x0801, 0xFFBF); b43_gphy_dc_lt_init(dev, 1); /* Enable hardware pctl in firmware. 
*/ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); } /* Initialize B/G PHY power control */ static void b43_phy_init_pctl(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; struct b43_rfatt old_rfatt; struct b43_bbatt old_bbatt; u8 old_tx_control = 0; B43_WARN_ON(phy->type != B43_PHYTYPE_G); if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) && (dev->dev->board_type == SSB_BOARD_BU4306)) return; b43_phy_write(dev, 0x0028, 0x8018); /* This does something with the Analog... */ b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0) & 0xFFDF); if (!phy->gmode) return; b43_hardware_pctl_early_init(dev); if (gphy->cur_idle_tssi == 0) { if (phy->radio_ver == 0x2050 && phy->analog == 0) { b43_radio_maskset(dev, 0x0076, 0x00F7, 0x0084); } else { struct b43_rfatt rfatt; struct b43_bbatt bbatt; memcpy(&old_rfatt, &gphy->rfatt, sizeof(old_rfatt)); memcpy(&old_bbatt, &gphy->bbatt, sizeof(old_bbatt)); old_tx_control = gphy->tx_control; bbatt.att = 11; if (phy->radio_rev == 8) { rfatt.att = 15; rfatt.with_padmix = true; } else { rfatt.att = 9; rfatt.with_padmix = false; } b43_set_txpower_g(dev, &bbatt, &rfatt, 0); } b43_dummy_transmission(dev, false, true); gphy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI); if (B43_DEBUG) { /* Current-Idle-TSSI sanity check. */ if (abs(gphy->cur_idle_tssi - gphy->tgt_idle_tssi) >= 20) { b43dbg(dev->wl, "!WARNING! Idle-TSSI phy->cur_idle_tssi " "measuring failed. (cur=%d, tgt=%d). 
Disabling TX power " "adjustment.\n", gphy->cur_idle_tssi, gphy->tgt_idle_tssi); gphy->cur_idle_tssi = 0; } } if (phy->radio_ver == 0x2050 && phy->analog == 0) { b43_radio_mask(dev, 0x0076, 0xFF7B); } else { b43_set_txpower_g(dev, &old_bbatt, &old_rfatt, old_tx_control); } } b43_hardware_pctl_init_gphy(dev); b43_shm_clear_tssi(dev); } static void b43_phy_initg(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 tmp; if (phy->rev == 1) b43_phy_initb5(dev); else b43_phy_initb6(dev); if (phy->rev >= 2 || phy->gmode) b43_phy_inita(dev); if (phy->rev >= 2) { b43_phy_write(dev, B43_PHY_ANALOGOVER, 0); b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0); } if (phy->rev == 2) { b43_phy_write(dev, B43_PHY_RFOVER, 0); b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); } if (phy->rev > 5) { b43_phy_write(dev, B43_PHY_RFOVER, 0x400); b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); } if (phy->gmode || phy->rev >= 2) { tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM); tmp &= B43_PHYVER_VERSION; if (tmp == 3 || tmp == 5) { b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816); b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006); } if (tmp == 5) { b43_phy_maskset(dev, B43_PHY_OFDM(0xCC), 0x00FF, 0x1F00); } } if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2) b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78); if (phy->radio_rev == 8) { b43_phy_set(dev, B43_PHY_EXTG(0x01), 0x80); b43_phy_set(dev, B43_PHY_OFDM(0x3E), 0x4); } if (has_loopback_gain(phy)) b43_calc_loopback_gain(dev); if (phy->radio_rev != 8) { if (gphy->initval == 0xFFFF) gphy->initval = b43_radio_init2050(dev); else b43_radio_write16(dev, 0x0078, gphy->initval); } b43_lo_g_init(dev); if (has_tx_magnification(phy)) { b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) & 0xFF00) | gphy->lo_control->tx_bias | gphy-> lo_control->tx_magn); } else { b43_radio_maskset(dev, 0x52, 0xFFF0, gphy->lo_control->tx_bias); } if (phy->rev >= 6) { b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 
12)); } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075); else b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F); if (phy->rev < 2) b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101); else b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202); if (phy->gmode || phy->rev >= 2) { b43_lo_g_adjust(dev); b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); } if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) { /* The specs state to update the NRSSI LT with * the value 0x7FFFFFFF here. I think that is some weird * compiler optimization in the original driver. * Essentially, what we do here is resetting all NRSSI LT * entries to -32 (see the clamp_val() in nrssi_hw_update()) */ b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? b43_calc_nrssi_threshold(dev); } else if (phy->gmode || phy->rev >= 2) { if (gphy->nrssi[0] == -1000) { B43_WARN_ON(gphy->nrssi[1] != -1000); b43_calc_nrssi_slope(dev); } else b43_calc_nrssi_threshold(dev); } if (phy->radio_rev == 8) b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230); b43_phy_init_pctl(dev); /* FIXME: The spec says in the following if, the 0 should be replaced 'if OFDM may not be used in the current locale' but OFDM is legal everywhere */ if ((dev->dev->chip_id == 0x4306 && dev->dev->chip_pkg == 2) || 0) { b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF); b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF); } } void b43_gphy_channel_switch(struct b43_wldev *dev, unsigned int channel, bool synthetic_pu_workaround) { if (synthetic_pu_workaround) b43_synth_pu_workaround(dev, channel); b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); if (channel == 14) { if (dev->dev->bus_sprom->country_code == SSB_SPROM1CCODE_JAPAN) b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACPR); else b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ACPR); b43_write16(dev, B43_MMIO_CHANNEL_EXT, b43_read16(dev, B43_MMIO_CHANNEL_EXT) | (1 << 11)); } else { b43_write16(dev, B43_MMIO_CHANNEL_EXT, b43_read16(dev, B43_MMIO_CHANNEL_EXT) & 0xF7BF); } } 
/* Pick the power-on default baseband attenuation for this radio.
 * Old 2050 radios (rev < 6) start at 0, everything else at 2. */
static void default_baseband_attenuation(struct b43_wldev *dev,
					 struct b43_bbatt *bb)
{
	struct b43_phy *phy = &dev->phy;

	if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
		bb->att = 0;
	else
		bb->att = 2;
}

/* Pick the power-on default radio (RF) attenuation.
 * The value is board- and radio-revision specific; the nested switch
 * encodes the per-revision table from the Broadcom specs. */
static void default_radio_attenuation(struct b43_wldev *dev,
				      struct b43_rfatt *rf)
{
	struct b43_bus_dev *bdev = dev->dev;
	struct b43_phy *phy = &dev->phy;

	rf->with_padmix = false;

	/* Early BCM4309G boards get fixed values regardless of radio. */
	if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
	    dev->dev->board_type == SSB_BOARD_BCM4309G) {
		if (dev->dev->board_rev < 0x43) {
			rf->att = 2;
			return;
		} else if (dev->dev->board_rev < 0x51) {
			rf->att = 3;
			return;
		}
	}

	if (phy->type == B43_PHYTYPE_A) {
		rf->att = 0x60;
		return;
	}

	switch (phy->radio_ver) {
	case 0x2053:
		switch (phy->radio_rev) {
		case 1:
			rf->att = 6;
			return;
		}
		break;
	case 0x2050:
		switch (phy->radio_rev) {
		case 0:
			rf->att = 5;
			return;
		case 1:
			if (phy->type == B43_PHYTYPE_G) {
				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM &&
				    bdev->board_type == SSB_BOARD_BCM4309G &&
				    bdev->board_rev >= 30)
					rf->att = 3;
				else if (bdev->board_vendor == SSB_BOARDVENDOR_BCM &&
					 bdev->board_type == SSB_BOARD_BU4306)
					rf->att = 3;
				else
					rf->att = 1;
			} else {
				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM &&
				    bdev->board_type == SSB_BOARD_BCM4309G &&
				    bdev->board_rev >= 30)
					rf->att = 7;
				else
					rf->att = 6;
			}
			return;
		case 2:
			if (phy->type == B43_PHYTYPE_G) {
				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM &&
				    bdev->board_type == SSB_BOARD_BCM4309G &&
				    bdev->board_rev >= 30)
					rf->att = 3;
				else if (bdev->board_vendor == SSB_BOARDVENDOR_BCM &&
					 bdev->board_type == SSB_BOARD_BU4306)
					rf->att = 5;
				else if (bdev->chip_id == 0x4320)
					rf->att = 4;
				else
					rf->att = 3;
			} else
				rf->att = 6;
			return;
		case 3:
			rf->att = 5;
			return;
		case 4:
		case 5:
			rf->att = 1;
			return;
		case 6:
		case 7:
			rf->att = 5;
			return;
		case 8:
			/* Radio rev 8 uses PAD mixer attenuation. */
			rf->att = 0xA;
			rf->with_padmix = true;
			return;
		case 9:
		default:
			rf->att = 5;
			return;
		}
	}
	/* Unknown radio: fall back to a safe middle value. */
	rf->att = 5;
}

/* Default TX control bits (PA2DB / TXMIX) for the 2050 radio,
 * depending on its revision. Non-2050 radios need none. */
static u16 default_tx_control(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	if (phy->radio_ver != 0x2050)
		return 0;
	if (phy->radio_rev == 1)
		return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
	if (phy->radio_rev < 6)
		return B43_TXCTL_PA2DB;
	if (phy->radio_rev == 8)
		return B43_TXCTL_TXMIX;
	return 0;
}

/* Probe one channel for Adjacent Channel Interference.
 * Samples the RSSI 100 times and returns 1 if at least 20 samples
 * fall below the reference level, else 0. */
static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	u8 ret = 0;
	u16 saved, rssi, temp;
	int i, j = 0;

	saved = b43_phy_read(dev, 0x0403);
	b43_switch_channel(dev, channel);
	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
	if (gphy->aci_hw_rssi)
		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
	else
		rssi = saved & 0x3F;
	/* clamp temp to signed 5bit */
	if (rssi > 32)
		rssi -= 64;
	for (i = 0; i < 100; i++) {
		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
		if (temp > 32)
			temp -= 64;
		if (temp < rssi)
			j++;
		if (j >= 20)
			ret = 1;
	}
	b43_phy_write(dev, 0x0403, saved);

	return ret;
}

/* Scan channels around the current one for interference and return
 * whether the current channel is affected.
 * NOTE(review): ret[] is not zero-initialized, and the detect loop only
 * writes entries where abs(channel - i) > 2; the smearing loop below then
 * reads all 13 entries — possibly uninitialized. Verify against the
 * upstream driver before relying on the result. */
static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u8 ret[13];
	unsigned int channel = phy->channel;
	unsigned int i, j, start, end;

	if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
		return 0;

	b43_phy_lock(dev);
	b43_radio_lock(dev);
	b43_phy_mask(dev, 0x0802, 0xFFFC);
	b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF);
	b43_set_all_gains(dev, 3, 8, 1);

	/* Scan window: up to 5 channels either side, clamped to 1..13. */
	start = (channel - 5 > 0) ? channel - 5 : 1;
	end = (channel + 5 < 14) ? channel + 5 : 13;

	for (i = start; i <= end; i++) {
		if (abs(channel - i) > 2)
			ret[i - 1] = b43_gphy_aci_detect(dev, i);
	}
	/* Restore original channel and gain/CRS settings. */
	b43_switch_channel(dev, channel);
	b43_phy_maskset(dev, 0x0802, 0xFFFC, 0x0003);
	b43_phy_mask(dev, 0x0403, 0xFFF8);
	b43_phy_set(dev, B43_PHY_G_CRS, 0x8000);
	b43_set_original_gains(dev);
	/* Smear each detected interferer over the 5 following channels. */
	for (i = 0; i < 13; i++) {
		if (!ret[i])
			continue;
		end = (i + 5 < 13) ? i + 5 : 13;
		for (j = i; j < end; j++)
			ret[j] = 1;
	}
	b43_radio_unlock(dev);
	b43_phy_unlock(dev);

	return ret[channel - 1];
}

/* Integer division with round-to-nearest for non-negative numerators
 * (plain truncation for negative ones). */
static s32 b43_tssi2dbm_ad(s32 num, s32 den)
{
	if (num < 0)
		return num / den;
	else
		return (num + den / 2) / den;
}

/* Compute one TSSI->dBm table entry at @index by fixed-point iteration
 * from the SPROM pab0/pab1/pab2 coefficients.
 * Returns 0 on success or -EINVAL if the iteration does not converge
 * within 16 steps. */
static s8 b43_tssi2dbm_entry(s8 entry[], u8 index,
			     s16 pab0, s16 pab1, s16 pab2)
{
	s32 m1, m2, f = 256, q, delta;
	s8 i = 0;

	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
	do {
		if (i > 15)
			return -EINVAL;
		q = b43_tssi2dbm_ad(f * 4096 -
				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
		delta = abs(q - f);
		f = q;
		i++;
	} while (delta >= 2);
	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
	return 0;
}

/* Build the dynamic 64-entry TSSI->dBm lookup table from the SPROM
 * coefficients. Returns a kmalloc'd table (caller frees) or NULL on
 * allocation/convergence failure. */
u8 *b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
				  s16 pab0, s16 pab1, s16 pab2)
{
	unsigned int i;
	u8 *tab;
	int err;

	tab = kmalloc(64, GFP_KERNEL);
	if (!tab) {
		b43err(dev->wl, "Could not allocate memory "
		       "for tssi2dbm table\n");
		return NULL;
	}
	for (i = 0; i < 64; i++) {
		err = b43_tssi2dbm_entry(tab, i, pab0, pab1, pab2);
		if (err) {
			b43err(dev->wl, "Could not generate "
			       "tssi2dBm table\n");
			kfree(tab);
			return NULL;
		}
	}

	return tab;
}

/* Initialise the TSSI->dBm lookup table: use a dynamically generated
 * table when the SPROM provides pab0/1/2 coefficients, otherwise fall
 * back to the static b43_tssi2dbm_g_table. */
static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	s16 pab0, pab1, pab2;

	pab0 = (s16) (dev->dev->bus_sprom->pa0b0);
	pab1 = (s16) (dev->dev->bus_sprom->pa0b1);
	pab2 = (s16) (dev->dev->bus_sprom->pa0b2);

	B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
		    (phy->radio_ver != 0x2050)); /* Not supported anymore */

	gphy->dyn_tssi_tbl = false;

	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
		/* The pabX values are set in SPROM. Use them. */
		if ((s8) dev->dev->bus_sprom->itssi_bg != 0 &&
		    (s8) dev->dev->bus_sprom->itssi_bg != -1) {
			gphy->tgt_idle_tssi =
			    (s8) (dev->dev->bus_sprom->itssi_bg);
		} else
			gphy->tgt_idle_tssi = 62;
		gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
							       pab1, pab2);
		if (!gphy->tssi2dbm)
			return -ENOMEM;
		gphy->dyn_tssi_tbl = true;
	} else {
		/* pabX values not set in SPROM. */
		gphy->tgt_idle_tssi = 52;
		gphy->tssi2dbm = b43_tssi2dbm_g_table;
	}

	return 0;
}

/* Allocate the G-PHY private data, the LO control structure and the
 * TSSI->dBm table. Returns 0 or a negative errno; on failure nothing
 * is leaked (goto-based unwind). */
static int b43_gphy_op_allocate(struct b43_wldev *dev)
{
	struct b43_phy_g *gphy;
	struct b43_txpower_lo_control *lo;
	int err;

	gphy = kzalloc(sizeof(*gphy), GFP_KERNEL);
	if (!gphy) {
		err = -ENOMEM;
		goto error;
	}
	dev->phy.g = gphy;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo) {
		err = -ENOMEM;
		goto err_free_gphy;
	}
	gphy->lo_control = lo;

	err = b43_gphy_init_tssi2dbm_table(dev);
	if (err)
		goto err_free_lo;

	return 0;

err_free_lo:
	kfree(lo);
err_free_gphy:
	kfree(gphy);
error:
	return err;
}

/* Reset the G-PHY private structure to its pristine state while
 * preserving the allocation-time members (tssi2dbm table, target idle
 * TSSI, LO control pointer). */
static void b43_gphy_op_prepare_structs(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	const void *tssi2dbm;
	int tgt_idle_tssi;
	struct b43_txpower_lo_control *lo;
	unsigned int i;

	/* tssi2dbm table is constant, so it is initialized at alloc time.
	 * Save a copy of the pointer. */
	tssi2dbm = gphy->tssi2dbm;
	tgt_idle_tssi = gphy->tgt_idle_tssi;
	/* Save the LO pointer. */
	lo = gphy->lo_control;

	/* Zero out the whole PHY structure. */
	memset(gphy, 0, sizeof(*gphy));

	/* Restore pointers. */
	gphy->tssi2dbm = tssi2dbm;
	gphy->tgt_idle_tssi = tgt_idle_tssi;
	gphy->lo_control = lo;

	memset(gphy->minlowsig, 0xFF, sizeof(gphy->minlowsig));

	/* NRSSI: -1000 marks "not yet measured". */
	for (i = 0; i < ARRAY_SIZE(gphy->nrssi); i++)
		gphy->nrssi[i] = -1000;
	for (i = 0; i < ARRAY_SIZE(gphy->nrssi_lt); i++)
		gphy->nrssi_lt[i] = i;

	gphy->lofcal = 0xFFFF;
	gphy->initval = 0xFFFF;

	gphy->interfmode = B43_INTERFMODE_NONE;

	/* OFDM-table address caching. */
	gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;

	/* 0xFF means "no average yet" (see recalc_txpower). */
	gphy->average_tssi = 0xFF;

	/* Local Oscillator structure */
	lo->tx_bias = 0xFF;
	INIT_LIST_HEAD(&lo->calib_list);
}

/* Free the G-PHY private data. The static tssi2dbm table must not be
 * freed; only a dynamically generated one is. */
static void b43_gphy_op_free(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;

	kfree(gphy->lo_control);

	if (gphy->dyn_tssi_tbl)
		kfree(gphy->tssi2dbm);
	gphy->dyn_tssi_tbl = false;
	gphy->tssi2dbm = NULL;

	kfree(gphy);
	dev->phy.g = NULL;
}

/* Load default attenuation/TX-control values and run the PHY rev 1
 * early-init workaround before the core is fully brought up. */
static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	struct b43_txpower_lo_control *lo = gphy->lo_control;

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);

	default_baseband_attenuation(dev, &gphy->bbatt);
	default_radio_attenuation(dev, &gphy->rfatt);
	gphy->tx_control = (default_tx_control(dev) << 4);
	generate_rfatt_list(dev, &lo->rfatt_list);
	generate_bbatt_list(dev, &lo->bbatt_list);

	/* Commit previous writes */
	b43_read32(dev, B43_MMIO_MACCTL);

	if (phy->rev == 1) {
		/* Workaround: Temporarily disable gmode through the early init
		 * phase, as the gmode stuff is not needed for phy rev 1 */
		phy->gmode = false;
		b43_wireless_core_reset(dev, 0);
		b43_phy_initg(dev);
		phy->gmode = true;
		b43_wireless_core_reset(dev, 1);
	}

	return 0;
}

/* Main PHY init entry point for the ops table. */
static int b43_gphy_op_init(struct b43_wldev *dev)
{
	b43_phy_initg(dev);

	return 0;
}

/* PHY teardown: release LO calibration state. */
static void b43_gphy_op_exit(struct b43_wldev *dev)
{
	b43_lo_g_cleanup(dev);
}

/* Indirect PHY register read via the control/data MMIO pair. */
static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg)
{
	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_PHY_DATA);
}

/* Indirect PHY register write via the control/data MMIO pair. */
static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
	b43_write16(dev, B43_MMIO_PHY_DATA, value);
}

/* Indirect radio register read (low 16 bits only). */
static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);
	/* G-PHY needs 0x80 for read access. */
	reg |= 0x80;

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
}

/* Indirect radio register write (low 16 bits only). */
static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}

/* Hardware power control is only available from PHY rev 6 on. */
static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
{
	return (dev->phy.rev >= 6);
}

/* Software rfkill: power the radio up (restoring saved RFover state and
 * re-tuning) or down (saving RFover state first). May sleep. */
static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
					bool blocked)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	unsigned int channel;

	might_sleep();
	if (!blocked) {
		/* Turn radio ON */
		if (phy->radio_on)
			return;

		b43_phy_write(dev, 0x0015, 0x8000);
		b43_phy_write(dev, 0x0015, 0xCC00);
		b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000));
		if (gphy->radio_off_context.valid) {
			/* Restore the RFover values. */
			b43_phy_write(dev, B43_PHY_RFOVER,
				      gphy->radio_off_context.rfover);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      gphy->radio_off_context.rfoverval);
			gphy->radio_off_context.valid = false;
		}
		/* Bounce over channel 6 to force a full re-tune. */
		channel = phy->channel;
		b43_gphy_channel_switch(dev, 6, 1);
		b43_gphy_channel_switch(dev, channel, 0);
	} else {
		/* Turn radio OFF */
		u16 rfover, rfoverval;

		rfover = b43_phy_read(dev, B43_PHY_RFOVER);
		rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
		gphy->radio_off_context.rfover = rfover;
		gphy->radio_off_context.rfoverval = rfoverval;
		gphy->radio_off_context.valid = true;
		b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
	}
}

/* Validate and switch to a 2.4GHz channel (1..14). */
static int b43_gphy_op_switch_channel(struct b43_wldev *dev,
				      unsigned int new_channel)
{
	if ((new_channel < 1) || (new_channel > 14))
		return -EINVAL;
	b43_gphy_channel_switch(dev, new_channel, 0);

	return 0;
}

static unsigned int b43_gphy_op_get_default_chan(struct b43_wldev *dev)
{
	return 1; /* Default to channel 1 */
}

/* Configure the RX antenna (fixed antenna or automatic diversity).
 * Antenna-diversity firmware help is disabled around the register
 * updates and re-enabled at the end.
 * NOTE(review): the second autodiv if/else writes ANTWRSETT again right
 * after the tmp-based update; the else arm masks with the ARXDIV bit
 * itself (clearing all OTHER bits) — looks suspicious, confirm against
 * the upstream driver. */
static void b43_gphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
{
	struct b43_phy *phy = &dev->phy;
	u16 tmp;
	int autodiv = 0;

	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
		autodiv = 1;

	b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);

	b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT,
			(autodiv ? B43_ANTENNA_AUTO1 : antenna) <<
			B43_PHY_BBANDCFG_RXANT_SHIFT);

	if (autodiv) {
		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
		if (antenna == B43_ANTENNA_AUTO1)
			tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
		else
			tmp |= B43_PHY_ANTDWELL_AUTODIV1;
		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
	}
	tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
	if (autodiv)
		tmp |= B43_PHY_ANTWRSETT_ARXDIV;
	else
		tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
	b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
	if (autodiv)
		b43_phy_set(dev, B43_PHY_ANTWRSETT, B43_PHY_ANTWRSETT_ARXDIV);
	else {
		b43_phy_mask(dev, B43_PHY_ANTWRSETT,
			     B43_PHY_ANTWRSETT_ARXDIV);
	}
	if (phy->rev >= 2) {
		b43_phy_set(dev, B43_PHY_OFDM61, B43_PHY_OFDM61_10);
		b43_phy_maskset(dev, B43_PHY_DIVSRCHGAINBACK, 0xFF00, 0x15);
		if (phy->rev == 2)
			b43_phy_write(dev, B43_PHY_ADIVRELATED, 8);
		else
			b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8);
	}
	if (phy->rev >= 6)
		b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);

	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
}

/* Select the interference-mitigation mode. AUTOWLAN is resolved to
 * MANUALWLAN or NONE based on current ACI state; returns -ENODEV on
 * unsupported hardware, -EINVAL for unknown modes. */
static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
					 enum b43_interference_mitigation mode)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	int currentmode;

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
	if ((phy->rev == 0) || (!phy->gmode))
		return -ENODEV;

	gphy->aci_wlan_automatic = false;
	switch (mode) {
	case B43_INTERFMODE_AUTOWLAN:
		gphy->aci_wlan_automatic = true;
		if (gphy->aci_enable)
			mode = B43_INTERFMODE_MANUALWLAN;
		else
			mode = B43_INTERFMODE_NONE;
		break;
	case B43_INTERFMODE_NONE:
	case B43_INTERFMODE_NONWLAN:
	case B43_INTERFMODE_MANUALWLAN:
		break;
	default:
		return -EINVAL;
	}

	currentmode = gphy->interfmode;
	if (currentmode == mode)
		return 0;
	if (currentmode != B43_INTERFMODE_NONE)
		b43_radio_interference_mitigation_disable(dev, currentmode);

	if (mode == B43_INTERFMODE_NONE) {
		gphy->aci_enable = false;
		gphy->aci_hw_rssi = false;
	} else
		b43_radio_interference_mitigation_enable(dev, mode);
	gphy->interfmode = mode;

	return 0;
}

/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
 * This function converts a TSSI value to dBm in Q5.2
 */
static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
{
	struct b43_phy_g *gphy = dev->phy.g;
	s8 dbm;
	s32 tmp;

	tmp = (gphy->tgt_idle_tssi - gphy->cur_idle_tssi + tssi);
	tmp = clamp_val(tmp, 0x00, 0x3F);
	dbm = gphy->tssi2dbm[tmp];

	return dbm;
}

/* Trade baseband vs. radio attenuation until both are inside their
 * permitted ranges, keeping the overall power roughly constant
 * (1 RF attenuation step == 4 baseband steps). Clamps as a last resort. */
static void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
					    int *_bbatt, int *_rfatt)
{
	int rfatt = *_rfatt;
	int bbatt = *_bbatt;
	struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;

	/* Get baseband and radio attenuation values into their permitted ranges.
	 * Radio attenuation affects power level 4 times as much as baseband. */

	/* Range constants */
	const int rf_min = lo->rfatt_list.min_val;
	const int rf_max = lo->rfatt_list.max_val;
	const int bb_min = lo->bbatt_list.min_val;
	const int bb_max = lo->bbatt_list.max_val;

	while (1) {
		if (rfatt > rf_max && bbatt > bb_max - 4)
			break;	/* Can not get it into ranges */
		if (rfatt < rf_min && bbatt < bb_min + 4)
			break;	/* Can not get it into ranges */
		if (bbatt > bb_max && rfatt > rf_max - 1)
			break;	/* Can not get it into ranges */
		if (bbatt < bb_min && rfatt < rf_min + 1)
			break;	/* Can not get it into ranges */
		if (bbatt > bb_max) {
			bbatt -= 4;
			rfatt += 1;
			continue;
		}
		if (bbatt < bb_min) {
			bbatt += 4;
			rfatt -= 1;
			continue;
		}
		if (rfatt > rf_max) {
			rfatt -= 1;
			bbatt += 4;
			continue;
		}
		if (rfatt < rf_min) {
			rfatt += 1;
			bbatt -= 4;
			continue;
		}
		break;
	}

	*_rfatt = clamp_val(rfatt, rf_min, rf_max);
	*_bbatt = clamp_val(bbatt, bb_min, bb_max);
}

/* Apply the attenuation deltas computed by recalc_txpower to the
 * hardware, with 2050-rev-2 specific TX-control adjustments. Runs with
 * the MAC suspended and the PHY/radio locks held around the writes. */
static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	int rfatt, bbatt;
	u8 tx_control;

	b43_mac_suspend(dev);

	/* Calculate the new attenuation values. */
	bbatt = gphy->bbatt.att;
	bbatt += gphy->bbatt_delta;
	rfatt = gphy->rfatt.att;
	rfatt += gphy->rfatt_delta;

	b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
	tx_control = gphy->tx_control;
	if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
		if (rfatt <= 1) {
			if (tx_control == 0) {
				tx_control = B43_TXCTL_PA2DB |
					     B43_TXCTL_TXMIX;
				rfatt += 2;
				bbatt += 2;
			} else if (dev->dev->bus_sprom->
				   boardflags_lo & B43_BFL_PACTRL) {
				bbatt += 4 * (rfatt - 2);
				rfatt = 2;
			}
		} else if (rfatt > 4 && tx_control) {
			tx_control = 0;
			if (bbatt < 3) {
				rfatt -= 3;
				bbatt += 2;
			} else {
				rfatt -= 2;
				bbatt -= 2;
			}
		}
	}
	/* Save the control values */
	gphy->tx_control = tx_control;
	b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
	gphy->rfatt.att = rfatt;
	gphy->bbatt.att = bbatt;

	if (b43_debug(dev, B43_DBG_XMITPOWER))
		b43dbg(dev->wl, "Adjusting TX power\n");

	/* Adjust the hardware */
	b43_phy_lock(dev);
	b43_radio_lock(dev);
	b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt,
			  gphy->tx_control);
	b43_radio_unlock(dev);
	b43_phy_unlock(dev);

	b43_mac_enable(dev);
}

/* Periodic TX-power recalculation: average the CCK/OFDM TSSI readings,
 * estimate the currently emitted power, compare it to the desired power
 * (clamped to the SPROM limit, all in Q5.2 fixed point) and compute the
 * attenuation deltas for adjust_txpower. Returns whether an adjustment
 * is needed. */
static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
							bool ignore_tssi)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	unsigned int average_tssi;
	int cck_result, ofdm_result;
	int estimated_pwr, desired_pwr, pwr_adjust;
	int rfatt_delta, bbatt_delta;
	unsigned int max_pwr;

	/* First get the average TSSI */
	cck_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_CCK);
	ofdm_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_OFDM_G);
	if ((cck_result < 0) && (ofdm_result < 0)) {
		/* No TSSI information available */
		if (!ignore_tssi)
			goto no_adjustment_needed;
		cck_result = 0;
		ofdm_result = 0;
	}
	if (cck_result < 0)
		average_tssi = ofdm_result;
	else if (ofdm_result < 0)
		average_tssi = cck_result;
	else
		average_tssi = (cck_result + ofdm_result) / 2;
	/* Merge the average with the stored value. */
	if (likely(gphy->average_tssi != 0xFF))
		average_tssi = (average_tssi + gphy->average_tssi) / 2;
	gphy->average_tssi = average_tssi;
	B43_WARN_ON(average_tssi >= B43_TSSI_MAX);

	/* Estimate the TX power emission based on the TSSI */
	estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
	max_pwr = dev->dev->bus_sprom->maxpwr_bg;
	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
		max_pwr -= 3; /* minus 0.75 */
	if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
		b43warn(dev->wl,
			"Invalid max-TX-power value in SPROM.\n");
		max_pwr = INT_TO_Q52(20); /* fake it */
		dev->dev->bus_sprom->maxpwr_bg = max_pwr;
	}

	/* Get desired power (in Q5.2) */
	if (phy->desired_txpower < 0)
		desired_pwr = INT_TO_Q52(0);
	else
		desired_pwr = INT_TO_Q52(phy->desired_txpower);
	/* And limit it. max_pwr already is Q5.2 */
	desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
		b43dbg(dev->wl,
		       "[TX power] current = " Q52_FMT
		       " dBm, desired = " Q52_FMT
		       " dBm, max = " Q52_FMT "\n",
		       Q52_ARG(estimated_pwr),
		       Q52_ARG(desired_pwr),
		       Q52_ARG(max_pwr));
	}

	/* Calculate the adjustment delta. */
	pwr_adjust = desired_pwr - estimated_pwr;
	if (pwr_adjust == 0)
		goto no_adjustment_needed;

	/* RF attenuation delta. */
	rfatt_delta = ((pwr_adjust + 7) / 8);
	/* Lower attenuation => Bigger power output. Negate it. */
	rfatt_delta = -rfatt_delta;

	/* Baseband attenuation delta. */
	bbatt_delta = pwr_adjust / 2;
	/* Lower attenuation => Bigger power output. Negate it. */
	bbatt_delta = -bbatt_delta;
	/* RF att affects power level 4 times as much as
	 * Baseband attenuation. Subtract it. */
	bbatt_delta -= 4 * rfatt_delta;

#if B43_DEBUG
	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
		int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust;
		b43dbg(dev->wl,
		       "[TX power deltas] %s" Q52_FMT " dBm => "
		       "bbatt-delta = %d, rfatt-delta = %d\n",
		       (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm),
		       bbatt_delta, rfatt_delta);
	}
#endif /* DEBUG */

	/* So do we finally need to adjust something in hardware? */
	if ((rfatt_delta == 0) && (bbatt_delta == 0))
		goto no_adjustment_needed;

	/* Save the deltas for later when we adjust the power. */
	gphy->bbatt_delta = bbatt_delta;
	gphy->rfatt_delta = rfatt_delta;

	/* We need to adjust the TX power on the device. */
	return B43_TXPWR_RES_NEED_ADJUST;

no_adjustment_needed:
	return B43_TXPWR_RES_DONE;
}

/* 15-second periodic work: (mostly unimplemented) automatic ACI
 * handling plus LO maintenance, with the MAC suspended. */
static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;

	b43_mac_suspend(dev);
	//TODO: update_aci_moving_average
	if (gphy->aci_enable && gphy->aci_wlan_automatic) {
		if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) {
			if (0 /*TODO: bunch of conditions */ ) {
				phy->ops->interf_mitigation(dev,
						B43_INTERFMODE_MANUALWLAN);
			}
		} else if (0 /*TODO*/) {
			if (/*(aci_average > 1000) &&*/
			    !b43_gphy_aci_scan(dev))
				phy->ops->interf_mitigation(dev,
						B43_INTERFMODE_NONE);
		}
	} else if (gphy->interfmode == B43_INTERFMODE_NONWLAN &&
		   phy->rev == 1) {
		//TODO: implement rev1 workaround
	}
	b43_lo_g_maintanance_work(dev);
	b43_mac_enable(dev);
}

/* 60-second periodic work: NRSSI slope recalibration and a VCO
 * calibration channel bounce for 2050 rev 8 radios. Only runs on
 * boards with the RSSI boardflag. */
static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI))
		return;

	b43_mac_suspend(dev);
	b43_calc_nrssi_slope(dev);
	if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
		u8 old_chan = phy->channel;

		/* VCO Calibration */
		if (old_chan >= 8)
			b43_switch_channel(dev, 1);
		else
			b43_switch_channel(dev, 13);
		b43_switch_channel(dev, old_chan);
	}
	b43_mac_enable(dev);
}

/* G-PHY operations table, registered with the b43 core. */
const struct b43_phy_operations b43_phyops_g = {
	.allocate		= b43_gphy_op_allocate,
	.free			= b43_gphy_op_free,
	.prepare_structs	= b43_gphy_op_prepare_structs,
	.prepare_hardware	= b43_gphy_op_prepare_hardware,
	.init			= b43_gphy_op_init,
	.exit			= b43_gphy_op_exit,
	.phy_read		= b43_gphy_op_read,
	.phy_write		= b43_gphy_op_write,
	.radio_read		= b43_gphy_op_radio_read,
	.radio_write		= b43_gphy_op_radio_write,
	.supports_hwpctl	= b43_gphy_op_supports_hwpctl,
	.software_rfkill	= b43_gphy_op_software_rfkill,
	.switch_analog		= b43_phyop_switch_analog_generic,
	.switch_channel		= b43_gphy_op_switch_channel,
	.get_default_chan	= b43_gphy_op_get_default_chan,
	.set_rx_antenna		= b43_gphy_op_set_rx_antenna,
	.interf_mitigation	= b43_gphy_op_interf_mitigation,
	.recalc_txpower		= b43_gphy_op_recalc_txpower,
	.adjust_txpower		= b43_gphy_op_adjust_txpower,
	.pwork_15sec		= b43_gphy_op_pwork_15sec,
	.pwork_60sec		= b43_gphy_op_pwork_60sec,
};
gpl-2.0
loumatrix/android_kernel_asus_me301t
arch/ia64/kernel/cyclone.c
8958
3017
#include <linux/module.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/timex.h> #include <linux/clocksource.h> #include <asm/io.h> /* IBM Summit (EXA) Cyclone counter code*/ #define CYCLONE_CBAR_ADDR 0xFEB00CD0 #define CYCLONE_PMCC_OFFSET 0x51A0 #define CYCLONE_MPMC_OFFSET 0x51D0 #define CYCLONE_MPCS_OFFSET 0x51A8 #define CYCLONE_TIMER_FREQ 100000000 int use_cyclone; void __init cyclone_setup(void) { use_cyclone = 1; } static void __iomem *cyclone_mc; static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readq((void __iomem *)cyclone_mc); } static struct clocksource clocksource_cyclone = { .name = "cyclone", .rating = 300, .read = read_cyclone, .mask = (1LL << 40) - 1, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; int __init init_cyclone_clock(void) { u64 __iomem *reg; u64 base; /* saved cyclone base address */ u64 offset; /* offset from pageaddr to cyclone_timer register */ int i; u32 __iomem *cyclone_timer; /* Cyclone MPMC0 register */ if (!use_cyclone) return 0; printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n"); /* find base address */ offset = (CYCLONE_CBAR_ADDR); reg = ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR" " register.\n"); use_cyclone = 0; return -ENODEV; } base = readq(reg); iounmap(reg); if(!base){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR" " value.\n"); use_cyclone = 0; return -ENODEV; } /* setup PMCC */ offset = (base + CYCLONE_PMCC_OFFSET); reg = ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid PMCC" " register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* setup MPCS */ offset = (base + CYCLONE_MPCS_OFFSET); reg = ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid MPCS" " register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* map 
in cyclone_timer */ offset = (base + CYCLONE_MPMC_OFFSET); cyclone_timer = ioremap_nocache(offset, sizeof(u32)); if(!cyclone_timer){ printk(KERN_ERR "Summit chipset: Could not find valid MPMC" " register.\n"); use_cyclone = 0; return -ENODEV; } /*quick test to make sure its ticking*/ for(i=0; i<3; i++){ u32 old = readl(cyclone_timer); int stall = 100; while(stall--) barrier(); if(readl(cyclone_timer) == old){ printk(KERN_ERR "Summit chipset: Counter not counting!" " DISABLED\n"); iounmap(cyclone_timer); cyclone_timer = NULL; use_cyclone = 0; return -ENODEV; } } /* initialize last tick */ cyclone_mc = cyclone_timer; clocksource_cyclone.archdata.fsys_mmio = cyclone_timer; clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); return 0; } __initcall(init_cyclone_clock);
gpl-2.0
kularny/GeniSys.Kernel-old
arch/powerpc/kvm/book3s_32_mmu.c
9982
9959
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> /* #define DEBUG_MMU */ /* #define DEBUG_MMU_PTE */ /* #define DEBUG_MMU_PTE_IP 0xfff14c40 */ #ifdef DEBUG_MMU #define dprintk(X...) printk(KERN_INFO X) #else #define dprintk(X...) do { } while(0) #endif #ifdef DEBUG_MMU_PTE #define dprintk_pte(X...) printk(KERN_INFO X) #else #define dprintk_pte(X...) do { } while(0) #endif #define PTEG_FLAG_ACCESSED 0x00000100 #define PTEG_FLAG_DIRTY 0x00000080 #ifndef SID_SHIFT #define SID_SHIFT 28 #endif static inline bool check_debug_ip(struct kvm_vcpu *vcpu) { #ifdef DEBUG_MMU_PTE_IP return vcpu->arch.pc == DEBUG_MMU_PTE_IP; #else return true; #endif } static inline u32 sr_vsid(u32 sr_raw) { return sr_raw & 0x0fffffff; } static inline bool sr_valid(u32 sr_raw) { return (sr_raw & 0x80000000) ? false : true; } static inline bool sr_ks(u32 sr_raw) { return (sr_raw & 0x40000000) ? true: false; } static inline bool sr_kp(u32 sr_raw) { return (sr_raw & 0x20000000) ? true: false; } static inline bool sr_nx(u32 sr_raw) { return (sr_raw & 0x10000000) ? 
true: false; } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) { return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf]; } static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, bool data) { u64 vsid; struct kvmppc_pte pte; if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) return pte.vpage; kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16); } static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) { kvmppc_set_msr(vcpu, 0); } static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, u32 sre, gva_t eaddr, bool primary) { u32 page, hash, pteg, htabmask; hva_t r; page = (eaddr & 0x0FFFFFFF) >> 12; htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; hash = ((sr_vsid(sre) ^ page) << 6); if (!primary) hash = ~hash; hash &= htabmask; pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, sr_vsid(sre)); r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK); } static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary) { return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) | (primary ? 
0 : 0x40) | 0x80000000; } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; int i; for (i = 0; i < 8; i++) { if (data) bat = &vcpu_book3s->dbat[i]; else bat = &vcpu_book3s->ibat[i]; if (vcpu->arch.shared->msr & MSR_PR) { if (!bat->vp) continue; } else { if (!bat->vs) continue; } if (check_debug_ip(vcpu)) { dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n", data ? 'd' : 'i', i, eaddr, bat->bepi, bat->bepi_mask); } if ((eaddr & bat->bepi_mask) == bat->bepi) { u64 vsid; kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); vsid <<= 16; pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid; pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); pte->may_read = bat->pp; pte->may_write = bat->pp > 1; pte->may_execute = true; if (!pte->may_read) { printk(KERN_INFO "BAT is not readable!\n"); continue; } if (!pte->may_write) { /* let's treat r/o BATs as not-readable for now */ dprintk_pte("BAT is read-only!\n"); continue; } return 0; } } return -ENOENT; } static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, bool primary) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u32 sre; hva_t ptegp; u32 pteg[16]; u32 ptem = 0; int i; int found = 0; sre = find_sr(vcpu, eaddr); dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, sr_vsid(sre), sre); pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary); if (kvm_is_error_hva(ptegp)) { printk(KERN_INFO "KVM: Invalid PTEG!\n"); goto no_page_found; } ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary); if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { printk(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp); goto no_page_found; } for (i=0; i<16; i+=2) { if (ptem == pteg[i]) { u8 pp; pte->raddr = (pteg[i+1] & 
~(0xFFFULL)) | (eaddr & 0xFFF); pp = pteg[i+1] & 3; if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) pp |= 4; pte->may_write = false; pte->may_read = false; pte->may_execute = true; switch (pp) { case 0: case 1: case 2: case 6: pte->may_write = true; case 3: case 5: case 7: pte->may_read = true; break; } if ( !pte->may_read ) continue; dprintk_pte("MMU: Found PTE -> %x %x - %x\n", pteg[i], pteg[i+1], pp); found = 1; break; } } /* Update PTE C and A bits, so the guest's swapper knows we used the page */ if (found) { u32 oldpte = pteg[i+1]; if (pte->may_read) pteg[i+1] |= PTEG_FLAG_ACCESSED; if (pte->may_write) pteg[i+1] |= PTEG_FLAG_DIRTY; else dprintk_pte("KVM: Mapping read-only page!\n"); /* Write back into the PTEG */ if (pteg[i+1] != oldpte) copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); return 0; } no_page_found: if (check_debug_ip(vcpu)) { dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", to_book3s(vcpu)->sdr1, ptegp); for (i=0; i<16; i+=2) { dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", i, pteg[i], pteg[i+1], ptem); } } return -ENOENT; } static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data) { int r; ulong mp_ea = vcpu->arch.magic_page_ea; pte->eaddr = eaddr; /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && !(vcpu->arch.shared->msr & MSR_PR)) { pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); pte->raddr &= KVM_PAM; pte->may_execute = true; pte->may_read = true; pte->may_write = true; return 0; } r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); if (r < 0) r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); if (r < 0) r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false); return r; } static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) { 
return vcpu->arch.shared->sr[srnum]; } static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ulong value) { vcpu->arch.shared->sr[srnum] = value; kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); } static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000); } static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { ulong ea = esid << SID_SHIFT; u32 sr; u64 gvsid = esid; if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { sr = find_sr(vcpu, ea); if (sr_valid(sr)) gvsid = sr_vsid(sr); } /* In case we only have one of MSR_IR or MSR_DR set, let's put that in the real-mode context (and hope RM doesn't access high memory) */ switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { case 0: *vsid = VSID_REAL | esid; break; case MSR_IR: *vsid = VSID_REAL_IR | gvsid; break; case MSR_DR: *vsid = VSID_REAL_DR | gvsid; break; case MSR_DR|MSR_IR: if (sr_valid(sr)) *vsid = sr_vsid(sr); else *vsid = VSID_BAT | gvsid; break; default: BUG(); } if (vcpu->arch.shared->msr & MSR_PR) *vsid |= VSID_PR; return 0; } static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu) { return true; } void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; mmu->xlate = kvmppc_mmu_book3s_32_xlate; mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr; mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; mmu->slbmte = NULL; mmu->slbmfee = NULL; mmu->slbmfev = NULL; mmu->slbie = NULL; mmu->slbia = NULL; }
gpl-2.0
kirananto/RAZORFERRARI
drivers/edac/edac_device_sysfs.c
767
24585
/* * file for managing the edac_device subsystem of devices for EDAC * * (C) 2007 SoftwareBitMaker * * This file may be distributed under the terms of the * GNU General Public License. * * Written Doug Thompson <norsk5@xmission.com> * */ #include <linux/ctype.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/edac.h> #include "edac_core.h" #include "edac_module.h" #define EDAC_DEVICE_SYMLINK "device" #define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj) #define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr) /* * Set of edac_device_ctl_info attribute store/show functions */ /* 'log_ue' */ static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info *ctl_info, char *data) { return sprintf(data, "%u\n", ctl_info->log_ue); } static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info *ctl_info, const char *data, size_t count) { /* if parameter is zero, turn off flag, if non-zero turn on flag */ ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0); return count; } /* 'log_ce' */ static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info *ctl_info, char *data) { return sprintf(data, "%u\n", ctl_info->log_ce); } static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info *ctl_info, const char *data, size_t count) { /* if parameter is zero, turn off flag, if non-zero turn on flag */ ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0); return count; } /* 'panic_on_ce' */ static ssize_t edac_device_ctl_panic_on_ce_show(struct edac_device_ctl_info *ctl_info, char *data) { return snprintf(data, PAGE_SIZE, "%u\n", ctl_info->panic_on_ce); } /* 'panic_on_ue' */ static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info *ctl_info, char *data) { return sprintf(data, "%u\n", ctl_info->panic_on_ue); } static ssize_t edac_device_ctl_panic_on_ce_store(struct edac_device_ctl_info *ctl_info, const char *data, size_t count) { /* if parameter is zero, turn off 
flag, if non-zero turn on flag */ ctl_info->panic_on_ce = (simple_strtoul(data, NULL, 0) != 0); return count; } static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info *ctl_info, const char *data, size_t count) { /* if parameter is zero, turn off flag, if non-zero turn on flag */ ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0); return count; } /* 'poll_msec' show and store functions*/ static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info *ctl_info, char *data) { return sprintf(data, "%u\n", ctl_info->poll_msec); } static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info *ctl_info, const char *data, size_t count) { unsigned long value; /* get the value and enforce that it is non-zero, must be at least * one millisecond for the delay period, between scans * Then cancel last outstanding delay for the work request * and set a new one. */ value = simple_strtoul(data, NULL, 0); edac_device_reset_delay_period(ctl_info, value); return count; } /* edac_device_ctl_info specific attribute structure */ struct ctl_info_attribute { struct attribute attr; ssize_t(*show) (struct edac_device_ctl_info *, char *); ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t); }; #define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj) #define to_ctl_info_attr(a) container_of(a,struct ctl_info_attribute,attr) /* Function to 'show' fields from the edac_dev 'ctl_info' structure */ static ssize_t edac_dev_ctl_info_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj); struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr); if (ctl_info_attr->show) return ctl_info_attr->show(edac_dev, buffer); return -EIO; } /* Function to 'store' fields into the edac_dev 'ctl_info' structure */ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t count) { struct 
edac_device_ctl_info *edac_dev = to_ctl_info(kobj); struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr); if (ctl_info_attr->store) return ctl_info_attr->store(edac_dev, buffer, count); return -EIO; } /* edac_dev file operations for an 'ctl_info' */ static const struct sysfs_ops device_ctl_info_ops = { .show = edac_dev_ctl_info_show, .store = edac_dev_ctl_info_store }; #define CTL_INFO_ATTR(_name,_mode,_show,_store) \ static struct ctl_info_attribute attr_ctl_info_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ }; /* Declare the various ctl_info attributes here and their respective ops */ CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR, edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store); CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR, edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store); CTL_INFO_ATTR(panic_on_ce, S_IRUGO | S_IWUSR, edac_device_ctl_panic_on_ce_show, edac_device_ctl_panic_on_ce_store); CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR, edac_device_ctl_panic_on_ue_show, edac_device_ctl_panic_on_ue_store); CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR, edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store); /* Base Attributes of the EDAC_DEVICE ECC object */ static struct ctl_info_attribute *device_ctrl_attr[] = { &attr_ctl_info_panic_on_ce, &attr_ctl_info_panic_on_ue, &attr_ctl_info_log_ue, &attr_ctl_info_log_ce, &attr_ctl_info_poll_msec, NULL, }; /* * edac_device_ctrl_master_release * * called when the reference count for the 'main' kobj * for a edac_device control struct reaches zero * * Reference count model: * One 'main' kobject for each control structure allocated. * That main kobj is initially set to one AND * the reference count for the EDAC 'core' module is * bumped by one, thus added 'keep in memory' dependency. * * Each new internal kobj (in instances and blocks) then * bumps the 'main' kobject. 
* * When they are released their release functions decrement * the 'main' kobj. * * When the main kobj reaches zero (0) then THIS function * is called which then decrements the EDAC 'core' module. * When the module reference count reaches zero then the * module no longer has dependency on keeping the release * function code in memory and module can be unloaded. * * This will support several control objects as well, each * with its own 'main' kobj. */ static void edac_device_ctrl_master_release(struct kobject *kobj) { struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); edac_dbg(4, "control index=%d\n", edac_dev->dev_idx); /* decrement the EDAC CORE module ref count */ module_put(edac_dev->owner); /* free the control struct containing the 'main' kobj * passed in to this routine */ kfree(edac_dev); } /* ktype for the main (master) kobject */ static struct kobj_type ktype_device_ctrl = { .release = edac_device_ctrl_master_release, .sysfs_ops = &device_ctl_info_ops, .default_attrs = (struct attribute **)device_ctrl_attr, }; /* * edac_device_register_sysfs_main_kobj * * perform the high level setup for the new edac_device instance * * Return: 0 SUCCESS * !0 FAILURE */ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) { struct bus_type *edac_subsys; int err; edac_dbg(1, "\n"); /* get the /sys/devices/system/edac reference */ edac_subsys = edac_get_sysfs_subsys(); if (edac_subsys == NULL) { edac_dbg(1, "no edac_subsys error\n"); err = -ENODEV; goto err_out; } /* Point to the 'edac_subsys' this instance 'reports' to */ edac_dev->edac_subsys = edac_subsys; /* Init the devices's kobject */ memset(&edac_dev->kobj, 0, sizeof(struct kobject)); /* Record which module 'owns' this control structure * and bump the ref count of the module */ edac_dev->owner = THIS_MODULE; if (!try_module_get(edac_dev->owner)) { err = -ENODEV; goto err_mod_get; } /* register */ err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl, 
&edac_subsys->dev_root->kobj, "%s", edac_dev->name); if (err) { edac_dbg(1, "Failed to register '.../edac/%s'\n", edac_dev->name); goto err_kobj_reg; } kobject_uevent(&edac_dev->kobj, KOBJ_ADD); /* At this point, to 'free' the control struct, * edac_device_unregister_sysfs_main_kobj() must be used */ edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name); return 0; /* Error exit stack */ err_kobj_reg: module_put(edac_dev->owner); err_mod_get: edac_put_sysfs_subsys(); err_out: return err; } /* * edac_device_unregister_sysfs_main_kobj: * the '..../edac/<name>' kobject */ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) { edac_dbg(0, "\n"); edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj)); /* * Unregister the edac device's kobject and * allow for reference count to reach 0 at which point * the callback will be called to: * a) module_put() this module * b) 'kfree' the memory */ kobject_put(&dev->kobj); edac_put_sysfs_subsys(); } /* edac_dev -> instance information */ /* * Set of low-level instance attribute show functions */ static ssize_t instance_ue_count_show(struct edac_device_instance *instance, char *data) { return sprintf(data, "%u\n", instance->counters.ue_count); } static ssize_t instance_ce_count_show(struct edac_device_instance *instance, char *data) { return sprintf(data, "%u\n", instance->counters.ce_count); } #define to_instance(k) container_of(k, struct edac_device_instance, kobj) #define to_instance_attr(a) container_of(a,struct instance_attribute,attr) /* DEVICE instance kobject release() function */ static void edac_device_ctrl_instance_release(struct kobject *kobj) { struct edac_device_instance *instance; edac_dbg(1, "\n"); /* map from this kobj to the main control struct * and then dec the main kobj count */ instance = to_instance(kobj); kobject_put(&instance->ctl->kobj); } /* instance specific attribute structure */ struct instance_attribute { struct attribute attr; ssize_t(*show) (struct 
edac_device_instance *, char *); ssize_t(*store) (struct edac_device_instance *, const char *, size_t); }; /* Function to 'show' fields from the edac_dev 'instance' structure */ static ssize_t edac_dev_instance_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct edac_device_instance *instance = to_instance(kobj); struct instance_attribute *instance_attr = to_instance_attr(attr); if (instance_attr->show) return instance_attr->show(instance, buffer); return -EIO; } /* Function to 'store' fields into the edac_dev 'instance' structure */ static ssize_t edac_dev_instance_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t count) { struct edac_device_instance *instance = to_instance(kobj); struct instance_attribute *instance_attr = to_instance_attr(attr); if (instance_attr->store) return instance_attr->store(instance, buffer, count); return -EIO; } /* edac_dev file operations for an 'instance' */ static const struct sysfs_ops device_instance_ops = { .show = edac_dev_instance_show, .store = edac_dev_instance_store }; #define INSTANCE_ATTR(_name,_mode,_show,_store) \ static struct instance_attribute attr_instance_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ }; /* * Define attributes visible for the edac_device instance object * Each contains a pointer to a show and an optional set * function pointer that does the low level output/input */ INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL); INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL); /* list of edac_dev 'instance' attributes */ static struct instance_attribute *device_instance_attr[] = { &attr_instance_ce_count, &attr_instance_ue_count, NULL, }; /* The 'ktype' for each edac_dev 'instance' */ static struct kobj_type ktype_instance_ctrl = { .release = edac_device_ctrl_instance_release, .sysfs_ops = &device_instance_ops, .default_attrs = (struct attribute **)device_instance_attr, }; /* 
edac_dev -> instance -> block information */ #define to_block(k) container_of(k, struct edac_device_block, kobj) #define to_block_attr(a) \ container_of(a, struct edac_dev_sysfs_block_attribute, attr) /* * Set of low-level block attribute show functions */ static ssize_t block_ue_count_show(struct kobject *kobj, struct attribute *attr, char *data) { struct edac_device_block *block = to_block(kobj); return sprintf(data, "%u\n", block->counters.ue_count); } static ssize_t block_ce_count_show(struct kobject *kobj, struct attribute *attr, char *data) { struct edac_device_block *block = to_block(kobj); return sprintf(data, "%u\n", block->counters.ce_count); } /* DEVICE block kobject release() function */ static void edac_device_ctrl_block_release(struct kobject *kobj) { struct edac_device_block *block; edac_dbg(1, "\n"); /* get the container of the kobj */ block = to_block(kobj); /* map from 'block kobj' to 'block->instance->controller->main_kobj' * now 'release' the block kobject */ kobject_put(&block->instance->ctl->kobj); } /* Function to 'show' fields from the edac_dev 'block' structure */ static ssize_t edac_dev_block_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct edac_dev_sysfs_block_attribute *block_attr = to_block_attr(attr); if (block_attr->show) return block_attr->show(kobj, attr, buffer); return -EIO; } /* Function to 'store' fields into the edac_dev 'block' structure */ static ssize_t edac_dev_block_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t count) { struct edac_dev_sysfs_block_attribute *block_attr; block_attr = to_block_attr(attr); if (block_attr->store) return block_attr->store(kobj, attr, buffer, count); return -EIO; } /* edac_dev file operations for a 'block' */ static const struct sysfs_ops device_block_ops = { .show = edac_dev_block_show, .store = edac_dev_block_store }; #define BLOCK_ATTR(_name,_mode,_show,_store) \ static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \ 
.attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ }; BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL); BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL); /* list of edac_dev 'block' attributes */ static struct edac_dev_sysfs_block_attribute *device_block_attr[] = { &attr_block_ce_count, &attr_block_ue_count, NULL, }; /* The 'ktype' for each edac_dev 'block' */ static struct kobj_type ktype_block_ctrl = { .release = edac_device_ctrl_block_release, .sysfs_ops = &device_block_ops, .default_attrs = (struct attribute **)device_block_attr, }; /* block ctor/dtor code */ /* * edac_device_create_block */ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, struct edac_device_instance *instance, struct edac_device_block *block) { int i; int err; struct edac_dev_sysfs_block_attribute *sysfs_attrib; struct kobject *main_kobj; edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n", instance->name, instance, block->name, block); edac_dbg(4, "block kobj=%p block kobj->parent=%p\n", &block->kobj, &block->kobj.parent); /* init this block's kobject */ memset(&block->kobj, 0, sizeof(struct kobject)); /* bump the main kobject's reference count for this controller * and this instance is dependent on the main */ main_kobj = kobject_get(&edac_dev->kobj); if (!main_kobj) { err = -ENODEV; goto err_out; } /* Add this block's kobject */ err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl, &instance->kobj, "%s", block->name); if (err) { edac_dbg(1, "Failed to register instance '%s'\n", block->name); kobject_put(main_kobj); err = -ENODEV; goto err_out; } /* If there are driver level block attributes, then added them * to the block kobject */ sysfs_attrib = block->block_attributes; if (sysfs_attrib && block->nr_attribs) { for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n", sysfs_attrib->attr.name, sysfs_attrib, &block->kobj); /* 
Create each block_attribute file */ err = sysfs_create_file(&block->kobj, &sysfs_attrib->attr); if (err) goto err_on_attrib; } } kobject_uevent(&block->kobj, KOBJ_ADD); return 0; /* Error unwind stack */ err_on_attrib: kobject_put(&block->kobj); err_out: return err; } /* * edac_device_delete_block(edac_dev,block); */ static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev, struct edac_device_block *block) { struct edac_dev_sysfs_block_attribute *sysfs_attrib; int i; /* if this block has 'attributes' then we need to iterate over the list * and 'remove' the attributes on this block */ sysfs_attrib = block->block_attributes; if (sysfs_attrib && block->nr_attribs) { for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { /* remove each block_attrib file */ sysfs_remove_file(&block->kobj, (struct attribute *) sysfs_attrib); } } /* unregister this block's kobject, SEE: * edac_device_ctrl_block_release() callback operation */ kobject_put(&block->kobj); } /* instance ctor/dtor code */ /* * edac_device_create_instance * create just one instance of an edac_device 'instance' */ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, int idx) { int i, j; int err; struct edac_device_instance *instance; struct kobject *main_kobj; instance = &edac_dev->instances[idx]; /* Init the instance's kobject */ memset(&instance->kobj, 0, sizeof(struct kobject)); instance->ctl = edac_dev; /* bump the main kobject's reference count for this controller * and this instance is dependent on the main */ main_kobj = kobject_get(&edac_dev->kobj); if (!main_kobj) { err = -ENODEV; goto err_out; } /* Formally register this instance's kobject under the edac_device */ err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, &edac_dev->kobj, "%s", instance->name); if (err != 0) { edac_dbg(2, "Failed to register instance '%s'\n", instance->name); kobject_put(main_kobj); goto err_out; } edac_dbg(4, "now register '%d' blocks for instance %d\n", 
instance->nr_blocks, idx); /* register all blocks of this instance */ for (i = 0; i < instance->nr_blocks; i++) { err = edac_device_create_block(edac_dev, instance, &instance->blocks[i]); if (err) { /* If any fail, remove all previous ones */ for (j = 0; j < i; j++) edac_device_delete_block(edac_dev, &instance->blocks[j]); goto err_release_instance_kobj; } } kobject_uevent(&instance->kobj, KOBJ_ADD); edac_dbg(4, "Registered instance %d '%s' kobject\n", idx, instance->name); return 0; /* error unwind stack */ err_release_instance_kobj: kobject_put(&instance->kobj); err_out: return err; } /* * edac_device_remove_instance * remove an edac_device instance */ static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev, int idx) { struct edac_device_instance *instance; int i; instance = &edac_dev->instances[idx]; /* unregister all blocks in this instance */ for (i = 0; i < instance->nr_blocks; i++) edac_device_delete_block(edac_dev, &instance->blocks[i]); /* unregister this instance's kobject, SEE: * edac_device_ctrl_instance_release() for callback operation */ kobject_put(&instance->kobj); } /* * edac_device_create_instances * create the first level of 'instances' for this device * (ie 'cache' might have 'cache0', 'cache1', 'cache2', etc */ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev) { int i, j; int err; edac_dbg(0, "\n"); /* iterate over creation of the instances */ for (i = 0; i < edac_dev->nr_instances; i++) { err = edac_device_create_instance(edac_dev, i); if (err) { /* unwind previous instances on error */ for (j = 0; j < i; j++) edac_device_delete_instance(edac_dev, j); return err; } } return 0; } /* * edac_device_delete_instances(edac_dev); * unregister all the kobjects of the instances */ static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev) { int i; /* iterate over creation of the instances */ for (i = 0; i < edac_dev->nr_instances; i++) edac_device_delete_instance(edac_dev, i); } /* 
edac_dev sysfs ctor/dtor code */ /* * edac_device_add_main_sysfs_attributes * add some attributes to this instance's main kobject */ static int edac_device_add_main_sysfs_attributes( struct edac_device_ctl_info *edac_dev) { struct edac_dev_sysfs_attribute *sysfs_attrib; int err = 0; sysfs_attrib = edac_dev->sysfs_attributes; if (sysfs_attrib) { /* iterate over the array and create an attribute for each * entry in the list */ while (sysfs_attrib->attr.name != NULL) { err = sysfs_create_file(&edac_dev->kobj, (struct attribute*) sysfs_attrib); if (err) goto err_out; sysfs_attrib++; } } err_out: return err; } /* * edac_device_remove_main_sysfs_attributes * remove any attributes to this instance's main kobject */ static void edac_device_remove_main_sysfs_attributes( struct edac_device_ctl_info *edac_dev) { struct edac_dev_sysfs_attribute *sysfs_attrib; /* if there are main attributes, defined, remove them. First, * point to the start of the array and iterate over it * removing each attribute listed from this device's instance's kobject */ sysfs_attrib = edac_dev->sysfs_attributes; if (sysfs_attrib) { while (sysfs_attrib->attr.name != NULL) { sysfs_remove_file(&edac_dev->kobj, (struct attribute *) sysfs_attrib); sysfs_attrib++; } } } /* * edac_device_create_sysfs() Constructor * * accept a created edac_device control structure * and 'export' it to sysfs. The 'main' kobj should already have been * created. 'instance' and 'block' kobjects should be registered * along with any 'block' attributes from the low driver. In addition, * the main attributes (if any) are connected to the main kobject of * the control structure. 
* * Return: * 0 Success * !0 Failure */ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) { int err; struct kobject *edac_kobj = &edac_dev->kobj; edac_dbg(0, "idx=%d\n", edac_dev->dev_idx); /* go create any main attributes callers wants */ err = edac_device_add_main_sysfs_attributes(edac_dev); if (err) { edac_dbg(0, "failed to add sysfs attribs\n"); goto err_out; } /* create a symlink from the edac device * to the platform 'device' being used for this */ err = sysfs_create_link(edac_kobj, &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); if (err) { edac_dbg(0, "sysfs_create_link() returned err= %d\n", err); goto err_remove_main_attribs; } /* Create the first level instance directories * In turn, the nested blocks beneath the instances will * be registered as well */ err = edac_device_create_instances(edac_dev); if (err) { edac_dbg(0, "edac_device_create_instances() returned err= %d\n", err); goto err_remove_link; } edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx); return 0; /* Error unwind stack */ err_remove_link: /* remove the sym link */ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK); err_remove_main_attribs: edac_device_remove_main_sysfs_attributes(edac_dev); err_out: return err; } /* * edac_device_remove_sysfs() destructor * * given an edac_device struct, tear down the kobject resources */ void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) { edac_dbg(0, "\n"); /* remove any main attributes for this device */ edac_device_remove_main_sysfs_attributes(edac_dev); /* remove the device sym link */ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK); /* walk the instance/block kobject tree, deconstructing it */ edac_device_delete_instances(edac_dev); }
gpl-2.0
mathur/rohan.kernel.op3
drivers/hwmon/pmbus/pmbus.c
1023
6302
/* * Hardware monitoring driver for PMBus devices * * Copyright (c) 2010, 2011 Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/i2c.h> #include "pmbus.h" /* * Find sensor groups and status registers on each page. 
*/ static void pmbus_find_sensor_groups(struct i2c_client *client, struct pmbus_driver_info *info) { int page; /* Sensors detected on page 0 only */ if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN)) info->func[0] |= PMBUS_HAVE_VIN; if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP)) info->func[0] |= PMBUS_HAVE_VCAP; if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN)) info->func[0] |= PMBUS_HAVE_IIN; if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN)) info->func[0] |= PMBUS_HAVE_PIN; if (info->func[0] && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT)) info->func[0] |= PMBUS_HAVE_STATUS_INPUT; if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) && pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) { info->func[0] |= PMBUS_HAVE_FAN12; if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12)) info->func[0] |= PMBUS_HAVE_STATUS_FAN12; } if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) && pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) { info->func[0] |= PMBUS_HAVE_FAN34; if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34)) info->func[0] |= PMBUS_HAVE_STATUS_FAN34; } if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) info->func[0] |= PMBUS_HAVE_TEMP; if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2)) info->func[0] |= PMBUS_HAVE_TEMP2; if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3)) info->func[0] |= PMBUS_HAVE_TEMP3; if (info->func[0] & (PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3) && pmbus_check_byte_register(client, 0, PMBUS_STATUS_TEMPERATURE)) info->func[0] |= PMBUS_HAVE_STATUS_TEMP; /* Sensors detected on all pages */ for (page = 0; page < info->pages; page++) { if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) { info->func[page] |= PMBUS_HAVE_VOUT; if (pmbus_check_byte_register(client, page, PMBUS_STATUS_VOUT)) info->func[page] |= PMBUS_HAVE_STATUS_VOUT; } if (pmbus_check_word_register(client, 
page, PMBUS_READ_IOUT)) { info->func[page] |= PMBUS_HAVE_IOUT; if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_IOUT)) info->func[page] |= PMBUS_HAVE_STATUS_IOUT; } if (pmbus_check_word_register(client, page, PMBUS_READ_POUT)) info->func[page] |= PMBUS_HAVE_POUT; } } /* * Identify chip parameters. */ static int pmbus_identify(struct i2c_client *client, struct pmbus_driver_info *info) { int ret = 0; if (!info->pages) { /* * Check if the PAGE command is supported. If it is, * keep setting the page number until it fails or until the * maximum number of pages has been reached. Assume that * this is the number of pages supported by the chip. */ if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) { int page; for (page = 1; page < PMBUS_PAGES; page++) { if (pmbus_set_page(client, page) < 0) break; } pmbus_set_page(client, 0); info->pages = page; } else { info->pages = 1; } } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { int vout_mode; vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { switch (vout_mode >> 5) { case 0: break; case 1: info->format[PSC_VOLTAGE_OUT] = vid; break; case 2: info->format[PSC_VOLTAGE_OUT] = direct; break; default: ret = -ENODEV; goto abort; } } } /* * We should check if the COEFFICIENTS register is supported. * If it is, and the chip is configured for direct mode, we can read * the coefficients from the chip, one set per group of sensor * registers. * * To do this, we will need access to a chip which actually supports the * COEFFICIENTS command, since the command is too complex to implement * without testing it. Until then, abort if a chip configured for direct * mode was detected. 
*/ if (info->format[PSC_VOLTAGE_OUT] == direct) { ret = -ENODEV; goto abort; } /* Try to find sensor groups */ pmbus_find_sensor_groups(client, info); abort: return ret; } static int pmbus_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct pmbus_driver_info *info; info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info), GFP_KERNEL); if (!info) return -ENOMEM; info->pages = id->driver_data; info->identify = pmbus_identify; return pmbus_do_probe(client, id, info); } /* * Use driver_data to set the number of pages supported by the chip. */ static const struct i2c_device_id pmbus_id[] = { {"adp4000", 1}, {"bmr453", 1}, {"bmr454", 1}, {"mdt040", 1}, {"ncp4200", 1}, {"ncp4208", 1}, {"pdt003", 1}, {"pdt006", 1}, {"pdt012", 1}, {"pmbus", 0}, {"tps40400", 1}, {"udt020", 1}, {} }; MODULE_DEVICE_TABLE(i2c, pmbus_id); /* This is the driver that will be inserted */ static struct i2c_driver pmbus_driver = { .driver = { .name = "pmbus", }, .probe = pmbus_probe, .remove = pmbus_do_remove, .id_table = pmbus_id, }; module_i2c_driver(pmbus_driver); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("Generic PMBus driver"); MODULE_LICENSE("GPL");
gpl-2.0
anoever/thunderbolt
drivers/gpu/drm/radeon/rv740_dpm.c
2047
12768
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "rv740d.h"
#include "r600_dpm.h"
#include "rv770_dpm.h"
#include "atom.h"

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);

/*
 * Translate a hardware-encoded reference divider field into the actual
 * divider value. The mapping (including 17->3 / 18->2) follows the
 * register encoding table for this ASIC; returns 0 and logs an error for
 * unknown encodings.
 */
u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
{
	u32 ref = 0;

	switch (encoded_ref) {
	case 0:
		ref = 1;
		break;
	case 16:
		ref = 2;
		break;
	case 17:
		ref = 3;
		break;
	case 18:
		ref = 2;
		break;
	case 19:
		ref = 3;
		break;
	case 20:
		ref = 4;
		break;
	case 21:
		ref = 5;
		break;
	default:
		DRM_ERROR("Invalid encoded Reference Divider\n");
		ref = 0;
		break;
	}

	return ref;
}

/* One row of the DLL speed lookup table: data-rate range -> DLL setting. */
struct dll_speed_setting {
	u16 min;
	u16 max;
	u32 dll_speed;
};

/* Rows are ordered from highest to lowest data rate; ranges are (min, max]. */
static struct dll_speed_setting dll_speed_table[16] = {
	{ 270, 320, 0x0f },
	{ 240, 270, 0x0e },
	{ 200, 240, 0x0d },
	{ 180, 200, 0x0c },
	{ 160, 180, 0x0b },
	{ 140, 160, 0x0a },
	{ 120, 140, 0x09 },
	{ 110, 120, 0x08 },
	{  95, 110, 0x07 },
	{  85,  95, 0x06 },
	{  78,  85, 0x05 },
	{  70,  78, 0x04 },
	{  65,  70, 0x03 },
	{  60,  65, 0x02 },
	{  42,  60, 0x01 },
	{  00,  42, 0x00 }
};

/*
 * Look up the DLL speed setting for a memory clock (in 10 kHz units).
 * GDDR5 runs at 4x the clock, other memory at 2x. Falls back to the
 * fastest setting (0x0f) if the rate exceeds the table.
 */
u32 rv740_get_dll_speed(bool is_gddr5, u32 memory_clock)
{
	int i;
	u32 factor;
	u16 data_rate;

	if (is_gddr5)
		factor = 4;
	else
		factor = 2;

	data_rate = (u16)(memory_clock * factor / 1000);

	if (data_rate < dll_speed_table[0].max) {
		for (i = 0; i < 16; i++) {
			if (data_rate > dll_speed_table[i].min &&
			    data_rate <= dll_speed_table[i].max)
				return dll_speed_table[i].dll_speed;
		}
	}

	DRM_DEBUG_KMS("Target MCLK greater than largest MCLK in DLL speed table\n");

	return 0x0f;
}

/*
 * Fill the SMC SCLK structure for the requested engine clock: program the
 * SPLL reference/post/feedback dividers and, if enabled, the spread
 * spectrum registers. Register values are stored big-endian for the SMC.
 */
int rv740_populate_sclk_value(struct radeon_device *rdev, u32 engine_clock,
			      RV770_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 cg_spll_spread_spectrum = pi->clk_regs.rv770.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	/* Ask the VBIOS/ATOM tables for divider values for this clock */
	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* 64-bit intermediate to avoid overflow in the fbdiv computation */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	/* Optional engine-clock spread spectrum */
	if (pi->sclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = cpu_to_be32(engine_clock);
	sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);

	return 0;
}

/*
 * Fill the SMC MCLK structure for the requested memory clock: program the
 * AD (and, for GDDR5, DQ) MPLL dividers, optional memory spread spectrum,
 * and the DLL speed. Register values are stored big-endian for the SMC.
 */
int rv740_populate_mclk_value(struct radeon_device *rdev,
			      u32 engine_clock, u32 memory_clock,
			      RV7XX_SMC_MCLK_VALUE *mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
	struct atom_clock_dividers dividers;
	u32 ibias;
	u32 dll_speed;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
					     memory_clock, false, &dividers);
	if (ret)
		return ret;

	ibias = rv770_map_clkf_to_ibias(rdev, dividers.whole_fb_div);

	mpll_ad_func_cntl &= ~(CLKR_MASK |
			       YCLK_POST_DIV_MASK |
			       CLKF_MASK |
			       CLKFRAC_MASK |
			       IBIAS_MASK);
	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
	mpll_ad_func_cntl |= IBIAS(ibias);

	if (dividers.vco_mode)
		mpll_ad_func_cntl_2 |= VCO_MODE;
	else
		mpll_ad_func_cntl_2 &= ~VCO_MODE;

	/* GDDR5 also needs the DQ-side MPLL programmed identically */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(CLKR_MASK |
				       YCLK_POST_DIV_MASK |
				       CLKF_MASK |
				       CLKFRAC_MASK |
				       IBIAS_MASK);
		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
		mpll_dq_func_cntl |= IBIAS(ibias);

		if (dividers.vco_mode)
			mpll_dq_func_cntl_2 |= VCO_MODE;
		else
			mpll_dq_func_cntl_2 &= ~VCO_MODE;
	}

	/* Optional memory-clock spread spectrum */
	if (pi->mclk_ss) {
		struct radeon_atom_ss ss;
		u32 vco_freq = memory_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
			u32 reference_clock = rdev->clock.mpll.reference_freq;
			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
			u32 clk_v = 0x40000 * ss.percentage *
				(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clk_v);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clk_s);
		}
	}

	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
					memory_clock);

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);

	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}

/* Snapshot the current clock-control registers into the power-info cache. */
void rv740_read_clock_registers(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	pi->clk_regs.rv770.cg_spll_func_cntl =
		RREG32(CG_SPLL_FUNC_CNTL);
	pi->clk_regs.rv770.cg_spll_func_cntl_2 =
		RREG32(CG_SPLL_FUNC_CNTL_2);
	pi->clk_regs.rv770.cg_spll_func_cntl_3 =
		RREG32(CG_SPLL_FUNC_CNTL_3);
	pi->clk_regs.rv770.cg_spll_spread_spectrum =
		RREG32(CG_SPLL_SPREAD_SPECTRUM);
	pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
		RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clk_regs.rv770.mpll_ad_func_cntl =
		RREG32(MPLL_AD_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
		RREG32(MPLL_AD_FUNC_CNTL_2);
	pi->clk_regs.rv770.mpll_dq_func_cntl =
		RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
		RREG32(MPLL_DQ_FUNC_CNTL_2);
	pi->clk_regs.rv770.mclk_pwrmgt_cntl =
		RREG32(MCLK_PWRMGT_CNTL);
	pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
	pi->clk_regs.rv770.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clk_regs.rv770.mpll_ss2 = RREG32(MPLL_SS2);
}

/*
 * Build the ACPI (lowest-power) state in the SMC state table: derived from
 * the initial state, with the MPLLs reset/bypassed, memory DLLs bypassed,
 * and the SPLL put to sleep. All three levels are made identical.
 */
int rv740_populate_smc_acpi_state(struct radeon_device *rdev,
				  RV770_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 mpll_ad_func_cntl = pi->clk_regs.rv770.mpll_ad_func_cntl;
	u32 mpll_ad_func_cntl_2 = pi->clk_regs.rv770.mpll_ad_func_cntl_2;
	u32 mpll_dq_func_cntl = pi->clk_regs.rv770.mpll_dq_func_cntl;
	u32 mpll_dq_func_cntl_2 = pi->clk_regs.rv770.mpll_dq_func_cntl_2;
	u32 spll_func_cntl = pi->clk_regs.rv770.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clk_regs.rv770.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = pi->clk_regs.rv770.cg_spll_func_cntl_3;
	u32 mclk_pwrmgt_cntl = pi->clk_regs.rv770.mclk_pwrmgt_cntl;
	u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;

	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		rv770_populate_vddc_value(rdev, pi->acpi_vddc,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE =
			pi->pcie_gen2 ?
			pi->acpi_pcie_gen2 : 0;
		table->ACPIState.levels[0].gen2XSP =
			pi->acpi_pcie_gen2;
	} else {
		rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
					  &table->ACPIState.levels[0].vddc);
		table->ACPIState.levels[0].gen2PCIE = 0;
	}

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	mpll_dq_func_cntl_2 |= BYPASS | BIAS_GEN_PDNB | RESET_EN;

	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
			     MRDCKA1_RESET |
			     MRDCKB0_RESET |
			     MRDCKB1_RESET |
			     MRDCKC0_RESET |
			     MRDCKC1_RESET |
			     MRDCKD0_RESET |
			     MRDCKD1_RESET);

	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(mpll_ad_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(mpll_dq_func_cntl_2);
	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL =
		cpu_to_be32(dll_cntl);

	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);

	table->ACPIState.levels[0].sclk.sclk_value = 0;

	table->ACPIState.levels[1] = table->ACPIState.levels[0];
	table->ACPIState.levels[2] = table->ACPIState.levels[0];

	rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);

	return 0;
}

/* Toggle the MPLL spread-spectrum enable bit. */
void rv740_enable_mclk_spread_spectrum(struct radeon_device *rdev,
				       bool enable)
{
	if (enable)
		WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
	else
		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
}

/*
 * Map a memory clock to its frequency-ratio index (one step per 2500
 * units above 10000); out-of-range clocks map to index 0.
 */
u8 rv740_get_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if ((memory_clock < 10000) || (memory_clock > 47500))
		mc_para_index = 0x00;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 2500);

	return mc_para_index;
}
gpl-2.0
mantera/WX_435_Kernel-CM7
drivers/mtd/maps/dilnetpc.c
2047
13589
/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
 * featuring the AMD Elan SC410 processor. There are two variants of this
 * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
 * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
 * flash and 16 megs of RAM.
 * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
 * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>

#include <asm/io.h>

/*
** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
** Destroying any of these blocks transforms the DNPC into
** a paperweight (albeit not a very useful one, considering
** it only weighs a few grams).
**
** Therefore, the BIOS blocks must never be erased or written to
** except by people who know exactly what they are doing (e.g.
** to install a BIOS update). These partitions are marked read-only
** by default, but can be made read/write by undefining
** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
*/
#define DNPC_BIOS_BLOCKS_WRITEPROTECTED

/*
** The ID string (in ROM) is checked to determine whether we
** are running on a DNP/1486 or ADNP/1486
*/
#define BIOSID_BASE	0x000fe100

#define ID_DNPC	"DNP1486"
#define ID_ADNP	"ADNP1486"

/*
** Address where the flash should appear in CPU space
*/
#define FLASH_BASE	0x2000000

/*
** Chip Setup and Control (CSC) indexed register space
*/
#define CSC_INDEX	0x22
#define CSC_DATA	0x23

#define CSC_MMSWAR	0x30	/* MMS window C-F attributes register */
#define CSC_MMSWDSR	0x31	/* MMS window C-F device select register */

#define CSC_RBWR	0xa7	/* GPIO Read-Back/Write Register B */

#define CSC_CR		0xd0	/* internal I/O device disable/Echo */
				/* Z-bus/configuration register */

#define CSC_PCCMDCR	0xf1	/* PC card mode and DMA control register */

/*
** PC Card indexed register space:
*/
#define PCC_INDEX	0x3e0
#define PCC_DATA	0x3e1

#define PCC_AWER_B	0x46	/* Socket B Address Window enable register */
#define PCC_MWSAR_1_Lo	0x58	/* memory window 1 start address low register */
#define PCC_MWSAR_1_Hi	0x59	/* memory window 1 start address high register */
#define PCC_MWEAR_1_Lo	0x5A	/* memory window 1 stop address low register */
#define PCC_MWEAR_1_Hi	0x5B	/* memory window 1 stop address high register */
#define PCC_MWAOR_1_Lo	0x5C	/* memory window 1 address offset low register */
#define PCC_MWAOR_1_Hi	0x5D	/* memory window 1 address offset high register */

/*
** Access to SC4x0's Chip Setup and Control (CSC)
** and PC Card (PCC) indexed registers:
*/
static inline void setcsc(int reg, unsigned char data)
{
	outb(reg, CSC_INDEX);
	outb(data, CSC_DATA);
}

static inline unsigned char getcsc(int reg)
{
	outb(reg, CSC_INDEX);
	return(inb(CSC_DATA));
}

static inline void setpcc(int reg, unsigned char data)
{
	outb(reg, PCC_INDEX);
	outb(data, PCC_DATA);
}

static inline unsigned char getpcc(int reg)
{
	outb(reg, PCC_INDEX);
	return(inb(PCC_DATA));
}


/*
************************************************************
** Enable access to DIL/NetPC's flash by mapping it into
** the SC4x0's MMS Window C.
************************************************************
*/
static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
{
	unsigned long flash_end = flash_base + flash_size - 1;

	/*
	** enable setup of MMS windows C-F:
	*/
	/* - enable PC Card indexed register space */
	setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
	/* - set PC Card controller to operate in standard mode */
	setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);

	/*
	** Program base address and end address of window
	** where the flash ROM should appear in CPU address space
	*/
	setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
	setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
	setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
	setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);

	/* program offset of first flash location to appear in this window (0) */
	setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
	setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);

	/* set attributes for MMS window C: non-cacheable, write-enabled */
	setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);

	/* select physical device ROMCS0 (i.e. flash) for MMS Window C */
	setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);

	/* enable memory window 1 */
	setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);

	/* now disable PC Card indexed register space again */
	setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
}

/*
************************************************************
** Disable access to DIL/NetPC's flash by mapping it into
** the SC4x0's MMS Window C.
************************************************************
*/
static void dnpc_unmap_flash(void)
{
	/* - enable PC Card indexed register space */
	setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);

	/* disable memory window 1 */
	setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);

	/* now disable PC Card indexed register space again */
	setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
}


/*
************************************************************
** Enable/Disable VPP to write to flash
************************************************************
*/

static DEFINE_SPINLOCK(dnpc_spin);
/* Reference count so nested enables keep VPP on until the last disable */
static int        vpp_counter = 0;
/*
** This is what has to be done for the DNP board ..
*/
static void dnp_set_vpp(struct map_info *not_used, int on)
{
	spin_lock_irq(&dnpc_spin);

	if (on)
	{
		if(++vpp_counter == 1)
			setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
	}
	else
	{
		if(--vpp_counter == 0)
			setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
		else
			BUG_ON(vpp_counter < 0);
	}
	spin_unlock_irq(&dnpc_spin);
}

/*
** .. and this the ADNP version:
*/
static void adnp_set_vpp(struct map_info *not_used, int on)
{
	spin_lock_irq(&dnpc_spin);

	if (on)
	{
		if(++vpp_counter == 1)
			setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
	}
	else
	{
		if(--vpp_counter == 0)
			setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
		else
			BUG_ON(vpp_counter < 0);
	}
	spin_unlock_irq(&dnpc_spin);
}



#define DNP_WINDOW_SIZE		0x00200000	/*  DNP flash size is 2MiB  */
#define ADNP_WINDOW_SIZE	0x00400000	/* ADNP flash size is 4MiB */
#define WINDOW_ADDR		FLASH_BASE

static struct map_info dnpc_map = {
	.name = "ADNP Flash Bank",
	.size = ADNP_WINDOW_SIZE,
	.bankwidth = 1,
	.set_vpp = adnp_set_vpp,
	.phys = WINDOW_ADDR
};

/*
** The layout of the flash is somewhat "strange":
**
** 1.  960 KiB (15 blocks) : Space for ROM Bootloader and user data
** 2.   64 KiB (1 block)   : System BIOS
** 3.  960 KiB (15 blocks) : User Data (DNP model) or
** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
** 4.   64 KiB (1 block)   : System BIOS Entry
*/

static struct mtd_partition partition_info[]=
{
	{
		.name =		"ADNP boot",
		.offset =	0,
		.size =		0xf0000,
	},
	{
		.name =		"ADNP system BIOS",
		.offset =	MTDPART_OFS_NXTBLK,
		.size =		0x10000,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
		.mask_flags =	MTD_WRITEABLE,
#endif
	},
	{
		.name =		"ADNP file system",
		.offset =	MTDPART_OFS_NXTBLK,
		.size =		0x2f0000,
	},
	{
		.name =		"ADNP system BIOS entry",
		.offset =	MTDPART_OFS_NXTBLK,
		.size =		MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
		.mask_flags =	MTD_WRITEABLE,
#endif
	},
};

#define NUM_PARTITIONS ARRAY_SIZE(partition_info)

static struct mtd_info *mymtd;
static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
static struct mtd_info *merged_mtd;

/*
** "Highlevel" partition info:
**
** Using the MTD concat layer, we can re-arrange partitions to our
** liking: we construct a virtual MTD device by concatenating the
** partitions, specifying the sequence such that the boot block
** is immediately followed by the filesystem block (i.e. the stupid
** system BIOS block is mapped to a different place). When re-partitioning
** this concatenated MTD device, we can set the boot block size to
** an arbitrary (though erase block aligned) value i.e. not one that
** is dictated by the flash's physical layout. We can thus set the
** boot block to be e.g. 64 KB (which is fully sufficient if we want
** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
** a large kernel image. In all cases, the remainder of the flash
** is available as file system space.
*/

static struct mtd_partition higlvl_partition_info[]=
{
	{
		.name =		"ADNP boot block",
		.offset =	0,
		.size =		CONFIG_MTD_DILNETPC_BOOTSIZE,
	},
	{
		.name =		"ADNP file system space",
		.offset =	MTDPART_OFS_NXTBLK,
		.size =		ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
	},
	{
		.name =		"ADNP system BIOS + BIOS Entry",
		.offset =	MTDPART_OFS_NXTBLK,
		.size =		MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
		.mask_flags =	MTD_WRITEABLE,
#endif
	},
};

#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)


/*
 * Map the BIOS ID string and compare it against the known board IDs.
 * Returns 1 for DNP, 0 for ADNP, -1 if neither was recognized.
 * NOTE(review): iounmap() is reached even when ioremap() returned NULL —
 * presumably harmless on this platform, but worth confirming.
 */
static int dnp_adnp_probe(void)
{
	char *biosid, rc = -1;

	biosid = (char*)ioremap(BIOSID_BASE, 16);
	if(biosid)
	{
		if(!strcmp(biosid, ID_DNPC))
			rc = 1;		/* this is a DNPC  */
		else if(!strcmp(biosid, ID_ADNP))
			rc = 0;		/* this is a ADNPC */
	}
	iounmap((void *)biosid);
	return(rc);
}


static int __init init_dnpc(void)
{
	int is_dnp;

	/*
	** determine hardware (DNP/ADNP/invalid)
	*/
	if((is_dnp = dnp_adnp_probe()) < 0)
		return -ENXIO;

	/*
	** Things are set up for ADNP by default
	** -> modify all that needs to be different for DNP
	*/
	if(is_dnp)
	{	/*
		** Adjust window size, select correct set_vpp function.
		** The partitioning scheme is identical on both DNP
		** and ADNP except for the size of the third partition.
		*/
		int i;
		dnpc_map.size          = DNP_WINDOW_SIZE;
		dnpc_map.set_vpp       = dnp_set_vpp;
		partition_info[2].size = 0xf0000;

		/*
		** increment all string pointers so the leading 'A' gets skipped,
		** thus turning all occurrences of "ADNP ..." into "DNP ..."
		*/
		++dnpc_map.name;
		for(i = 0; i < NUM_PARTITIONS; i++)
			++partition_info[i].name;
		higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
			CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
		for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
			++higlvl_partition_info[i].name;
	}

	printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
	       is_dnp ? "DNPC" : "ADNP", dnpc_map.size,
	       (unsigned long long)dnpc_map.phys);
	dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);

	dnpc_map_flash(dnpc_map.phys, dnpc_map.size);

	if (!dnpc_map.virt) {
		printk("Failed to ioremap_nocache\n");
		return -EIO;
	}
	simple_map_init(&dnpc_map);

	printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);

	mymtd = do_map_probe("jedec_probe", &dnpc_map);

	if (!mymtd)
		mymtd = do_map_probe("cfi_probe", &dnpc_map);

	/*
	** If flash probes fail, try to make flashes accessible
	** at least as ROM. Ajust erasesize in this case since
	** the default one (128M) will break our partitioning
	*/
	if (!mymtd)
		if((mymtd = do_map_probe("map_rom", &dnpc_map)))
			mymtd->erasesize = 0x10000;

	if (!mymtd) {
		iounmap(dnpc_map.virt);
		return -ENXIO;
	}

	mymtd->owner = THIS_MODULE;

	/*
	** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
	** -> add_mtd_partitions() will _not_ register MTD devices for
	** the partitions, but will instead store pointers to the MTD
	** objects it creates into our lowlvl_parts[] array.
	** NOTE: we arrange the pointers such that the sequence of the
	**       partitions gets re-arranged: partition #2 follows
	**       partition #0.
	*/
	partition_info[0].mtdp = &lowlvl_parts[0];
	partition_info[1].mtdp = &lowlvl_parts[2];
	partition_info[2].mtdp = &lowlvl_parts[1];
	partition_info[3].mtdp = &lowlvl_parts[3];

	add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);

	/*
	** now create a virtual MTD device by concatenating the for partitions
	** (in the sequence given by the lowlvl_parts[] array.
	*/
	merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS,
				       "(A)DNP Flash Concatenated");
	if(merged_mtd)
	{	/*
		** now partition the new device the way we want it. This time,
		** we do not supply mtd pointers in higlvl_partition_info, so
		** add_mtd_partitions() will register the devices.
		*/
		add_mtd_partitions(merged_mtd, higlvl_partition_info,
				   NUM_HIGHLVL_PARTITIONS);
	}

	return 0;
}

static void __exit cleanup_dnpc(void)
{
	if(merged_mtd) {
		del_mtd_partitions(merged_mtd);
		mtd_concat_destroy(merged_mtd);
	}
	if (mymtd) {
		del_mtd_partitions(mymtd);
		map_destroy(mymtd);
	}
	if (dnpc_map.virt) {
		iounmap(dnpc_map.virt);
		dnpc_unmap_flash();
		dnpc_map.virt = NULL;
	}
}

module_init(init_dnpc);
module_exit(cleanup_dnpc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
gpl-2.0
zoobab/vzkernel
tools/perf/tests/open-syscall.c
2303
1331
#include "thread_map.h"
#include "evsel.h"
#include "debug.h"
#include "tests.h"

/*
 * Attach a counter to the sys_enter_open tracepoint for this thread,
 * perform a known number of open(2)/close(2) pairs, and verify the
 * counter intercepted exactly that many calls.
 */
int test__open_syscall_event(void)
{
	struct thread_map *threads;
	struct perf_evsel *evsel;
	unsigned int expected_calls = 111;
	unsigned int call;
	int ret = -1;
	int fd;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/* Generate the workload: a fixed number of open(2) calls */
	for (call = 0; call < expected_calls; ++call) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != expected_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 expected_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	ret = 0;
	/* Unwind in reverse order of acquisition */
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return ret;
}
gpl-2.0
GalaxyTab4/twrp_matissevewifi
tools/perf/tests/open-syscall.c
2303
1331
#include "thread_map.h"
#include "evsel.h"
#include "debug.h"
#include "tests.h"

/*
 * Test that a counter on the syscalls:sys_enter_open tracepoint, opened
 * per-thread for this process, counts exactly the number of open(2)
 * calls we issue. Returns 0 on success, -1 on any failure (logged via
 * pr_debug).
 */
int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct perf_evsel *evsel;
	unsigned int nr_open_calls = 111, i;
	/* Map only this process's thread(s) */
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	/* Generate the workload the counter should observe */
	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
	/* Cleanup in reverse order of acquisition */
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
gpl-2.0
Shaky156/Shield-Tablet-CPU2.5Ghz-GPU060Mhz-Overclock
drivers/isdn/i4l/isdn_ppp.c
2303
79844
/* $Id: isdn_ppp.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $ * * Linux ISDN subsystem, functions for synchronous PPP (linklevel). * * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/isdn.h> #include <linux/poll.h> #include <linux/ppp-comp.h> #include <linux/slab.h> #ifdef CONFIG_IPPP_FILTER #include <linux/filter.h> #endif #include "isdn_common.h" #include "isdn_ppp.h" #include "isdn_net.h" #ifndef PPP_IPX #define PPP_IPX 0x002b #endif /* Prototypes */ static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot); static int isdn_ppp_closewait(int slot); static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto); static int isdn_ppp_if_get_unit(char *namebuf); static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *); static struct sk_buff *isdn_ppp_decompress(struct sk_buff *, struct ippp_struct *, struct ippp_struct *, int *proto); static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto); static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto, struct ippp_struct *is, struct ippp_struct *master, int type); static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb); /* New CCP stuff */ static void isdn_ppp_ccp_kickup(struct ippp_struct *is); static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto, unsigned char code, unsigned char id, unsigned char *data, int len); static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is); static void isdn_ppp_ccp_reset_free(struct ippp_struct *is); static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is, unsigned char id); static void isdn_ppp_ccp_timer_callback(unsigned long closure); static struct 
ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is, unsigned char id); static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is, struct isdn_ppp_resetparams *rp); static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is, unsigned char id); #ifdef CONFIG_ISDN_MPP static ippp_bundle *isdn_ppp_bundle_arr = NULL; static int isdn_ppp_mp_bundle_array_init(void); static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to); static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb); static void isdn_ppp_mp_cleanup(isdn_net_local *lp); static int isdn_ppp_bundle(struct ippp_struct *, int unit); #endif /* CONFIG_ISDN_MPP */ char *isdn_ppp_revision = "$Revision: 1.1.2.3 $"; static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS]; static struct isdn_ppp_compressor *ipc_head = NULL; /* * frame log (debug) */ static void isdn_ppp_frame_log(char *info, char *data, int len, int maxlen, int unit, int slot) { int cnt, j, i; char buf[80]; if (len < maxlen) maxlen = len; for (i = 0, cnt = 0; cnt < maxlen; i++) { for (j = 0; j < 16 && cnt < maxlen; j++, cnt++) sprintf(buf + j * 3, "%02x ", (unsigned char)data[cnt]); printk(KERN_DEBUG "[%d/%d].%s[%d]: %s\n", unit, slot, info, i, buf); } } /* * unbind isdn_net_local <=> ippp-device * note: it can happen, that we hangup/free the master before the slaves * in this case we bind another lp to the master device */ int isdn_ppp_free(isdn_net_local *lp) { struct ippp_struct *is; if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return 0; } #ifdef CONFIG_ISDN_MPP spin_lock(&lp->netdev->pb->lock); #endif isdn_net_rm_from_bundle(lp); #ifdef CONFIG_ISDN_MPP if (lp->netdev->pb->ref_ct == 1) /* last link in queue? 
*/ isdn_ppp_mp_cleanup(lp); lp->netdev->pb->ref_ct--; spin_unlock(&lp->netdev->pb->lock); #endif /* CONFIG_ISDN_MPP */ if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n", __func__, lp->ppp_slot); return 0; } is = ippp_table[lp->ppp_slot]; if ((is->state & IPPP_CONNECT)) isdn_ppp_closewait(lp->ppp_slot); /* force wakeup on ippp device */ else if (is->state & IPPP_ASSIGNED) is->state = IPPP_OPEN; /* fallback to 'OPEN but not ASSIGNED' state */ if (is->debug & 0x1) printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_slot, (long) lp, (long) is->lp); is->lp = NULL; /* link is down .. set lp to NULL */ lp->ppp_slot = -1; /* is this OK ?? */ return 0; } /* * bind isdn_net_local <=> ippp-device * * This function is allways called with holding dev->lock so * no additional lock is needed */ int isdn_ppp_bind(isdn_net_local *lp) { int i; int unit = 0; struct ippp_struct *is; int retval; if (lp->pppbind < 0) { /* device bounded to ippp device ? */ isdn_net_dev *net_dev = dev->netdev; char exclusive[ISDN_MAX_CHANNELS]; /* exclusive flags */ memset(exclusive, 0, ISDN_MAX_CHANNELS); while (net_dev) { /* step through net devices to find exclusive minors */ isdn_net_local *lp = net_dev->local; if (lp->pppbind >= 0) exclusive[lp->pppbind] = 1; net_dev = net_dev->next; } /* * search a free device / slot */ for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (ippp_table[i]->state == IPPP_OPEN && !exclusive[ippp_table[i]->minor]) { /* OPEN, but not connected! */ break; } } } else { for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (ippp_table[i]->minor == lp->pppbind && (ippp_table[i]->state & IPPP_OPEN) == IPPP_OPEN) break; } } if (i >= ISDN_MAX_CHANNELS) { printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n"); retval = -1; goto out; } /* get unit number from interface name .. ugly! 
*/ unit = isdn_ppp_if_get_unit(lp->netdev->dev->name); if (unit < 0) { printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n", lp->netdev->dev->name); retval = -1; goto out; } lp->ppp_slot = i; is = ippp_table[i]; is->lp = lp; is->unit = unit; is->state = IPPP_OPEN | IPPP_ASSIGNED; /* assigned to a netdevice but not connected */ #ifdef CONFIG_ISDN_MPP retval = isdn_ppp_mp_init(lp, NULL); if (retval < 0) goto out; #endif /* CONFIG_ISDN_MPP */ retval = lp->ppp_slot; out: return retval; } /* * kick the ipppd on the device * (wakes up daemon after B-channel connect) */ void isdn_ppp_wakeup_daemon(isdn_net_local *lp) { if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK; wake_up_interruptible(&ippp_table[lp->ppp_slot]->wq); } /* * there was a hangup on the netdevice * force wakeup of the ippp device * go into 'device waits for release' state */ static int isdn_ppp_closewait(int slot) { struct ippp_struct *is; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return 0; } is = ippp_table[slot]; if (is->state) wake_up_interruptible(&is->wq); is->state = IPPP_CLOSEWAIT; return 1; } /* * isdn_ppp_find_slot / isdn_ppp_free_slot */ static int isdn_ppp_get_slot(void) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (!ippp_table[i]->state) return i; } return -1; } /* * isdn_ppp_open */ int isdn_ppp_open(int min, struct file *file) { int slot; struct ippp_struct *is; if (min < 0 || min >= ISDN_MAX_CHANNELS) return -ENODEV; slot = isdn_ppp_get_slot(); if (slot < 0) { return -EBUSY; } is = file->private_data = ippp_table[slot]; printk(KERN_DEBUG "ippp, open, slot: %d, minor: %d, state: %04x\n", slot, min, is->state); /* compression stuff */ is->link_compressor = is->compressor = NULL; is->link_decompressor = is->decompressor = NULL; 
is->link_comp_stat = is->comp_stat = NULL; is->link_decomp_stat = is->decomp_stat = NULL; is->compflags = 0; is->reset = isdn_ppp_ccp_reset_alloc(is); is->lp = NULL; is->mp_seqno = 0; /* MP sequence number */ is->pppcfg = 0; /* ppp configuration */ is->mpppcfg = 0; /* mppp configuration */ is->last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */ is->unit = -1; /* set, when we have our interface */ is->mru = 1524; /* MRU, default 1524 */ is->maxcid = 16; /* VJ: maxcid */ is->tk = current; init_waitqueue_head(&is->wq); is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ is->last = is->rq; is->minor = min; #ifdef CONFIG_ISDN_PPP_VJ /* * VJ header compression init */ is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */ #endif #ifdef CONFIG_IPPP_FILTER is->pass_filter = NULL; is->active_filter = NULL; #endif is->state = IPPP_OPEN; return 0; } /* * release ippp device */ void isdn_ppp_release(int min, struct file *file) { int i; struct ippp_struct *is; if (min < 0 || min >= ISDN_MAX_CHANNELS) return; is = file->private_data; if (!is) { printk(KERN_ERR "%s: no file->private_data\n", __func__); return; } if (is->debug & 0x1) printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", min, (long) is->lp); if (is->lp) { /* a lp address says: this link is still up */ isdn_net_dev *p = is->lp->netdev; if (!p) { printk(KERN_ERR "%s: no lp->netdev\n", __func__); return; } is->state &= ~IPPP_CONNECT; /* -> effect: no call of wakeup */ /* * isdn_net_hangup() calls isdn_ppp_free() * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1 * removing the IPPP_CONNECT flag omits calling of isdn_ppp_wakeup_daemon() */ isdn_net_hangup(p->dev); } for (i = 0; i < NUM_RCV_BUFFS; i++) { kfree(is->rq[i].buf); is->rq[i].buf = NULL; } is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ is->last = is->rq; #ifdef CONFIG_ISDN_PPP_VJ /* TODO: if this was the previous master: link the slcomp to the new master */ slhc_free(is->slcomp); 
is->slcomp = NULL;
#endif
#ifdef CONFIG_IPPP_FILTER
	/* drop any BPF pass/active filters installed via ioctl */
	kfree(is->pass_filter);
	is->pass_filter = NULL;
	kfree(is->active_filter);
	is->active_filter = NULL;
#endif

	/* TODO: if this was the previous master: link the stuff to the new master */
	/* release per-slot (de)compressor state, then forget the compressors */
	if (is->comp_stat)
		is->compressor->free(is->comp_stat);
	if (is->link_comp_stat)
		is->link_compressor->free(is->link_comp_stat);
	if (is->link_decomp_stat)
		is->link_decompressor->free(is->link_decomp_stat);
	if (is->decomp_stat)
		is->decompressor->free(is->decomp_stat);
	is->compressor = is->link_compressor = NULL;
	is->decompressor = is->link_decompressor = NULL;
	is->comp_stat = is->link_comp_stat = NULL;
	is->decomp_stat = is->link_decomp_stat = NULL;

	/* Clean up if necessary */
	if (is->reset)
		isdn_ppp_ccp_reset_free(is);

	/* this slot is ready for new connections */
	is->state = 0;
}

/*
 * get_arg .. ioctl helper
 *
 * Copy 'len' bytes from userspace pointer 'b' into kernel buffer 'val'.
 * A non-positive 'len' defaults to sizeof(void *).
 * Returns 0 on success, -EFAULT on a faulting copy.
 */
static int get_arg(void __user *b, void *val, int len)
{
	if (len <= 0)
		len = sizeof(void *);

	if (copy_from_user(val, b, len))
		return -EFAULT;
	return 0;
}

/*
 * set arg ..
ioctl helper */ static int set_arg(void __user *b, void *val, int len) { if (len <= 0) len = sizeof(void *); if (copy_to_user(b, val, len)) return -EFAULT; return 0; } #ifdef CONFIG_IPPP_FILTER static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; int len, err; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; if (!uprog.len) { *p = NULL; return 0; } /* uprog.len is unsigned short, so no overflow here */ len = uprog.len * sizeof(struct sock_filter); code = memdup_user(uprog.filter, len); if (IS_ERR(code)) return PTR_ERR(code); err = sk_chk_filter(code, uprog.len); if (err) { kfree(code); return err; } *p = code; return uprog.len; } #endif /* CONFIG_IPPP_FILTER */ /* * ippp device ioctl */ int isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) { unsigned long val; int r, i, j; struct ippp_struct *is; isdn_net_local *lp; struct isdn_ppp_comp_data data; void __user *argp = (void __user *)arg; is = file->private_data; lp = is->lp; if (is->debug & 0x1) printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x state: %x\n", min, cmd, is->state); if (!(is->state & IPPP_OPEN)) return -EINVAL; switch (cmd) { case PPPIOCBUNDLE: #ifdef CONFIG_ISDN_MPP if (!(is->state & IPPP_CONNECT)) return -EINVAL; if ((r = get_arg(argp, &val, sizeof(val)))) return r; printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n", (int) min, (int) is->unit, (int) val); return isdn_ppp_bundle(is, val); #else return -1; #endif break; case PPPIOCGUNIT: /* get ppp/isdn unit number */ if ((r = set_arg(argp, &is->unit, sizeof(is->unit)))) return r; break; case PPPIOCGIFNAME: if (!lp) return -EINVAL; if ((r = set_arg(argp, lp->netdev->dev->name, strlen(lp->netdev->dev->name)))) return r; break; case PPPIOCGMPFLAGS: /* get configuration flags */ if ((r = set_arg(argp, &is->mpppcfg, sizeof(is->mpppcfg)))) return r; break; case PPPIOCSMPFLAGS: /* set configuration flags */ if ((r 
= get_arg(argp, &val, sizeof(val)))) return r; is->mpppcfg = val; break; case PPPIOCGFLAGS: /* get configuration flags */ if ((r = set_arg(argp, &is->pppcfg, sizeof(is->pppcfg)))) return r; break; case PPPIOCSFLAGS: /* set configuration flags */ if ((r = get_arg(argp, &val, sizeof(val)))) { return r; } if (val & SC_ENABLE_IP && !(is->pppcfg & SC_ENABLE_IP) && (is->state & IPPP_CONNECT)) { if (lp) { /* OK .. we are ready to send buffers */ is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */ netif_wake_queue(lp->netdev->dev); break; } } is->pppcfg = val; break; case PPPIOCGIDLE: /* get idle time information */ if (lp) { struct ppp_idle pidle; pidle.xmit_idle = pidle.recv_idle = lp->huptimer; if ((r = set_arg(argp, &pidle, sizeof(struct ppp_idle)))) return r; } break; case PPPIOCSMRU: /* set receive unit size for PPP */ if ((r = get_arg(argp, &val, sizeof(val)))) return r; is->mru = val; break; case PPPIOCSMPMRU: break; case PPPIOCSMPMTU: break; case PPPIOCSMAXCID: /* set the maximum compression slot id */ if ((r = get_arg(argp, &val, sizeof(val)))) return r; val++; if (is->maxcid != val) { #ifdef CONFIG_ISDN_PPP_VJ struct slcompress *sltmp; #endif if (is->debug & 0x1) printk(KERN_DEBUG "ippp, ioctl: changed MAXCID to %ld\n", val); is->maxcid = val; #ifdef CONFIG_ISDN_PPP_VJ sltmp = slhc_init(16, val); if (!sltmp) { printk(KERN_ERR "ippp, can't realloc slhc struct\n"); return -ENOMEM; } if (is->slcomp) slhc_free(is->slcomp); is->slcomp = sltmp; #endif } break; case PPPIOCGDEBUG: if ((r = set_arg(argp, &is->debug, sizeof(is->debug)))) return r; break; case PPPIOCSDEBUG: if ((r = get_arg(argp, &val, sizeof(val)))) return r; is->debug = val; break; case PPPIOCGCOMPRESSORS: { unsigned long protos[8] = {0,}; struct isdn_ppp_compressor *ipc = ipc_head; while (ipc) { j = ipc->num / (sizeof(long) * 8); i = ipc->num % (sizeof(long) * 8); if (j < 8) protos[j] |= (1UL << i); ipc = ipc->next; } if ((r = set_arg(argp, protos, 8 * sizeof(long)))) return r; } break; case 
PPPIOCSCOMPRESSOR: if ((r = get_arg(argp, &data, sizeof(struct isdn_ppp_comp_data)))) return r; return isdn_ppp_set_compressor(is, &data); case PPPIOCGCALLINFO: { struct pppcallinfo pci; memset((char *)&pci, 0, sizeof(struct pppcallinfo)); if (lp) { strncpy(pci.local_num, lp->msn, 63); if (lp->dial) { strncpy(pci.remote_num, lp->dial->num, 63); } pci.charge_units = lp->charge; if (lp->outgoing) pci.calltype = CALLTYPE_OUTGOING; else pci.calltype = CALLTYPE_INCOMING; if (lp->flags & ISDN_NET_CALLBACK) pci.calltype |= CALLTYPE_CALLBACK; } return set_arg(argp, &pci, sizeof(struct pppcallinfo)); } #ifdef CONFIG_IPPP_FILTER case PPPIOCSPASS: { struct sock_filter *code; int len = get_filter(argp, &code); if (len < 0) return len; kfree(is->pass_filter); is->pass_filter = code; is->pass_len = len; break; } case PPPIOCSACTIVE: { struct sock_filter *code; int len = get_filter(argp, &code); if (len < 0) return len; kfree(is->active_filter); is->active_filter = code; is->active_len = len; break; } #endif /* CONFIG_IPPP_FILTER */ default: break; } return 0; } unsigned int isdn_ppp_poll(struct file *file, poll_table *wait) { u_int mask; struct ippp_buf_queue *bf, *bl; u_long flags; struct ippp_struct *is; is = file->private_data; if (is->debug & 0x2) printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n", iminor(file_inode(file))); /* just registers wait_queue hook. This doesn't really wait. */ poll_wait(file, &is->wq, wait); if (!(is->state & IPPP_OPEN)) { if (is->state == IPPP_CLOSEWAIT) return POLLHUP; printk(KERN_DEBUG "isdn_ppp: device not open\n"); return POLLERR; } /* we're always ready to send .. */ mask = POLLOUT | POLLWRNORM; spin_lock_irqsave(&is->buflock, flags); bl = is->last; bf = is->first; /* * if IPPP_NOBLOCK is set we return even if we have nothing to read */ if (bf->next != bl || (is->state & IPPP_NOBLOCK)) { is->state &= ~IPPP_NOBLOCK; mask |= POLLIN | POLLRDNORM; } spin_unlock_irqrestore(&is->buflock, flags); return mask; } /* * fill up isdn_ppp_read() queue .. 
*/ static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot) { struct ippp_buf_queue *bf, *bl; u_long flags; u_char *nbuf; struct ippp_struct *is; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_WARNING "ippp: illegal slot(%d).\n", slot); return 0; } is = ippp_table[slot]; if (!(is->state & IPPP_CONNECT)) { printk(KERN_DEBUG "ippp: device not activated.\n"); return 0; } nbuf = kmalloc(len + 4, GFP_ATOMIC); if (!nbuf) { printk(KERN_WARNING "ippp: Can't alloc buf\n"); return 0; } nbuf[0] = PPP_ALLSTATIONS; nbuf[1] = PPP_UI; nbuf[2] = proto >> 8; nbuf[3] = proto & 0xff; memcpy(nbuf + 4, buf, len); spin_lock_irqsave(&is->buflock, flags); bf = is->first; bl = is->last; if (bf == bl) { printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n"); bf = bf->next; kfree(bf->buf); is->first = bf; } bl->buf = (char *) nbuf; bl->len = len + 4; is->last = bl->next; spin_unlock_irqrestore(&is->buflock, flags); wake_up_interruptible(&is->wq); return len; } /* * read() .. non-blocking: ipppd calls it only after select() * reports, that there is data */ int isdn_ppp_read(int min, struct file *file, char __user *buf, int count) { struct ippp_struct *is; struct ippp_buf_queue *b; u_long flags; u_char *save_buf; is = file->private_data; if (!(is->state & IPPP_OPEN)) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; spin_lock_irqsave(&is->buflock, flags); b = is->first->next; save_buf = b->buf; if (!save_buf) { spin_unlock_irqrestore(&is->buflock, flags); return -EAGAIN; } if (b->len < count) count = b->len; b->buf = NULL; is->first = b; spin_unlock_irqrestore(&is->buflock, flags); if (copy_to_user(buf, save_buf, count)) count = -EFAULT; kfree(save_buf); return count; } /* * ipppd wanna write a packet to the card .. 
non-blocking */
int isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
{
	struct ippp_struct *is = file->private_data;
	unsigned char protobuf[4];
	isdn_net_local *lp;
	int proto;

	if (!(is->state & IPPP_CONNECT))
		return 0;

	lp = is->lp;
	if (!lp) {
		/* -> push it directly to the lowlevel interface */
		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
		return count;
	}

	/*
	 * Don't reset huptimer for
	 * LCP packets. (Echo requests).
	 */
	if (copy_from_user(protobuf, buf, 4))
		return -EFAULT;
	proto = PPP_PROTOCOL(protobuf);
	if (proto != PPP_LCP)
		lp->huptimer = 0;

	if (lp->isdn_device < 0 || lp->isdn_channel < 0)
		return 0;

	if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
	    lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) {
		struct sk_buff *skb;
		unsigned short hdroom;

		/*
		 * we need to reserve enough space in front of
		 * sk_buff. old call to dev_alloc_skb only reserved
		 * 16 bytes, now we are looking what the driver want
		 */
		hdroom = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
		skb = alloc_skb(hdroom + count, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "isdn_ppp_write: out of memory!\n");
			return count;
		}
		skb_reserve(skb, hdroom);
		if (copy_from_user(skb_put(skb, count), buf, count)) {
			kfree_skb(skb);
			return -EFAULT;
		}
		if (is->debug & 0x40) {
			printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
			isdn_ppp_frame_log("xmit", skb->data, skb->len, 32,
					   is->unit, lp->ppp_slot);
		}

		/* keeps CCP/compression states in sync */
		isdn_ppp_send_ccp(lp->netdev, lp, skb);

		isdn_net_write_super(lp, skb);
	}
	return count;
}

/*
 * init memory, structures etc.
 */
int isdn_ppp_init(void)
{
	int i, j;

#ifdef CONFIG_ISDN_MPP
	if (isdn_ppp_mp_bundle_array_init() < 0)
		return -ENOMEM;
#endif /* CONFIG_ISDN_MPP */

	/* allocate one ippp_struct per channel and wire up its receive ring */
	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
		if (!(ippp_table[i] = kzalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
			printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
			/* roll back the slots allocated so far */
			for (j = 0; j < i; j++)
				kfree(ippp_table[j]);
			return -1;
		}
		spin_lock_init(&ippp_table[i]->buflock);
		ippp_table[i]->state = 0;
		ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
		ippp_table[i]->last = ippp_table[i]->rq;

		/* link rq[] into a circular doubly-linked ring */
		for (j = 0; j < NUM_RCV_BUFFS; j++) {
			ippp_table[i]->rq[j].buf = NULL;
			ippp_table[i]->rq[j].last = ippp_table[i]->rq +
				(NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
			ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
		}
	}
	return 0;
}

/* free everything isdn_ppp_init() allocated (module unload path) */
void isdn_ppp_cleanup(void)
{
	int i;

	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		kfree(ippp_table[i]);

#ifdef CONFIG_ISDN_MPP
	kfree(isdn_ppp_bundle_arr);
#endif /* CONFIG_ISDN_MPP */
}

/*
 * check for address/control field and skip if allowed
 * retval != 0 -> discard packet silently
 */
static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb)
{
	if (skb->len < 1)
		return -1;

	if (skb->data[0] == 0xff) {
		if (skb->len < 2)
			return -1;
		if (skb->data[1] != 0x03)
			return -1;

		// skip address/control (AC) field
		skb_pull(skb, 2);
	} else {
		if (is->pppcfg & SC_REJ_COMP_AC)
			// if AC compression was not negotiated, but used, discard packet
			return -1;
	}
	return 0;
}

/*
 * get the PPP protocol header and pull skb
 * retval < 0 -> discard packet silently
 */
static int isdn_ppp_strip_proto(struct sk_buff *skb)
{
	int proto;

	if (skb->len < 1)
		return -1;

	if (skb->data[0] & 0x1) {
		// protocol field is compressed (single odd byte)
		proto = skb->data[0];
		skb_pull(skb, 1);
	} else {
		if (skb->len < 2)
			return -1;
		proto = ((int) skb->data[0] << 8) + skb->data[1];
		skb_pull(skb, 2);
	}
	return proto;
}

/*
 * handler for incoming packets on a syncPPP interface
 */
void isdn_ppp_receive(isdn_net_dev *net_dev,
isdn_net_local *lp, struct sk_buff *skb) { struct ippp_struct *is; int slot; int proto; BUG_ON(net_dev->local->master); // we're called with the master device always slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_receive: lp->ppp_slot(%d)\n", lp->ppp_slot); kfree_skb(skb); return; } is = ippp_table[slot]; if (is->debug & 0x4) { printk(KERN_DEBUG "ippp_receive: is:%08lx lp:%08lx slot:%d unit:%d len:%d\n", (long)is, (long)lp, lp->ppp_slot, is->unit, (int)skb->len); isdn_ppp_frame_log("receive", skb->data, skb->len, 32, is->unit, lp->ppp_slot); } if (isdn_ppp_skip_ac(is, skb) < 0) { kfree_skb(skb); return; } proto = isdn_ppp_strip_proto(skb); if (proto < 0) { kfree_skb(skb); return; } #ifdef CONFIG_ISDN_MPP if (is->compflags & SC_LINK_DECOMP_ON) { skb = isdn_ppp_decompress(skb, is, NULL, &proto); if (!skb) // decompression error return; } if (!(is->mpppcfg & SC_REJ_MP_PROT)) { // we agreed to receive MPPP if (proto == PPP_MP) { isdn_ppp_mp_receive(net_dev, lp, skb); return; } } #endif isdn_ppp_push_higher(net_dev, lp, skb, proto); } /* * we receive a reassembled frame, MPPP has been taken care of before. * address/control and protocol have been stripped from the skb * note: net_dev has to be master net_dev */ static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto) { struct net_device *dev = net_dev->dev; struct ippp_struct *is, *mis; isdn_net_local *mlp = NULL; int slot; slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n", lp->ppp_slot); goto drop_packet; } is = ippp_table[slot]; if (lp->master) { // FIXME? 
mlp = ISDN_MASTER_PRIV(lp); slot = mlp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n", lp->ppp_slot); goto drop_packet; } } mis = ippp_table[slot]; if (is->debug & 0x10) { printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto); isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot); } if (mis->compflags & SC_DECOMP_ON) { skb = isdn_ppp_decompress(skb, is, mis, &proto); if (!skb) // decompression error return; } switch (proto) { case PPP_IPX: /* untested */ if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: IPX\n"); skb->protocol = htons(ETH_P_IPX); break; case PPP_IP: if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: IP\n"); skb->protocol = htons(ETH_P_IP); break; case PPP_COMP: case PPP_COMPFRAG: printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n"); goto drop_packet; #ifdef CONFIG_ISDN_PPP_VJ case PPP_VJC_UNCOMP: if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n"); if (net_dev->local->ppp_slot < 0) { printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", __func__, net_dev->local->ppp_slot); goto drop_packet; } if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) { printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n"); goto drop_packet; } skb->protocol = htons(ETH_P_IP); break; case PPP_VJC_COMP: if (is->debug & 0x20) printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n"); { struct sk_buff *skb_old = skb; int pkt_len; skb = dev_alloc_skb(skb_old->len + 128); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); skb = skb_old; goto drop_packet; } skb_put(skb, skb_old->len + 128); skb_copy_from_linear_data(skb_old, skb->data, skb_old->len); if (net_dev->local->ppp_slot < 0) { printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", __func__, net_dev->local->ppp_slot); goto drop_packet; } pkt_len = 
slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb_old->len); kfree_skb(skb_old); if (pkt_len < 0) goto drop_packet; skb_trim(skb, pkt_len); skb->protocol = htons(ETH_P_IP); } break; #endif case PPP_CCP: case PPP_CCPFRAG: isdn_ppp_receive_ccp(net_dev, lp, skb, proto); /* Dont pop up ResetReq/Ack stuff to the daemon any longer - the job is done already */ if (skb->data[0] == CCP_RESETREQ || skb->data[0] == CCP_RESETACK) break; /* fall through */ default: isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot); /* push data to pppd device */ kfree_skb(skb); return; } #ifdef CONFIG_IPPP_FILTER /* check if the packet passes the pass and active filters * the filter instructions are constructed assuming * a four-byte PPP header on each packet (which is still present) */ skb_push(skb, 4); { u_int16_t *p = (u_int16_t *) skb->data; *p = 0; /* indicate inbound */ } if (is->pass_filter && sk_run_filter(skb, is->pass_filter) == 0) { if (is->debug & 0x2) printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); kfree_skb(skb); return; } if (!(is->active_filter && sk_run_filter(skb, is->active_filter) == 0)) { if (is->debug & 0x2) printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); lp->huptimer = 0; if (mlp) mlp->huptimer = 0; } skb_pull(skb, 4); #else /* CONFIG_IPPP_FILTER */ lp->huptimer = 0; if (mlp) mlp->huptimer = 0; #endif /* CONFIG_IPPP_FILTER */ skb->dev = dev; skb_reset_mac_header(skb); netif_rx(skb); /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ return; drop_packet: net_dev->local->stats.rx_dropped++; kfree_skb(skb); } /* * isdn_ppp_skb_push .. 
* checks whether we have enough space at the beginning of the skb
 * and allocs a new SKB if necessary
 */
static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p, int len)
{
	struct sk_buff *skb = *skb_p;

	if (skb_headroom(skb) < len) {
		/* not enough headroom: clone into a larger skb and drop the old one */
		struct sk_buff *nskb = skb_realloc_headroom(skb, len);

		if (!nskb) {
			printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
			dev_kfree_skb(skb);
			return NULL;
		}
		printk(KERN_DEBUG "isdn_ppp_skb_push:under %d %d\n",
		       skb_headroom(skb), len);
		dev_kfree_skb(skb);
		*skb_p = nskb;
		return skb_push(nskb, len);
	}
	return skb_push(skb, len);
}

/*
 * send ppp frame .. we expect a PIDCOMPressable proto --
 *  (here: currently always PPP_IP,PPP_VJC_COMP,PPP_VJC_UNCOMP)
 *
 * VJ compression may change skb pointer!!! .. requeue with old
 * skb isn't allowed!!
 */
int isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	isdn_net_local *lp, *mlp;
	isdn_net_dev *nd;
	unsigned int proto = PPP_IP;	/* 0x21 */
	struct ippp_struct *ipt, *ipts;
	int slot, retval = NETDEV_TX_OK;

	mlp = netdev_priv(netdev);
	nd = mlp->netdev;	/* get master lp */

	slot = mlp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
		       mlp->ppp_slot);
		kfree_skb(skb);
		goto out;
	}
	ipts = ippp_table[slot];

	if (!(ipts->pppcfg & SC_ENABLE_IP)) {	/* PPP connected ?
*/ if (ipts->debug & 0x1) printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name); retval = NETDEV_TX_BUSY; goto out; } switch (ntohs(skb->protocol)) { case ETH_P_IP: proto = PPP_IP; break; case ETH_P_IPX: proto = PPP_IPX; /* untested */ break; default: printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n", skb->protocol); dev_kfree_skb(skb); goto out; } lp = isdn_net_get_locked_lp(nd); if (!lp) { printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name); retval = NETDEV_TX_BUSY; goto out; } /* we have our lp locked from now on */ slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n", lp->ppp_slot); kfree_skb(skb); goto unlock; } ipt = ippp_table[slot]; /* * after this line .. requeueing in the device queue is no longer allowed!!! */ /* Pull off the fake header we stuck on earlier to keep * the fragmentation code happy. */ skb_pull(skb, IPPP_MAX_HEADER); #ifdef CONFIG_IPPP_FILTER /* check if we should pass this packet * the filter instructions are constructed assuming * a four-byte PPP header on each packet */ *skb_push(skb, 4) = 1; /* indicate outbound */ { __be16 *p = (__be16 *)skb->data; p++; *p = htons(proto); } if (ipt->pass_filter && sk_run_filter(skb, ipt->pass_filter) == 0) { if (ipt->debug & 0x4) printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); kfree_skb(skb); goto unlock; } if (!(ipt->active_filter && sk_run_filter(skb, ipt->active_filter) == 0)) { if (ipt->debug & 0x4) printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); lp->huptimer = 0; } skb_pull(skb, 4); #else /* CONFIG_IPPP_FILTER */ lp->huptimer = 0; #endif /* CONFIG_IPPP_FILTER */ if (ipt->debug & 0x4) printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len); if (ipts->debug & 0x40) isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32, ipts->unit, lp->ppp_slot); #ifdef CONFIG_ISDN_PPP_VJ if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) { /* ipts here? 
probably yes, but check this again */ struct sk_buff *new_skb; unsigned short hl; /* * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved * 16 bytes, now we are looking what the driver want. */ hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER; /* * Note: hl might still be insufficient because the method * above does not account for a possibible MPPP slave channel * which had larger HL header space requirements than the * master. */ new_skb = alloc_skb(hl + skb->len, GFP_ATOMIC); if (new_skb) { u_char *buf; int pktlen; skb_reserve(new_skb, hl); new_skb->dev = skb->dev; skb_put(new_skb, skb->len); buf = skb->data; pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len, new_skb->data, &buf, !(ipts->pppcfg & SC_NO_TCP_CCID)); if (buf != skb->data) { if (new_skb->data != buf) printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n"); dev_kfree_skb(skb); skb = new_skb; } else { dev_kfree_skb(new_skb); } skb_trim(skb, pktlen); if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) { /* cslip? 
style -> PPP */ proto = PPP_VJC_COMP; skb->data[0] ^= SL_TYPE_COMPRESSED_TCP; } else { if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP) proto = PPP_VJC_UNCOMP; skb->data[0] = (skb->data[0] & 0x0f) | 0x40; } } } #endif /* * normal (single link) or bundle compression */ if (ipts->compflags & SC_COMP_ON) { /* We send compressed only if both down- und upstream compression is negotiated, that means, CCP is up */ if (ipts->compflags & SC_DECOMP_ON) { skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 0); } else { printk(KERN_DEBUG "isdn_ppp: CCP not yet up - sending as-is\n"); } } if (ipt->debug & 0x24) printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto); #ifdef CONFIG_ISDN_MPP if (ipt->mpppcfg & SC_MP_PROT) { /* we get mp_seqno from static isdn_net_local */ long mp_seqno = ipts->mp_seqno; ipts->mp_seqno++; if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) { unsigned char *data = isdn_ppp_skb_push(&skb, 3); if (!data) goto unlock; mp_seqno &= 0xfff; data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf); /* (B)egin & (E)ndbit .. */ data[1] = mp_seqno & 0xff; data[2] = proto; /* PID compression */ } else { unsigned char *data = isdn_ppp_skb_push(&skb, 5); if (!data) goto unlock; data[0] = MP_BEGIN_FRAG | MP_END_FRAG; /* (B)egin & (E)ndbit .. */ data[1] = (mp_seqno >> 16) & 0xff; /* sequence number: 24bit */ data[2] = (mp_seqno >> 8) & 0xff; data[3] = (mp_seqno >> 0) & 0xff; data[4] = proto; /* PID compression */ } proto = PPP_MP; /* MP Protocol, 0x003d */ } #endif /* * 'link in bundle' compression ... 
*/ if (ipt->compflags & SC_LINK_COMP_ON) skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 1); if ((ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff)) { unsigned char *data = isdn_ppp_skb_push(&skb, 1); if (!data) goto unlock; data[0] = proto & 0xff; } else { unsigned char *data = isdn_ppp_skb_push(&skb, 2); if (!data) goto unlock; data[0] = (proto >> 8) & 0xff; data[1] = proto & 0xff; } if (!(ipt->pppcfg & SC_COMP_AC)) { unsigned char *data = isdn_ppp_skb_push(&skb, 2); if (!data) goto unlock; data[0] = 0xff; /* All Stations */ data[1] = 0x03; /* Unnumbered information */ } /* tx-stats are now updated via BSENT-callback */ if (ipts->debug & 0x40) { printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, ipt->unit, lp->ppp_slot); } isdn_net_writebuf_skb(lp, skb); unlock: spin_unlock_bh(&lp->xmit_lock); out: return retval; } #ifdef CONFIG_IPPP_FILTER /* * check if this packet may trigger auto-dial. */ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp) { struct ippp_struct *is = ippp_table[lp->ppp_slot]; u_int16_t proto; int drop = 0; switch (ntohs(skb->protocol)) { case ETH_P_IP: proto = PPP_IP; break; case ETH_P_IPX: proto = PPP_IPX; break; default: printk(KERN_ERR "isdn_ppp_autodial_filter: unsupported protocol 0x%x.\n", skb->protocol); return 1; } /* the filter instructions are constructed assuming * a four-byte PPP header on each packet. we have to * temporarily remove part of the fake header stuck on * earlier. 
 */
	*skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */
	{
		/* overwrite the 16-bit protocol field carried by the fake header */
		__be16 *p = (__be16 *)skb->data;

		p++;
		*p = htons(proto);
	}

	/* the packet is dropped (must not trigger auto-dial) when either
	 * installed socket filter rejects it */
	drop |= is->pass_filter
		&& sk_run_filter(skb, is->pass_filter) == 0;
	drop |= is->active_filter
		&& sk_run_filter(skb, is->active_filter) == 0;

	skb_push(skb, IPPP_MAX_HEADER - 4); /* restore the fake header */
	return drop;
}
#endif
#ifdef CONFIG_ISDN_MPP

/* this is _not_ rfc1990 header, but something we convert both short and long
 * headers to for convenience's sake:
 *	byte 0 is flags as in rfc1990
 *	bytes 1...4 is 24-bit sequence number converted to host byte order
 */
#define MP_HEADER_LEN	5

#define MP_LONGSEQ_MASK		0x00ffffff
#define MP_SHORTSEQ_MASK	0x00000fff
#define MP_LONGSEQ_MAX		MP_LONGSEQ_MASK
#define MP_SHORTSEQ_MAX		MP_SHORTSEQ_MASK
#define MP_LONGSEQ_MAXBIT	((MP_LONGSEQ_MASK + 1) >> 1)
#define MP_SHORTSEQ_MAXBIT	((MP_SHORTSEQ_MASK + 1) >> 1)

/* sequence-wrap safe comparisons (for long sequence)*/
#define MP_LT(a, b)	((a - b) & MP_LONGSEQ_MAXBIT)
#define MP_LE(a, b)	!((b - a) & MP_LONGSEQ_MAXBIT)
#define MP_GT(a, b)	((b - a) & MP_LONGSEQ_MAXBIT)
#define MP_GE(a, b)	!((a - b) & MP_LONGSEQ_MAXBIT)

/* accessors for the converted header: flags byte / host-order sequence no. */
#define MP_SEQ(f)	((*(u32 *)(f->data + 1)))
#define MP_FLAGS(f)	(f->data[0])

/* Allocate and initialize the global array of bundle descriptors, one per
 * possible ISDN channel.  Returns 0 on success or -ENOMEM. */
static int isdn_ppp_mp_bundle_array_init(void)
{
	int i;
	int sz = ISDN_MAX_CHANNELS * sizeof(ippp_bundle);

	if ((isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL)
		return -ENOMEM;
	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
	return 0;
}

/* Pick the first unreferenced bundle descriptor from the global array;
 * returns NULL when all of them are in use. */
static ippp_bundle *isdn_ppp_mp_bundle_alloc(void)
{
	int i;

	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		if (isdn_ppp_bundle_arr[i].ref_ct <= 0)
			return (isdn_ppp_bundle_arr + i);
	return NULL;
}

/* Attach link 'lp' to the existing MP bundle 'add_to', or, when add_to is
 * NULL, make lp the first link of a freshly allocated bundle.
 * Returns 0, -EINVAL (bad slot) or -ENOMEM. */
static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to)
{
	struct ippp_struct *is;

	if (lp->ppp_slot < 0) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
		       __func__, lp->ppp_slot);
		return (-EINVAL);
	}

	is = ippp_table[lp->ppp_slot];
	if (add_to) {
		/* joining an existing bundle: drop the reference held on the
		 * link's previous (private) bundle, if any */
		if (lp->netdev->pb)
			lp->netdev->pb->ref_ct--;
		lp->netdev->pb = add_to;
	}
else {		/* first link in a bundle */
		is->mp_seqno = 0;
		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
			return -ENOMEM;
		lp->next = lp->last = lp;	/* nobody else in a queue */
		lp->netdev->pb->frags = NULL;
		lp->netdev->pb->frames = 0;
		/* seq == UINT_MAX marks "no packet processed yet"; the first
		 * received sequence number is then accepted unconditionally
		 * (see the sequence-start check in isdn_ppp_mp_receive) */
		lp->netdev->pb->seq = UINT_MAX;
	}
	lp->netdev->pb->ref_ct++;

	is->last_link_seqno = 0;
	return 0;
}

static u32 isdn_ppp_mp_get_seq(int short_seq, struct sk_buff *skb,
			       u32 last_seq);
static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
					   struct sk_buff *from,
					   struct sk_buff *to);
static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
				   struct sk_buff *from, struct sk_buff *to);
static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb);

/* Queue one received multilink-PPP fragment (MP header still attached) on
 * the bundle's sorted fragment list and reassemble/deliver any packets that
 * become complete.  All work is done under the bundle spinlock. */
static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
				struct sk_buff *skb)
{
	struct ippp_struct *is;
	isdn_net_local *lpq;
	ippp_bundle *mp;
	isdn_mppp_stats *stats;
	struct sk_buff *newfrag, *frag, *start, *nextf;
	u32 newseq, minseq, thisseq;
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&net_dev->pb->lock, flags);
	mp = net_dev->pb;
	stats = &mp->stats;
	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
		       __func__, lp->ppp_slot);
		stats->frame_drops++;
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}
	is = ippp_table[slot];
	if (++mp->frames > stats->max_queue_len)
		stats->max_queue_len = mp->frames;

	if (is->debug & 0x8)
		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);

	/* convert short or long MP header to the internal 5-byte form and
	 * extract the 24-bit sequence number */
	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
				     skb, is->last_link_seqno);

	/* if this packet seq # is less than last already processed one,
	 * toss it right away, but check for sequence start case first
	 */
	if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
		mp->seq = newseq;	/* the first packet: required for
					 * rfc1990 non-compliant clients --
					 * prevents constant packet toss */
	} else if (MP_LT(newseq, mp->seq)) {
stats->frame_drops++; isdn_ppp_mp_free_skb(mp, skb); spin_unlock_irqrestore(&mp->lock, flags); return; } /* find the minimum received sequence number over all links */ is->last_link_seqno = minseq = newseq; for (lpq = net_dev->queue;;) { slot = lpq->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n", __func__, lpq->ppp_slot); } else { u32 lls = ippp_table[slot]->last_link_seqno; if (MP_LT(lls, minseq)) minseq = lls; } if ((lpq = lpq->next) == net_dev->queue) break; } if (MP_LT(minseq, mp->seq)) minseq = mp->seq; /* can't go beyond already processed * packets */ newfrag = skb; /* if this new fragment is before the first one, then enqueue it now. */ if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { newfrag->next = frag; mp->frags = frag = newfrag; newfrag = NULL; } start = MP_FLAGS(frag) & MP_BEGIN_FRAG && MP_SEQ(frag) == mp->seq ? frag : NULL; /* * main fragment traversing loop * * try to accomplish several tasks: * - insert new fragment into the proper sequence slot (once that's done * newfrag will be set to NULL) * - reassemble any complete fragment sequence (non-null 'start' * indicates there is a contiguous sequence present) * - discard any incomplete sequences that are below minseq -- due * to the fact that sender always increment sequence number, if there * is an incomplete sequence below minseq, no new fragments would * come to complete such sequence and it should be discarded * * loop completes when we accomplished the following tasks: * - new fragment is inserted in the proper sequence ('newfrag' is * set to NULL) * - we hit a gap in the sequence, so no reassembly/processing is * possible ('start' would be set to NULL) * * algorithm for this code is derived from code in the book * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) */ while (start != NULL || newfrag != NULL) { thisseq = MP_SEQ(frag); nextf = frag->next; /* drop any duplicate fragments */ if (newfrag != NULL && thisseq == 
newseq) { isdn_ppp_mp_free_skb(mp, newfrag); newfrag = NULL; } /* insert new fragment before next element if possible. */ if (newfrag != NULL && (nextf == NULL || MP_LT(newseq, MP_SEQ(nextf)))) { newfrag->next = nextf; frag->next = nextf = newfrag; newfrag = NULL; } if (start != NULL) { /* check for misplaced start */ if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { printk(KERN_WARNING"isdn_mppp(seq %d): new " "BEGIN flag with no prior END", thisseq); stats->seqerrs++; stats->frame_drops++; start = isdn_ppp_mp_discard(mp, start, frag); nextf = frag->next; } } else if (MP_LE(thisseq, minseq)) { if (MP_FLAGS(frag) & MP_BEGIN_FRAG) start = frag; else { if (MP_FLAGS(frag) & MP_END_FRAG) stats->frame_drops++; if (mp->frags == frag) mp->frags = nextf; isdn_ppp_mp_free_skb(mp, frag); frag = nextf; continue; } } /* if start is non-null and we have end fragment, then * we have full reassembly sequence -- reassemble * and process packet now */ if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { minseq = mp->seq = (thisseq + 1) & MP_LONGSEQ_MASK; /* Reassemble the packet then dispatch it */ isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); start = NULL; frag = NULL; mp->frags = nextf; } /* check if need to update start pointer: if we just * reassembled the packet and sequence is contiguous * then next fragment should be the start of new reassembly * if sequence is contiguous, but we haven't reassembled yet, * keep going. * if sequence is not contiguous, either clear everything * below low watermark and set start to the next frag or * clear start ptr. */ if (nextf != NULL && ((thisseq + 1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { /* if we just reassembled and the next one is here, * then start another reassembly. 
*/ if (frag == NULL) { if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) start = nextf; else { printk(KERN_WARNING"isdn_mppp(seq %d):" " END flag with no following " "BEGIN", thisseq); stats->seqerrs++; } } } else { if (nextf != NULL && frag != NULL && MP_LT(thisseq, minseq)) { /* we've got a break in the sequence * and we not at the end yet * and we did not just reassembled *(if we did, there wouldn't be anything before) * and we below the low watermark * discard all the frames below low watermark * and start over */ stats->frame_drops++; mp->frags = isdn_ppp_mp_discard(mp, start, nextf); } /* break in the sequence, no reassembly */ start = NULL; } frag = nextf; } /* while -- main loop */ if (mp->frags == NULL) mp->frags = frag; /* rather straighforward way to deal with (not very) possible * queue overflow */ if (mp->frames > MP_MAX_QUEUE_LEN) { stats->overflows++; while (mp->frames > MP_MAX_QUEUE_LEN) { frag = mp->frags->next; isdn_ppp_mp_free_skb(mp, mp->frags); mp->frags = frag; } } spin_unlock_irqrestore(&mp->lock, flags); } static void isdn_ppp_mp_cleanup(isdn_net_local *lp) { struct sk_buff *frag = lp->netdev->pb->frags; struct sk_buff *nextfrag; while (frag) { nextfrag = frag->next; isdn_ppp_mp_free_skb(lp->netdev->pb, frag); frag = nextfrag; } lp->netdev->pb->frags = NULL; } static u32 isdn_ppp_mp_get_seq(int short_seq, struct sk_buff *skb, u32 last_seq) { u32 seq; int flags = skb->data[0] & (MP_BEGIN_FRAG | MP_END_FRAG); if (!short_seq) { seq = ntohl(*(__be32 *)skb->data) & MP_LONGSEQ_MASK; skb_push(skb, 1); } else { /* convert 12-bit short seq number to 24-bit long one */ seq = ntohs(*(__be16 *)skb->data) & MP_SHORTSEQ_MASK; /* check for seqence wrap */ if (!(seq & MP_SHORTSEQ_MAXBIT) && (last_seq & MP_SHORTSEQ_MAXBIT) && (unsigned long)last_seq <= MP_LONGSEQ_MAX) seq |= (last_seq + MP_SHORTSEQ_MAX + 1) & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK); else seq |= last_seq & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK); skb_push(skb, 3); /* put converted seqence back in skb */ } 
*(u32 *)(skb->data + 1) = seq;	/* put sequence back in _host_ byte
					 * order */
	skb->data[0] = flags;	/* restore flags */
	return seq;
}

/* Free fragments in the half-open range [from, to) and return 'to'. */
struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
				    struct sk_buff *from, struct sk_buff *to)
{
	if (from)
		while (from != to) {
			struct sk_buff *next = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = next;
		}
	return from;
}

/* Rebuild one complete PPP frame from the fragment chain [from, to)
 * (to == NULL means up to end of queue), strip the MP headers and hand the
 * result to isdn_ppp_push_higher().  Frees the consumed fragments. */
void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
			    struct sk_buff *from, struct sk_buff *to)
{
	ippp_bundle *mp = net_dev->pb;
	int proto;
	struct sk_buff *skb;
	unsigned int tot_len;

	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
		       __func__, lp->ppp_slot);
		return;
	}
	if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
		/* single-fragment packet: reuse the skb, just drop the
		 * MP header in place */
		if (ippp_table[lp->ppp_slot]->debug & 0x40)
			printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
			       "len %d\n", MP_SEQ(from), from->len);
		skb = from;
		skb_pull(skb, MP_HEADER_LEN);
		mp->frames--;
	} else {
		struct sk_buff *frag;
		int n;

		/* first pass: sum up the payload length of the chain */
		for (tot_len = n = 0, frag = from; frag != to;
		     frag = frag->next, n++)
			tot_len += frag->len - MP_HEADER_LEN;

		if (ippp_table[lp->ppp_slot]->debug & 0x40)
			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
			       "to %d, len %d\n", MP_SEQ(from),
			       (MP_SEQ(from) + n - 1) & MP_LONGSEQ_MASK,
			       tot_len);
		if ((skb = dev_alloc_skb(tot_len)) == NULL) {
			printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
			       "of size %d\n", tot_len);
			isdn_ppp_mp_discard(mp, from, to);
			return;
		}

		/* second pass: copy each fragment's payload, freeing the
		 * fragments as we go */
		while (from != to) {
			unsigned int len = from->len - MP_HEADER_LEN;

			skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
							 skb_put(skb, len),
							 len);
			frag = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = frag;
		}
	}
	proto = isdn_ppp_strip_proto(skb);
	isdn_ppp_push_higher(net_dev, lp, skb, proto);
}

/* Drop one queued fragment and account for it in the bundle counter. */
static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
	mp->frames--;
}

/* Debug helper: dump the first six bytes of a received MP fragment. */
static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb)
{
	printk(KERN_DEBUG "mp_recv: %d/%d -> %02x %02x %02x %02x %02x %02x\n",
	       slot, (int) skb->len,
	       (int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
	       (int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
}

/* Bind the link described by 'is' into the bundle of interface ippp<unit>:
 * add it to the device's link queue, copy the relevant config bits over
 * from the bundle's first link and initialize the link's MP state.
 * Backend for the PPPIOCBUNDLE ioctl. */
static int isdn_ppp_bundle(struct ippp_struct *is, int unit)
{
	char ifn[IFNAMSIZ + 1];
	isdn_net_dev *p;
	isdn_net_local *lp, *nlp;
	int rc;
	unsigned long flags;

	sprintf(ifn, "ippp%d", unit);
	p = isdn_net_findif(ifn);
	if (!p) {
		printk(KERN_ERR "ippp_bundle: cannot find %s\n", ifn);
		return -EINVAL;
	}

	spin_lock_irqsave(&p->pb->lock, flags);

	nlp = is->lp;
	lp = p->queue;
	if (nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ||
	    lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "ippp_bundle: binding to invalid slot %d\n",
		       nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ?
		       nlp->ppp_slot : lp->ppp_slot);
		rc = -EINVAL;
		goto out;
	}

	isdn_net_add_to_bundle(p, nlp);

	ippp_table[nlp->ppp_slot]->unit = ippp_table[lp->ppp_slot]->unit;

	/* maybe also SC_CCP stuff */
	ippp_table[nlp->ppp_slot]->pppcfg |= ippp_table[lp->ppp_slot]->pppcfg &
		(SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
	ippp_table[nlp->ppp_slot]->mpppcfg |= ippp_table[lp->ppp_slot]->mpppcfg &
		(SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ | SC_IN_SHORT_SEQ);
	rc = isdn_ppp_mp_init(nlp, p->pb);
out:
	spin_unlock_irqrestore(&p->pb->lock, flags);
	return rc;
}

#endif /* CONFIG_ISDN_MPP */

/*
 * network device ioctl handlers
 */

/* Fill a ppp_stats structure and copy it to user space (SIOCGPPPSTATS). */
static int isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
{
	struct ppp_stats __user *res = ifr->ifr_data;
	struct ppp_stats t;
	isdn_net_local *lp = netdev_priv(dev);

	if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
		return -EFAULT;

	/* build a temporary stat struct and copy it to user space */

	memset(&t, 0, sizeof(struct ppp_stats));
	if (dev->flags & IFF_UP) {
		t.p.ppp_ipackets = lp->stats.rx_packets;
		t.p.ppp_ibytes = lp->stats.rx_bytes;
		t.p.ppp_ierrors = lp->stats.rx_errors;
		t.p.ppp_opackets = lp->stats.tx_packets;
		t.p.ppp_obytes = lp->stats.tx_bytes;
		t.p.ppp_oerrors =
lp->stats.tx_errors;
#ifdef CONFIG_ISDN_PPP_VJ
		/* add Van Jacobson header-compression statistics when the
		 * slot has a slcomp state attached */
		if (slot >= 0 && ippp_table[slot]->slcomp) {
			struct slcompress *slcomp = ippp_table[slot]->slcomp;
			t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed;
			t.vj.vjs_compressed = slcomp->sls_o_compressed;
			t.vj.vjs_searches = slcomp->sls_o_searches;
			t.vj.vjs_misses = slcomp->sls_o_misses;
			t.vj.vjs_errorin = slcomp->sls_i_error;
			t.vj.vjs_tossed = slcomp->sls_i_tossed;
			t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed;
			t.vj.vjs_compressedin = slcomp->sls_i_compressed;
		}
#endif
	}
	if (copy_to_user(res, &t, sizeof(struct ppp_stats)))
		return -EFAULT;
	return 0;
}

/* Network-device ioctl entry point for ipppN interfaces.  Supports only
 * the PPP version query and the statistics query. */
int isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int error = 0;
	int len;
	isdn_net_local *lp = netdev_priv(dev);

	if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
		return -EINVAL;

	switch (cmd) {
#define PPP_VERSION "2.3.7"
	case SIOCGPPPVER:
		len = strlen(PPP_VERSION) + 1;
		if (copy_to_user(ifr->ifr_data, PPP_VERSION, len))
			error = -EFAULT;
		break;

	case SIOCGPPPSTATS:
		error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

/* Parse the numeric unit out of an interface name of the form "ipppN".
 * Returns the unit number, or -1 when the name does not match. */
static int isdn_ppp_if_get_unit(char *name)
{
	int len, i, unit = 0, deci;

	len = strlen(name);

	if (strncmp("ippp", name, 4) || len > 8)
		return -1;

	/* accumulate the trailing digits right-to-left */
	for (i = 0, deci = 1; i < len; i++, deci *= 10) {
		char a = name[len - i - 1];
		if (a >= '0' && a <= '9')
			unit += (a - '0') * deci;
		else
			break;
	}
	/* need at least one digit, and exactly "ippp" in front of them */
	if (!i || len - i != 4)
		unit = -1;

	return unit;
}

/* Dial the first not-yet-connected slave channel of interface 'name'.
 * Returns 0 on success or a small positive error code (1 = no such
 * interface, 5 = master not connected, 2 = no free slave). */
int isdn_ppp_dial_slave(char *name)
{
#ifdef CONFIG_ISDN_MPP
	isdn_net_dev *ndev;
	isdn_net_local *lp;
	struct net_device *sdev;

	if (!(ndev = isdn_net_findif(name)))
		return 1;
	lp = ndev->local;
	if (!(lp->flags & ISDN_NET_CONNECTED))
		return 5;

	/* walk the slave chain to the first unconnected link */
	sdev = lp->slave;
	while (sdev) {
		isdn_net_local *mlp = netdev_priv(sdev);
		if (!(mlp->flags & ISDN_NET_CONNECTED))
			break;
		sdev = mlp->slave;
	}
	if (!sdev)
		return 2;

	isdn_net_dial_req(netdev_priv(sdev));
	return 0;
#else
	return -1;
#endif
}

/* Hang up the last connected slave channel of interface 'name'.
 * Return codes as for isdn_ppp_dial_slave(). */
int isdn_ppp_hangup_slave(char *name)
{
#ifdef CONFIG_ISDN_MPP
	isdn_net_dev *ndev;
	isdn_net_local *lp;
	struct net_device *sdev;

	if (!(ndev = isdn_net_findif(name)))
		return 1;
	lp = ndev->local;
	if (!(lp->flags & ISDN_NET_CONNECTED))
		return 5;

	sdev = lp->slave;
	while (sdev) {
		isdn_net_local *mlp = netdev_priv(sdev);

		if (mlp->slave) { /* find last connected link in chain */
			isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);

			if (!(nlp->flags & ISDN_NET_CONNECTED))
				break;
		} else if (mlp->flags & ISDN_NET_CONNECTED)
			break;

		sdev = mlp->slave;
	}
	if (!sdev)
		return 2;

	isdn_net_hangup(sdev);
	return 0;
#else
	return -1;
#endif
}

/*
 * PPP compression stuff
 */


/* Push an empty CCP Data Frame up to the daemon to wake it up and let it
   generate a CCP Reset-Request or tear down CCP altogether */
static void isdn_ppp_ccp_kickup(struct ippp_struct *is)
{
	isdn_ppp_fill_rq(NULL, 0, PPP_COMP, is->lp->ppp_slot);
}

/* In-kernel handling of CCP Reset-Request and Reset-Ack is necessary,
   but absolutely nontrivial. The most abstruse problem we are facing is
   that the generation, reception and all the handling of timeouts and
   resends including proper request id management should be entirely left
   to the (de)compressor, but indeed is not covered by the current API to
   the (de)compressor. The API is a prototype version from PPP where only
   some (de)compressors have yet been implemented and all of them are
   rather simple in their reset handling. Especially, there is only one
   outstanding ResetAck at a time with all of them and ResetReq/-Acks do
   not have parameters. For this very special case it was sufficient to
   just return an error code from the decompressor and have a single
   reset() entry to communicate all the necessary information between
   the framework and the (de)compressor. Bad enough, LZS is different
   (and any other compressor may be different, too). It has multiple
   histories (eventually) and needs to Reset each of them independently
   and thus uses multiple outstanding Acks and history numbers as an
   additional parameter to Reqs/Acks.
All that makes it harder to port the reset state engine into the
   kernel because it is not just the same simple one as in (i)pppd but
   it must be able to pass additional parameters and have multiple out-
   standing Acks. We are trying to achieve the impossible by handling
   reset transactions independent by their id. The id MUST change when
   the data portion changes, thus any (de)compressor who uses more than
   one resettable state must provide and recognize individual ids for
   each individual reset transaction. The framework itself does _only_
   differentiate them by id, because it has no other semantics like the
   (de)compressor might.
   This looks like a major redesign of the interface would be nice,
   but I don't have an idea how to do it better. */

/* Send a CCP Reset-Request or Reset-Ack directly from the kernel.
   This is getting that lengthy because there is no simple
   "send-this-frame-out" function above but every wrapper does a bit
   different. Hope I guess correct in this hack... */
static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
				    unsigned char code, unsigned char id,
				    unsigned char *data, int len)
{
	struct sk_buff *skb;
	unsigned char *p;
	int hl;
	int cnt = 0;
	isdn_net_local *lp = is->lp;

	/* Alloc large enough skb */
	hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
	skb = alloc_skb(len + hl + 16, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING "ippp: CCP cannot send reset - out of memory\n");
		return;
	}
	skb_reserve(skb, hl);	/* room for the low-level driver header */

	/* We may need to stuff an address and control field first */
	if (!(is->pppcfg & SC_COMP_AC)) {
		p = skb_put(skb, 2);
		*p++ = 0xff;
		*p++ = 0x03;
	}

	/* Stuff proto, code, id and length */
	p = skb_put(skb, 6);
	*p++ = (proto >> 8);
	*p++ = (proto & 0xff);
	*p++ = code;
	*p++ = id;
	cnt = 4 + len;	/* CCP length field covers code, id, length and data */
	*p++ = (cnt >> 8);
	*p++ = (cnt & 0xff);

	/* Now stuff remaining bytes */
	if (len) {
		p = skb_put(skb, len);
		memcpy(p, data, len);
	}

	/* skb is now ready for xmit */
	printk(KERN_DEBUG "Sending CCP Frame:\n");
	isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);

	isdn_net_write_super(lp, skb);
}

/* Allocate the reset state vector */
static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is)
{
	struct ippp_ccp_reset *r;

	r = kzalloc(sizeof(struct ippp_ccp_reset), GFP_KERNEL);
	if (!r) {
		printk(KERN_ERR "ippp_ccp: failed to allocate reset data"
		       " structure - no mem\n");
		return NULL;
	}
	printk(KERN_DEBUG "ippp_ccp: allocated reset data structure %p\n", r);
	is->reset = r;
	return r;
}

/* Destroy the reset state vector. Kill all pending timers first. */
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is)
{
	unsigned int id;

	printk(KERN_DEBUG "ippp_ccp: freeing reset data structure %p\n",
	       is->reset);
	for (id = 0; id < 256; id++) {
		if (is->reset->rs[id]) {
			isdn_ppp_ccp_reset_free_state(is, (unsigned char)id);
		}
	}
	kfree(is->reset);
	is->reset = NULL;
}

/* Free a given state and clear everything up for later reallocation */
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
					  unsigned char id)
{
	struct ippp_ccp_reset_state *rs;

	if (is->reset->rs[id]) {
		printk(KERN_DEBUG "ippp_ccp: freeing state for id %d\n", id);
		rs = is->reset->rs[id];
		/* Make sure the kernel will not call back later */
		if (rs->ta)
			del_timer(&rs->timer);
		is->reset->rs[id] = NULL;
		kfree(rs);
	} else {
		printk(KERN_WARNING "ippp_ccp: id %d is not allocated\n", id);
	}
}

/* The timer callback function which is called when a ResetReq has timed
   out, aka has never been answered by a ResetAck */
static void isdn_ppp_ccp_timer_callback(unsigned long closure)
{
	struct ippp_ccp_reset_state *rs =
		(struct ippp_ccp_reset_state *)closure;

	if (!rs) {
		printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
		return;
	}
	if (rs->ta && rs->state == CCPResetSentReq) {
		/* We are correct here */
		if (!rs->expra) {
			/* Hmm, there is no Ack really expected.
We can clean up the state now, it will be reallocated if the decompressor insists on another reset */ rs->ta = 0; isdn_ppp_ccp_reset_free_state(rs->is, rs->id); return; } printk(KERN_DEBUG "ippp_ccp: CCP Reset timed out for id %d\n", rs->id); /* Push it again */ isdn_ppp_ccp_xmit_reset(rs->is, PPP_CCP, CCP_RESETREQ, rs->id, rs->data, rs->dlen); /* Restart timer */ rs->timer.expires = jiffies + HZ * 5; add_timer(&rs->timer); } else { printk(KERN_WARNING "ippp_ccp: timer cb in wrong state %d\n", rs->state); } } /* Allocate a new reset transaction state */ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is, unsigned char id) { struct ippp_ccp_reset_state *rs; if (is->reset->rs[id]) { printk(KERN_WARNING "ippp_ccp: old state exists for id %d\n", id); return NULL; } else { rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); if (!rs) return NULL; rs->state = CCPResetIdle; rs->is = is; rs->id = id; init_timer(&rs->timer); rs->timer.data = (unsigned long)rs; rs->timer.function = isdn_ppp_ccp_timer_callback; is->reset->rs[id] = rs; } return rs; } /* A decompressor wants a reset with a set of parameters - do what is necessary to fulfill it */ static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is, struct isdn_ppp_resetparams *rp) { struct ippp_ccp_reset_state *rs; if (rp->valid) { /* The decompressor defines parameters by itself */ if (rp->rsend) { /* And he wants us to send a request */ if (!(rp->idval)) { printk(KERN_ERR "ippp_ccp: decompressor must" " specify reset id\n"); return; } if (is->reset->rs[rp->id]) { /* There is already a transaction in existence for this id. May be still waiting for a Ack or may be wrong. 
*/ rs = is->reset->rs[rp->id]; if (rs->state == CCPResetSentReq && rs->ta) { printk(KERN_DEBUG "ippp_ccp: reset" " trans still in progress" " for id %d\n", rp->id); } else { printk(KERN_WARNING "ippp_ccp: reset" " trans in wrong state %d for" " id %d\n", rs->state, rp->id); } } else { /* Ok, this is a new transaction */ printk(KERN_DEBUG "ippp_ccp: new trans for id" " %d to be started\n", rp->id); rs = isdn_ppp_ccp_reset_alloc_state(is, rp->id); if (!rs) { printk(KERN_ERR "ippp_ccp: out of mem" " allocing ccp trans\n"); return; } rs->state = CCPResetSentReq; rs->expra = rp->expra; if (rp->dtval) { rs->dlen = rp->dlen; memcpy(rs->data, rp->data, rp->dlen); } /* HACK TODO - add link comp here */ isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ, rs->id, rs->data, rs->dlen); /* Start the timer */ rs->timer.expires = jiffies + 5 * HZ; add_timer(&rs->timer); rs->ta = 1; } } else { printk(KERN_DEBUG "ippp_ccp: no reset sent\n"); } } else { /* The reset params are invalid. The decompressor does not care about them, so we just send the minimal requests and increase ids only when an Ack is received for a given id */ if (is->reset->rs[is->reset->lastid]) { /* There is already a transaction in existence for this id. May be still waiting for a Ack or may be wrong. 
*/ rs = is->reset->rs[is->reset->lastid]; if (rs->state == CCPResetSentReq && rs->ta) { printk(KERN_DEBUG "ippp_ccp: reset" " trans still in progress" " for id %d\n", rp->id); } else { printk(KERN_WARNING "ippp_ccp: reset" " trans in wrong state %d for" " id %d\n", rs->state, rp->id); } } else { printk(KERN_DEBUG "ippp_ccp: new trans for id" " %d to be started\n", is->reset->lastid); rs = isdn_ppp_ccp_reset_alloc_state(is, is->reset->lastid); if (!rs) { printk(KERN_ERR "ippp_ccp: out of mem" " allocing ccp trans\n"); return; } rs->state = CCPResetSentReq; /* We always expect an Ack if the decompressor doesn't know better */ rs->expra = 1; rs->dlen = 0; /* HACK TODO - add link comp here */ isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ, rs->id, NULL, 0); /* Start the timer */ rs->timer.expires = jiffies + 5 * HZ; add_timer(&rs->timer); rs->ta = 1; } } } /* An Ack was received for this id. This means we stop the timer and clean up the state prior to calling the decompressors reset routine. */ static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is, unsigned char id) { struct ippp_ccp_reset_state *rs = is->reset->rs[id]; if (rs) { if (rs->ta && rs->state == CCPResetSentReq) { /* Great, we are correct */ if (!rs->expra) printk(KERN_DEBUG "ippp_ccp: ResetAck received" " for id %d but not expected\n", id); } else { printk(KERN_INFO "ippp_ccp: ResetAck received out of" "sync for id %d\n", id); } if (rs->ta) { rs->ta = 0; del_timer(&rs->timer); } isdn_ppp_ccp_reset_free_state(is, id); } else { printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id" " %d\n", id); } /* Make sure the simple reset stuff uses a new id next time */ is->reset->lastid++; } /* * decompress packet * * if master = 0, we're trying to uncompress an per-link compressed packet, * as opposed to an compressed reconstructed-from-MPPP packet. * proto is updated to protocol field of uncompressed packet. 
*
 * retval: decompressed packet,
 *         same packet if uncompressed,
 *	   NULL if decompression error
 */
static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,
					   struct ippp_struct *is,
					   struct ippp_struct *master,
					   int *proto)
{
	void *stat = NULL;
	struct isdn_ppp_compressor *ipc = NULL;
	struct sk_buff *skb_out;
	int len;
	struct ippp_struct *ri;
	struct isdn_ppp_resetparams rsparm;
	unsigned char rsdata[IPPP_RESET_MAXDATABYTES];

	if (!master) {
		// per-link decompression
		stat = is->link_decomp_stat;
		ipc = is->link_decompressor;
		ri = is;
	} else {
		stat = master->decomp_stat;
		ipc = master->decompressor;
		ri = master;
	}

	if (!ipc) {
		// no decompressor -> we can't decompress.
		printk(KERN_DEBUG "ippp: no decompressor defined!\n");
		return skb;
	}
	BUG_ON(!stat); // if we have a compressor, stat has been set as well

	if ((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG)) {
		// compressed packets are compressed by their protocol type

		// Set up reset params for the decompressor
		memset(&rsparm, 0, sizeof(rsparm));
		rsparm.data = rsdata;
		rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;

		skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
		if (!skb_out) {
			kfree_skb(skb);
			printk(KERN_ERR "ippp: decomp memory allocation failure\n");
			return NULL;
		}
		len = ipc->decompress(stat, skb, skb_out, &rsparm);
		kfree_skb(skb);
		if (len <= 0) {
			/* decompression failed: ask for a reset or, on a
			 * fatal error, notify the daemon */
			switch (len) {
			case DECOMP_ERROR:
				printk(KERN_INFO "ippp: decomp wants reset %s params\n",
				       rsparm.valid ? "with" : "without");

				isdn_ppp_ccp_reset_trans(ri, &rsparm);
				break;
			case DECOMP_FATALERROR:
				ri->pppcfg |= SC_DC_FERROR;
				/* Kick ipppd to recognize the error */
				isdn_ppp_ccp_kickup(ri);
				break;
			}
			kfree_skb(skb_out);
			return NULL;
		}
		*proto = isdn_ppp_strip_proto(skb_out);
		if (*proto < 0) {
			kfree_skb(skb_out);
			return NULL;
		}
		return skb_out;
	} else {
		// uncompressed packets are fed through the decompressor to
		// update the decompressor state
		ipc->incomp(stat, skb, *proto);
		return skb;
	}
}

/*
 * compress a frame
 *   type=0: normal/bundle compression
 *       =1: link compression
 * returns original skb if we haven't compressed the frame
 * and a new skb pointer if we've done it
 */
static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto,
					 struct ippp_struct *is,
					 struct ippp_struct *master, int type)
{
	int ret;
	int new_proto;
	struct isdn_ppp_compressor *compressor;
	void *stat;
	struct sk_buff *skb_out;

	/* we do not compress control protocols */
	if (*proto < 0 || *proto > 0x3fff) {
		return skb_in;
	}

	if (type) { /* type=1 => Link compression */
		return skb_in;
	}
	else {
		if (!master) {
			compressor = is->compressor;
			stat = is->comp_stat;
		}
		else {
			compressor = master->compressor;
			stat = master->comp_stat;
		}
		new_proto = PPP_COMP;
	}

	if (!compressor) {
		printk(KERN_ERR "isdn_ppp: No compressor set!\n");
		return skb_in;
	}
	if (!stat) {
		printk(KERN_ERR "isdn_ppp: Compressor not initialized?\n");
		return skb_in;
	}

	/* Allow for at least 150 % expansion (for now) */
	skb_out = alloc_skb(skb_in->len + skb_in->len / 2 + 32 +
			    skb_headroom(skb_in), GFP_ATOMIC);
	if (!skb_out)
		return skb_in;
	skb_reserve(skb_out, skb_headroom(skb_in));

	ret = (compressor->compress)(stat, skb_in, skb_out, *proto);
	if (!ret) {
		/* compressor made no gain: transmit the original frame */
		dev_kfree_skb(skb_out);
		return skb_in;
	}

	dev_kfree_skb(skb_in);
	*proto = new_proto;
	return skb_out;
}

/*
 * we received a CCP frame ..
* not a clean solution, but we MUST handle a few cases in the kernel */ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto) { struct ippp_struct *is; struct ippp_struct *mis; int len; struct isdn_ppp_resetparams rsparm; unsigned char rsdata[IPPP_RESET_MAXDATABYTES]; printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n", lp->ppp_slot); if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } is = ippp_table[lp->ppp_slot]; isdn_ppp_frame_log("ccp-rcv", skb->data, skb->len, 32, is->unit, lp->ppp_slot); if (lp->master) { int slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; switch (skb->data[0]) { case CCP_CONFREQ: if (is->debug & 0x10) printk(KERN_DEBUG "Disable compression here!\n"); if (proto == PPP_CCP) mis->compflags &= ~SC_COMP_ON; else is->compflags &= ~SC_LINK_COMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if (is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if (proto == PPP_CCP) mis->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we RECEIVE an ackowledge we enable the decompressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Enable decompression here!\n"); if (proto == PPP_CCP) { if (!mis->decompressor) break; mis->compflags |= SC_DECOMP_ON; } else { if (!is->decompressor) break; is->compflags |= SC_LINK_DECOMP_ON; } break; case CCP_RESETACK: printk(KERN_DEBUG "Received ResetAck from peer\n"); len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if (proto == PPP_CCP) { /* If a reset Ack was outstanding for this id, then clean up the state engine */ isdn_ppp_ccp_reset_ack_rcvd(mis, skb->data[1]); if (mis->decompressor && mis->decomp_stat) 
mis->decompressor-> reset(mis->decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: This is not easy to decide here */ mis->compflags &= ~SC_DECOMP_DISCARD; } else { isdn_ppp_ccp_reset_ack_rcvd(is, skb->data[1]); if (is->link_decompressor && is->link_decomp_stat) is->link_decompressor-> reset(is->link_decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: neither here */ is->compflags &= ~SC_LINK_DECOMP_DISCARD; } break; case CCP_RESETREQ: printk(KERN_DEBUG "Received ResetReq from peer\n"); /* Receiving a ResetReq means we must reset our compressor */ /* Set up reset params for the reset entry */ memset(&rsparm, 0, sizeof(rsparm)); rsparm.data = rsdata; rsparm.maxdlen = IPPP_RESET_MAXDATABYTES; /* Isolate data length */ len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if (proto == PPP_CCP) { if (mis->compressor && mis->comp_stat) mis->compressor-> reset(mis->comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } else { if (is->link_compressor && is->link_comp_stat) is->link_compressor-> reset(is->link_comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } /* Ack the Req as specified by rsparm */ if (rsparm.valid) { /* Compressor reset handler decided how to answer */ if (rsparm.rsend) { /* We should send a Frame */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, rsparm.idval ? rsparm.id : skb->data[1], rsparm.dtval ? rsparm.data : NULL, rsparm.dtval ? rsparm.dlen : 0); } else { printk(KERN_DEBUG "ResetAck suppressed\n"); } } else { /* We answer with a straight reflected Ack */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, skb->data[1], len ? &skb->data[4] : NULL, len); } break; } } /* * Daemon sends a CCP frame ... */ /* TODO: Clean this up with new Reset semantics */ /* I believe the CCP handling as-is is done wrong. 
Compressed frames * should only be sent/received after CCP reaches UP state, which means * both sides have sent CONF_ACK. Currently, we handle both directions * independently, which means we may accept compressed frames too early * (supposedly not a problem), but may also mean we send compressed frames * too early, which may turn out to be a problem. * This part of state machine should actually be handled by (i)pppd, but * that's too big of a change now. --kai */ /* Actually, we might turn this into an advantage: deal with the RFC in * the old tradition of beeing generous on what we accept, but beeing * strict on what we send. Thus we should just * - accept compressed frames as soon as decompression is negotiated * - send compressed frames only when decomp *and* comp are negotiated * - drop rx compressed frames if we cannot decomp (instead of pushing them * up to ipppd) * and I tried to modify this file according to that. --abp */ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb) { struct ippp_struct *mis, *is; int proto, slot = lp->ppp_slot; unsigned char *data; if (!skb || skb->len < 3) return; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, slot); return; } is = ippp_table[slot]; /* Daemon may send with or without address and control field comp */ data = skb->data; if (!(is->pppcfg & SC_COMP_AC) && data[0] == 0xff && data[1] == 0x03) { data += 2; if (skb->len < 5) return; } proto = ((int)data[0]<<8) + data[1]; if (proto != PPP_CCP && proto != PPP_CCPFRAG) return; printk(KERN_DEBUG "Received CCP frame from daemon:\n"); isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); if (lp->master) { slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; if (mis != is) printk(KERN_DEBUG "isdn_ppp: 
Ouch! Master CCP sends on slave slot!\n"); switch (data[2]) { case CCP_CONFREQ: if (is->debug & 0x10) printk(KERN_DEBUG "Disable decompression here!\n"); if (proto == PPP_CCP) is->compflags &= ~SC_DECOMP_ON; else is->compflags &= ~SC_LINK_DECOMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if (is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if (proto == PPP_CCP) is->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we SEND an ackowledge we can/must enable the compressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Enable compression here!\n"); if (proto == PPP_CCP) { if (!is->compressor) break; is->compflags |= SC_COMP_ON; } else { if (!is->compressor) break; is->compflags |= SC_LINK_COMP_ON; } break; case CCP_RESETACK: /* If we send a ACK we should reset our compressor */ if (is->debug & 0x10) printk(KERN_DEBUG "Reset decompression state here!\n"); printk(KERN_DEBUG "ResetAck from daemon passed by\n"); if (proto == PPP_CCP) { /* link to master? 
*/ if (is->compressor && is->comp_stat) is->compressor->reset(is->comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_COMP_DISCARD; } else { if (is->link_compressor && is->link_comp_stat) is->link_compressor->reset(is->link_comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_LINK_COMP_DISCARD; } break; case CCP_RESETREQ: /* Just let it pass by */ printk(KERN_DEBUG "ResetReq from daemon passed by\n"); break; } } int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc) { ipc->next = ipc_head; ipc->prev = NULL; if (ipc_head) { ipc_head->prev = ipc; } ipc_head = ipc; return 0; } int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc) { if (ipc->prev) ipc->prev->next = ipc->next; else ipc_head = ipc->next; if (ipc->next) ipc->next->prev = ipc->prev; ipc->prev = ipc->next = NULL; return 0; } static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *data) { struct isdn_ppp_compressor *ipc = ipc_head; int ret; void *stat; int num = data->num; if (is->debug & 0x10) printk(KERN_DEBUG "[%d] Set %s type %d\n", is->unit, (data->flags & IPPP_COMP_FLAG_XMIT) ? "compressor" : "decompressor", num); /* If is has no valid reset state vector, we cannot allocate a decompressor. The decompressor would cause reset transactions sooner or later, and they need that vector. 
*/ if (!(data->flags & IPPP_COMP_FLAG_XMIT) && !is->reset) { printk(KERN_ERR "ippp_ccp: no reset data structure - can't" " allow decompression.\n"); return -ENOMEM; } while (ipc) { if (ipc->num == num) { stat = ipc->alloc(data); if (stat) { ret = ipc->init(stat, data, is->unit, 0); if (!ret) { printk(KERN_ERR "Can't init (de)compression!\n"); ipc->free(stat); stat = NULL; break; } } else { printk(KERN_ERR "Can't alloc (de)compression!\n"); break; } if (data->flags & IPPP_COMP_FLAG_XMIT) { if (data->flags & IPPP_COMP_FLAG_LINK) { if (is->link_comp_stat) is->link_compressor->free(is->link_comp_stat); is->link_comp_stat = stat; is->link_compressor = ipc; } else { if (is->comp_stat) is->compressor->free(is->comp_stat); is->comp_stat = stat; is->compressor = ipc; } } else { if (data->flags & IPPP_COMP_FLAG_LINK) { if (is->link_decomp_stat) is->link_decompressor->free(is->link_decomp_stat); is->link_decomp_stat = stat; is->link_decompressor = ipc; } else { if (is->decomp_stat) is->decompressor->free(is->decomp_stat); is->decomp_stat = stat; is->decompressor = ipc; } } return 0; } ipc = ipc->next; } return -EINVAL; }
gpl-2.0
bgn9000/Dragon-Shiryu
arch/arm/mach-rpc/irq.c
3071
3324
#include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <asm/mach/irq.h> #include <asm/hardware/iomd.h> #include <asm/irq.h> static void iomd_ack_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val & ~mask, IOMD_IRQMASKA); iomd_writeb(mask, IOMD_IRQCLRA); } static void iomd_mask_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val & ~mask, IOMD_IRQMASKA); } static void iomd_unmask_irq_a(struct irq_data *d) { unsigned int val, mask; mask = 1 << d->irq; val = iomd_readb(IOMD_IRQMASKA); iomd_writeb(val | mask, IOMD_IRQMASKA); } static struct irq_chip iomd_a_chip = { .irq_ack = iomd_ack_irq_a, .irq_mask = iomd_mask_irq_a, .irq_unmask = iomd_unmask_irq_a, }; static void iomd_mask_irq_b(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_IRQMASKB); iomd_writeb(val & ~mask, IOMD_IRQMASKB); } static void iomd_unmask_irq_b(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_IRQMASKB); iomd_writeb(val | mask, IOMD_IRQMASKB); } static struct irq_chip iomd_b_chip = { .irq_ack = iomd_mask_irq_b, .irq_mask = iomd_mask_irq_b, .irq_unmask = iomd_unmask_irq_b, }; static void iomd_mask_irq_dma(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_DMAMASK); iomd_writeb(val & ~mask, IOMD_DMAMASK); } static void iomd_unmask_irq_dma(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_DMAMASK); iomd_writeb(val | mask, IOMD_DMAMASK); } static struct irq_chip iomd_dma_chip = { .irq_ack = iomd_mask_irq_dma, .irq_mask = iomd_mask_irq_dma, .irq_unmask = iomd_unmask_irq_dma, }; static void iomd_mask_irq_fiq(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_FIQMASK); iomd_writeb(val & ~mask, IOMD_FIQMASK); } static void 
iomd_unmask_irq_fiq(struct irq_data *d) { unsigned int val, mask; mask = 1 << (d->irq & 7); val = iomd_readb(IOMD_FIQMASK); iomd_writeb(val | mask, IOMD_FIQMASK); } static struct irq_chip iomd_fiq_chip = { .irq_ack = iomd_mask_irq_fiq, .irq_mask = iomd_mask_irq_fiq, .irq_unmask = iomd_unmask_irq_fiq, }; void __init rpc_init_irq(void) { unsigned int irq, flags; iomd_writeb(0, IOMD_IRQMASKA); iomd_writeb(0, IOMD_IRQMASKB); iomd_writeb(0, IOMD_FIQMASK); iomd_writeb(0, IOMD_DMAMASK); for (irq = 0; irq < NR_IRQS; irq++) { flags = IRQF_VALID; if (irq <= 6 || (irq >= 9 && irq <= 15)) flags |= IRQF_PROBE; if (irq == 21 || (irq >= 16 && irq <= 19) || irq == IRQ_KEYBOARDTX) flags |= IRQF_NOAUTOEN; switch (irq) { case 0 ... 7: irq_set_chip_and_handler(irq, &iomd_a_chip, handle_level_irq); set_irq_flags(irq, flags); break; case 8 ... 15: irq_set_chip_and_handler(irq, &iomd_b_chip, handle_level_irq); set_irq_flags(irq, flags); break; case 16 ... 21: irq_set_chip_and_handler(irq, &iomd_dma_chip, handle_level_irq); set_irq_flags(irq, flags); break; case 64 ... 71: irq_set_chip(irq, &iomd_fiq_chip); set_irq_flags(irq, IRQF_VALID); break; } } init_FIQ(); }
gpl-2.0
AD5GB/kernel_n5_3.10-experimental
drivers/media/radio/radio-aimslab.c
3327
5501
/* * AimsLab RadioTrack (aka RadioVeveal) driver * * Copyright 1997 M. Kirkwood * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Notes on the hardware (reverse engineered from other peoples' * reverse engineering of AIMS' code :-) * * Frequency control is done digitally -- ie out(port,encodefreq(95.8)); * * The signal strength query is unsurprisingly inaccurate. And it seems * to indicate that (on my card, at least) the frequency setting isn't * too great. (I have to tune up .025MHz from what the freq should be * to get a report that the thing is tuned.) * * Volume control is (ugh) analogue: * out(port, start_increasing_volume); * wait(a_wee_while); * out(port, stop_changing_the_volume); * * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool. */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/delay.h> /* msleep */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/io.h> /* outb, outb_p */ #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include "radio-isa.h" #include "lm7000.h" MODULE_AUTHOR("M. Kirkwood"); MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card."); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); #ifndef CONFIG_RADIO_RTRACK_PORT #define CONFIG_RADIO_RTRACK_PORT -1 #endif #define RTRACK_MAX 2 static int io[RTRACK_MAX] = { [0] = CONFIG_RADIO_RTRACK_PORT, [1 ... (RTRACK_MAX - 1)] = -1 }; static int radio_nr[RTRACK_MAX] = { [0 ... 
(RTRACK_MAX - 1)] = -1 }; module_param_array(io, int, NULL, 0444); MODULE_PARM_DESC(io, "I/O addresses of the RadioTrack card (0x20f or 0x30f)"); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Radio device numbers"); struct rtrack { struct radio_isa_card isa; int curvol; }; static struct radio_isa_card *rtrack_alloc(void) { struct rtrack *rt = kzalloc(sizeof(struct rtrack), GFP_KERNEL); if (rt) rt->curvol = 0xff; return rt ? &rt->isa : NULL; } #define AIMS_BIT_TUN_CE (1 << 0) #define AIMS_BIT_TUN_CLK (1 << 1) #define AIMS_BIT_TUN_DATA (1 << 2) #define AIMS_BIT_VOL_CE (1 << 3) #define AIMS_BIT_TUN_STRQ (1 << 4) /* bit 5 is not connected */ #define AIMS_BIT_VOL_UP (1 << 6) /* active low */ #define AIMS_BIT_VOL_DN (1 << 7) /* active low */ static void rtrack_set_pins(void *handle, u8 pins) { struct radio_isa_card *isa = handle; struct rtrack *rt = container_of(isa, struct rtrack, isa); u8 bits = AIMS_BIT_VOL_DN | AIMS_BIT_VOL_UP | AIMS_BIT_TUN_STRQ; if (!v4l2_ctrl_g_ctrl(rt->isa.mute)) bits |= AIMS_BIT_VOL_CE; if (pins & LM7000_DATA) bits |= AIMS_BIT_TUN_DATA; if (pins & LM7000_CLK) bits |= AIMS_BIT_TUN_CLK; if (pins & LM7000_CE) bits |= AIMS_BIT_TUN_CE; outb_p(bits, rt->isa.io); } static int rtrack_s_frequency(struct radio_isa_card *isa, u32 freq) { lm7000_set_freq(freq, isa, rtrack_set_pins); return 0; } static u32 rtrack_g_signal(struct radio_isa_card *isa) { /* bit set = no signal present */ return 0xffff * !(inb(isa->io) & 2); } static int rtrack_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol) { struct rtrack *rt = container_of(isa, struct rtrack, isa); int curvol = rt->curvol; if (mute) { outb(0xd0, isa->io); /* volume steady + sigstr + off */ return 0; } if (vol == 0) { /* volume = 0 means mute the card */ outb(0x48, isa->io); /* volume down but still "on" */ msleep(curvol * 3); /* make sure it's totally down */ } else if (curvol < vol) { outb(0x98, isa->io); /* volume up + sigstr + on */ for (; curvol < vol; curvol++) 
udelay(3000); } else if (curvol > vol) { outb(0x58, isa->io); /* volume down + sigstr + on */ for (; curvol > vol; curvol--) udelay(3000); } outb(0xd8, isa->io); /* volume steady + sigstr + on */ rt->curvol = vol; return 0; } /* Mute card - prevents noisy bootups */ static int rtrack_initialize(struct radio_isa_card *isa) { /* this ensures that the volume is all the way up */ outb(0x90, isa->io); /* volume up but still "on" */ msleep(3000); /* make sure it's totally up */ outb(0xc0, isa->io); /* steady volume, mute card */ return 0; } static const struct radio_isa_ops rtrack_ops = { .alloc = rtrack_alloc, .init = rtrack_initialize, .s_mute_volume = rtrack_s_mute_volume, .s_frequency = rtrack_s_frequency, .g_signal = rtrack_g_signal, }; static const int rtrack_ioports[] = { 0x20f, 0x30f }; static struct radio_isa_driver rtrack_driver = { .driver = { .match = radio_isa_match, .probe = radio_isa_probe, .remove = radio_isa_remove, .driver = { .name = "radio-aimslab", }, }, .io_params = io, .radio_nr_params = radio_nr, .io_ports = rtrack_ioports, .num_of_io_ports = ARRAY_SIZE(rtrack_ioports), .region_size = 2, .card = "AIMSlab RadioTrack/RadioReveal", .ops = &rtrack_ops, .has_stereo = true, .max_volume = 0xff, }; static int __init rtrack_init(void) { return isa_register_driver(&rtrack_driver.driver, RTRACK_MAX); } static void __exit rtrack_exit(void) { isa_unregister_driver(&rtrack_driver.driver); } module_init(rtrack_init); module_exit(rtrack_exit);
gpl-2.0
CandyDevices/kernel_htc_msm8974
sound/soc/codecs/wm8731.c
3839
19275
/* * wm8731.c -- WM8731 ALSA SoC Audio driver * * Copyright 2005 Openedhand Ltd. * * Author: Richard Purdie <richard@openedhand.com> * * Based on wm8753.c by Liam Girdwood * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8731.h" #define WM8731_NUM_SUPPLIES 4 static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = { "AVDD", "HPVDD", "DCVDD", "DBVDD", }; /* codec private data */ struct wm8731_priv { struct regmap *regmap; struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES]; unsigned int sysclk; int sysclk_type; int playback_fs; bool deemph; }; /* * wm8731 register cache */ static const struct reg_default wm8731_reg_defaults[] = { { 0, 0x0097 }, { 1, 0x0097 }, { 2, 0x0079 }, { 3, 0x0079 }, { 4, 0x000a }, { 5, 0x0008 }, { 6, 0x009f }, { 7, 0x000a }, { 8, 0x0000 }, { 9, 0x0000 }, }; static bool wm8731_volatile(struct device *dev, unsigned int reg) { return reg == WM8731_RESET; } static bool wm8731_writeable(struct device *dev, unsigned int reg) { return reg <= WM8731_RESET; } #define wm8731_reset(c) snd_soc_write(c, WM8731_RESET, 0) static const char *wm8731_input_select[] = {"Line In", "Mic"}; static const struct soc_enum wm8731_insel_enum = SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select); static int wm8731_deemph[] = { 0, 32000, 44100, 48000 }; static int wm8731_set_deemph(struct snd_soc_codec *codec) { struct wm8731_priv *wm8731 = 
snd_soc_codec_get_drvdata(codec); int val, i, best; /* If we're using deemphasis select the nearest available sample * rate. */ if (wm8731->deemph) { best = 1; for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) { if (abs(wm8731_deemph[i] - wm8731->playback_fs) < abs(wm8731_deemph[best] - wm8731->playback_fs)) best = i; } val = best << 1; } else { best = 0; val = 0; } dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n", best, wm8731_deemph[best]); return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val); } static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); ucontrol->value.enumerated.item[0] = wm8731->deemph; return 0; } static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); int deemph = ucontrol->value.enumerated.item[0]; int ret = 0; if (deemph > 1) return -EINVAL; mutex_lock(&codec->mutex); if (wm8731->deemph != deemph) { wm8731->deemph = deemph; wm8731_set_deemph(codec); ret = 1; } mutex_unlock(&codec->mutex); return ret; } static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0); static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0); static const struct snd_kcontrol_new wm8731_snd_controls[] = { SOC_DOUBLE_R_TLV("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V, 0, 127, 0, out_tlv), SOC_DOUBLE_R("Master Playback ZC Switch", WM8731_LOUT1V, WM8731_ROUT1V, 7, 1, 0), SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0, in_tlv), SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1), SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, mic_tlv), SOC_SINGLE("Mic 
Capture Switch", WM8731_APANA, 1, 1, 1), SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1, sidetone_tlv), SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1), SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0), SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0, wm8731_get_deemph, wm8731_put_deemph), }; /* Output Mixer */ static const struct snd_kcontrol_new wm8731_output_mixer_controls[] = { SOC_DAPM_SINGLE("Line Bypass Switch", WM8731_APANA, 3, 1, 0), SOC_DAPM_SINGLE("Mic Sidetone Switch", WM8731_APANA, 5, 1, 0), SOC_DAPM_SINGLE("HiFi Playback Switch", WM8731_APANA, 4, 1, 0), }; /* Input mux */ static const struct snd_kcontrol_new wm8731_input_mux_controls = SOC_DAPM_ENUM("Input Select", wm8731_insel_enum); static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("ACTIVE",WM8731_ACTIVE, 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0), SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1, &wm8731_output_mixer_controls[0], ARRAY_SIZE(wm8731_output_mixer_controls)), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8731_PWR, 3, 1), SND_SOC_DAPM_OUTPUT("LOUT"), SND_SOC_DAPM_OUTPUT("LHPOUT"), SND_SOC_DAPM_OUTPUT("ROUT"), SND_SOC_DAPM_OUTPUT("RHPOUT"), SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8731_PWR, 2, 1), SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &wm8731_input_mux_controls), SND_SOC_DAPM_PGA("Line Input", WM8731_PWR, 0, 1, NULL, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8731_PWR, 1, 1), SND_SOC_DAPM_INPUT("MICIN"), SND_SOC_DAPM_INPUT("RLINEIN"), SND_SOC_DAPM_INPUT("LLINEIN"), }; static int wm8731_check_osc(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec); return wm8731->sysclk_type == WM8731_SYSCLK_XTAL; } static const struct snd_soc_dapm_route wm8731_intercon[] = { {"DAC", NULL, "OSC", wm8731_check_osc}, {"ADC", NULL, "OSC", wm8731_check_osc}, {"DAC", NULL, "ACTIVE"}, {"ADC", NULL, 
"ACTIVE"}, /* output mixer */ {"Output Mixer", "Line Bypass Switch", "Line Input"}, {"Output Mixer", "HiFi Playback Switch", "DAC"}, {"Output Mixer", "Mic Sidetone Switch", "Mic Bias"}, /* outputs */ {"RHPOUT", NULL, "Output Mixer"}, {"ROUT", NULL, "Output Mixer"}, {"LHPOUT", NULL, "Output Mixer"}, {"LOUT", NULL, "Output Mixer"}, /* input mux */ {"Input Mux", "Line In", "Line Input"}, {"Input Mux", "Mic", "Mic Bias"}, {"ADC", NULL, "Input Mux"}, /* inputs */ {"Line Input", NULL, "LLINEIN"}, {"Line Input", NULL, "RLINEIN"}, {"Mic Bias", NULL, "MICIN"}, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:4; u8 bosr:1; u8 usb:1; }; /* codec mclk clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 48k */ {12288000, 48000, 256, 0x0, 0x0, 0x0}, {18432000, 48000, 384, 0x0, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x0, 0x1}, /* 32k */ {12288000, 32000, 384, 0x6, 0x0, 0x0}, {18432000, 32000, 576, 0x6, 0x1, 0x0}, {12000000, 32000, 375, 0x6, 0x0, 0x1}, /* 8k */ {12288000, 8000, 1536, 0x3, 0x0, 0x0}, {18432000, 8000, 2304, 0x3, 0x1, 0x0}, {11289600, 8000, 1408, 0xb, 0x0, 0x0}, {16934400, 8000, 2112, 0xb, 0x1, 0x0}, {12000000, 8000, 1500, 0x3, 0x0, 0x1}, /* 96k */ {12288000, 96000, 128, 0x7, 0x0, 0x0}, {18432000, 96000, 192, 0x7, 0x1, 0x0}, {12000000, 96000, 125, 0x7, 0x0, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x8, 0x0, 0x0}, {16934400, 44100, 384, 0x8, 0x1, 0x0}, {12000000, 44100, 272, 0x8, 0x1, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0xf, 0x0, 0x0}, {16934400, 88200, 192, 0xf, 0x1, 0x0}, {12000000, 88200, 136, 0xf, 0x1, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return 0; } static int wm8731_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); 
u16 iface = snd_soc_read(codec, WM8731_IFACE) & 0xfff3; int i = get_coeff(wm8731->sysclk, params_rate(params)); u16 srate = (coeff_div[i].sr << 2) | (coeff_div[i].bosr << 1) | coeff_div[i].usb; wm8731->playback_fs = params_rate(params); snd_soc_write(codec, WM8731_SRATE, srate); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; } wm8731_set_deemph(codec); snd_soc_write(codec, WM8731_IFACE, iface); return 0; } static int wm8731_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8731_APDIGI) & 0xfff7; if (mute) snd_soc_write(codec, WM8731_APDIGI, mute_reg | 0x8); else snd_soc_write(codec, WM8731_APDIGI, mute_reg); return 0; } static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); switch (clk_id) { case WM8731_SYSCLK_XTAL: case WM8731_SYSCLK_MCLK: wm8731->sysclk_type = clk_id; break; default: return -EINVAL; } switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8731->sysclk = freq; break; default: return -EINVAL; } snd_soc_dapm_sync(&codec->dapm); return 0; } static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface |= 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case 
SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } /* set iface */ snd_soc_write(codec, WM8731_IFACE, iface); return 0; } static int wm8731_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); int ret; u16 reg; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); if (ret != 0) return ret; regcache_sync(wm8731->regmap); } /* Clear PWROFF, gate CLKOUT, everything else as-is */ reg = snd_soc_read(codec, WM8731_PWR) & 0xff7f; snd_soc_write(codec, WM8731_PWR, reg | 0x0040); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8731_PWR, 0xffff); regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); regcache_mark_dirty(wm8731->regmap); break; } codec->dapm.bias_level = level; return 0; } #define WM8731_RATES SNDRV_PCM_RATE_8000_96000 #define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8731_dai_ops = { .hw_params = wm8731_hw_params, .digital_mute = wm8731_mute, .set_sysclk = wm8731_set_dai_sysclk, .set_fmt = wm8731_set_dai_fmt, }; static struct snd_soc_dai_driver wm8731_dai = { .name = "wm8731-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8731_RATES, .formats = WM8731_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8731_RATES, .formats = WM8731_FORMATS,}, .ops = &wm8731_dai_ops, .symmetric_rates = 1, }; #ifdef 
CONFIG_PM static int wm8731_suspend(struct snd_soc_codec *codec) { wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8731_resume(struct snd_soc_codec *codec) { wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define wm8731_suspend NULL #define wm8731_resume NULL #endif static int wm8731_probe(struct snd_soc_codec *codec) { struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); int ret = 0, i; codec->control_data = wm8731->regmap; ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(wm8731->supplies); i++) wm8731->supplies[i].supply = wm8731_supply_names[i]; ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8731->supplies), wm8731->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to request supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); goto err_regulator_get; } ret = wm8731_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset: %d\n", ret); goto err_regulator_enable; } wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Latch the update bits */ snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0); snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0); snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0); snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0); /* Disable bypass path by default */ snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0); /* Regulators will have been enabled by bias management */ regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); return 0; err_regulator_enable: regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); err_regulator_get: regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); return ret; } /* power down chip */ static int wm8731_remove(struct 
snd_soc_codec *codec) { struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF); regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8731 = { .probe = wm8731_probe, .remove = wm8731_remove, .suspend = wm8731_suspend, .resume = wm8731_resume, .set_bias_level = wm8731_set_bias_level, .dapm_widgets = wm8731_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets), .dapm_routes = wm8731_intercon, .num_dapm_routes = ARRAY_SIZE(wm8731_intercon), .controls = wm8731_snd_controls, .num_controls = ARRAY_SIZE(wm8731_snd_controls), }; static const struct of_device_id wm8731_of_match[] = { { .compatible = "wlf,wm8731", }, { } }; MODULE_DEVICE_TABLE(of, wm8731_of_match); static const struct regmap_config wm8731_regmap = { .reg_bits = 7, .val_bits = 9, .max_register = WM8731_RESET, .volatile_reg = wm8731_volatile, .writeable_reg = wm8731_writeable, .cache_type = REGCACHE_RBTREE, .reg_defaults = wm8731_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8731_reg_defaults), }; #if defined(CONFIG_SPI_MASTER) static int __devinit wm8731_spi_probe(struct spi_device *spi) { struct wm8731_priv *wm8731; int ret; wm8731 = kzalloc(sizeof(struct wm8731_priv), GFP_KERNEL); if (wm8731 == NULL) return -ENOMEM; wm8731->regmap = regmap_init_spi(spi, &wm8731_regmap); if (IS_ERR(wm8731->regmap)) { ret = PTR_ERR(wm8731->regmap); dev_err(&spi->dev, "Failed to allocate register map: %d\n", ret); goto err; } spi_set_drvdata(spi, wm8731); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8731, &wm8731_dai, 1); if (ret != 0) { dev_err(&spi->dev, "Failed to register CODEC: %d\n", ret); goto err_regmap; } return 0; err_regmap: regmap_exit(wm8731->regmap); err: kfree(wm8731); return ret; } static int __devexit wm8731_spi_remove(struct spi_device *spi) { struct wm8731_priv *wm8731 = 
spi_get_drvdata(spi); snd_soc_unregister_codec(&spi->dev); regmap_exit(wm8731->regmap); kfree(wm8731); return 0; } static struct spi_driver wm8731_spi_driver = { .driver = { .name = "wm8731", .owner = THIS_MODULE, .of_match_table = wm8731_of_match, }, .probe = wm8731_spi_probe, .remove = __devexit_p(wm8731_spi_remove), }; #endif /* CONFIG_SPI_MASTER */ #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static __devinit int wm8731_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8731_priv *wm8731; int ret; wm8731 = kzalloc(sizeof(struct wm8731_priv), GFP_KERNEL); if (wm8731 == NULL) return -ENOMEM; wm8731->regmap = regmap_init_i2c(i2c, &wm8731_regmap); if (IS_ERR(wm8731->regmap)) { ret = PTR_ERR(wm8731->regmap); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); goto err; } i2c_set_clientdata(i2c, wm8731); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8731, &wm8731_dai, 1); if (ret != 0) { dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret); goto err_regmap; } return 0; err_regmap: regmap_exit(wm8731->regmap); err: kfree(wm8731); return ret; } static __devexit int wm8731_i2c_remove(struct i2c_client *client) { struct wm8731_priv *wm8731 = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); regmap_exit(wm8731->regmap); kfree(wm8731); return 0; } static const struct i2c_device_id wm8731_i2c_id[] = { { "wm8731", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8731_i2c_id); static struct i2c_driver wm8731_i2c_driver = { .driver = { .name = "wm8731", .owner = THIS_MODULE, .of_match_table = wm8731_of_match, }, .probe = wm8731_i2c_probe, .remove = __devexit_p(wm8731_i2c_remove), .id_table = wm8731_i2c_id, }; #endif static int __init wm8731_modinit(void) { int ret = 0; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8731_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8731 I2C driver: %d\n", ret); } #endif #if defined(CONFIG_SPI_MASTER) ret = 
spi_register_driver(&wm8731_spi_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8731 SPI driver: %d\n", ret); } #endif return ret; } module_init(wm8731_modinit); static void __exit wm8731_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8731_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8731_spi_driver); #endif } module_exit(wm8731_exit); MODULE_DESCRIPTION("ASoC WM8731 driver"); MODULE_AUTHOR("Richard Purdie"); MODULE_LICENSE("GPL");
gpl-2.0
upworkstar/AndroidAmazon
drivers/media/video/mt9m001.c
4863
19752
/* * Driver for MT9M001 CMOS Image Sensor from Micron * * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/videodev2.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/log2.h> #include <linux/module.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <media/v4l2-subdev.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> /* * mt9m001 i2c address 0x5d * The platform has to define ctruct i2c_board_info objects and link to them * from struct soc_camera_link */ /* mt9m001 selected register addresses */ #define MT9M001_CHIP_VERSION 0x00 #define MT9M001_ROW_START 0x01 #define MT9M001_COLUMN_START 0x02 #define MT9M001_WINDOW_HEIGHT 0x03 #define MT9M001_WINDOW_WIDTH 0x04 #define MT9M001_HORIZONTAL_BLANKING 0x05 #define MT9M001_VERTICAL_BLANKING 0x06 #define MT9M001_OUTPUT_CONTROL 0x07 #define MT9M001_SHUTTER_WIDTH 0x09 #define MT9M001_FRAME_RESTART 0x0b #define MT9M001_SHUTTER_DELAY 0x0c #define MT9M001_RESET 0x0d #define MT9M001_READ_OPTIONS1 0x1e #define MT9M001_READ_OPTIONS2 0x20 #define MT9M001_GLOBAL_GAIN 0x35 #define MT9M001_CHIP_ENABLE 0xF1 #define MT9M001_MAX_WIDTH 1280 #define MT9M001_MAX_HEIGHT 1024 #define MT9M001_MIN_WIDTH 48 #define MT9M001_MIN_HEIGHT 32 #define MT9M001_COLUMN_SKIP 20 #define MT9M001_ROW_SKIP 12 /* MT9M001 has only one fixed colorspace per pixelcode */ struct mt9m001_datafmt { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; }; /* Find a data format by a pixel code in an array */ static const struct mt9m001_datafmt *mt9m001_find_datafmt( enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt, int n) { int i; for (i = 0; i < n; i++) if (fmt[i].code == code) return fmt + i; return NULL; } static const struct mt9m001_datafmt mt9m001_colour_fmts[] = { /* 
* Order important: first natively supported, * second supported with a GPIO extender */ {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, }; static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = { /* Order important - see above */ {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, {V4L2_MBUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG}, }; struct mt9m001 { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct { /* exposure/auto-exposure cluster */ struct v4l2_ctrl *autoexposure; struct v4l2_ctrl *exposure; }; struct v4l2_rect rect; /* Sensor window */ const struct mt9m001_datafmt *fmt; const struct mt9m001_datafmt *fmts; int num_fmts; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned int total_h; unsigned short y_skip_top; /* Lines to skip at the top */ }; static struct mt9m001 *to_mt9m001(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct mt9m001, subdev); } static int reg_read(struct i2c_client *client, const u8 reg) { return i2c_smbus_read_word_swapped(client, reg); } static int reg_write(struct i2c_client *client, const u8 reg, const u16 data) { return i2c_smbus_write_word_swapped(client, reg, data); } static int reg_set(struct i2c_client *client, const u8 reg, const u16 data) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, ret | data); } static int reg_clear(struct i2c_client *client, const u8 reg, const u16 data) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, ret & ~data); } static int mt9m001_init(struct i2c_client *client) { int ret; dev_dbg(&client->dev, "%s\n", __func__); /* * We don't know, whether platform provides reset, issue a soft reset * too. This returns all registers to their default values. 
*/ ret = reg_write(client, MT9M001_RESET, 1); if (!ret) ret = reg_write(client, MT9M001_RESET, 0); /* Disable chip, synchronous option update */ if (!ret) ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0); return ret; } static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* Switch to master "normal" mode or stop sensor readout */ if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0) return -EIO; return 0; } static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); struct v4l2_rect rect = a->c; int ret; const u16 hblank = 9, vblank = 25; if (mt9m001->fmts == mt9m001_colour_fmts) /* * Bayer format - even number of rows for simplicity, * but let the user play with the top row. */ rect.height = ALIGN(rect.height, 2); /* Datasheet requirement: see register description */ rect.width = ALIGN(rect.width, 2); rect.left = ALIGN(rect.left, 2); soc_camera_limit_side(&rect.left, &rect.width, MT9M001_COLUMN_SKIP, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH); soc_camera_limit_side(&rect.top, &rect.height, MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); mt9m001->total_h = rect.height + mt9m001->y_skip_top + vblank; /* Blanking and start values - default... 
*/ ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); if (!ret) ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank); /* * The caller provides a supported format, as verified per * call to .try_mbus_fmt() */ if (!ret) ret = reg_write(client, MT9M001_COLUMN_START, rect.left); if (!ret) ret = reg_write(client, MT9M001_ROW_START, rect.top); if (!ret) ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1); if (!ret) ret = reg_write(client, MT9M001_WINDOW_HEIGHT, rect.height + mt9m001->y_skip_top - 1); if (!ret && v4l2_ctrl_g_ctrl(mt9m001->autoexposure) == V4L2_EXPOSURE_AUTO) ret = reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h); if (!ret) mt9m001->rect = rect; return ret; } static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); a->c = mt9m001->rect; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { a->bounds.left = MT9M001_COLUMN_SKIP; a->bounds.top = MT9M001_ROW_SKIP; a->bounds.width = MT9M001_MAX_WIDTH; a->bounds.height = MT9M001_MAX_HEIGHT; a->defrect = a->bounds; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int mt9m001_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); mf->width = mt9m001->rect.width; mf->height = mt9m001->rect.height; mf->code = mt9m001->fmt->code; mf->colorspace = mt9m001->fmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); struct v4l2_crop a = { .c = { .left = mt9m001->rect.left, .top = mt9m001->rect.top, .width = mf->width, .height = mf->height, }, }; 
int ret; /* No support for scaling so far, just crop. TODO: use skipping */ ret = mt9m001_s_crop(sd, &a); if (!ret) { mf->width = mt9m001->rect.width; mf->height = mt9m001->rect.height; mt9m001->fmt = mt9m001_find_datafmt(mf->code, mt9m001->fmts, mt9m001->num_fmts); mf->colorspace = mt9m001->fmt->colorspace; } return ret; } static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); const struct mt9m001_datafmt *fmt; v4l_bound_align_image(&mf->width, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH, 1, &mf->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top, MT9M001_MAX_HEIGHT + mt9m001->y_skip_top, 0, 0); if (mt9m001->fmts == mt9m001_colour_fmts) mf->height = ALIGN(mf->height - 1, 2); fmt = mt9m001_find_datafmt(mf->code, mt9m001->fmts, mt9m001->num_fmts); if (!fmt) { fmt = mt9m001->fmt; mf->code = fmt->code; } mf->colorspace = fmt->colorspace; return 0; } static int mt9m001_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9m001->model; id->revision = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int mt9m001_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; reg->size = 2; reg->val = reg_read(client, reg->reg); if (reg->val > 0xffff) return -EIO; return 0; } static int mt9m001_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != 
client->addr) return -ENODEV; if (reg_write(client, reg->reg, reg->val) < 0) return -EIO; return 0; } #endif static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct mt9m001 *mt9m001 = container_of(ctrl->handler, struct mt9m001, hdl); s32 min, max; switch (ctrl->id) { case V4L2_CID_EXPOSURE_AUTO: min = mt9m001->exposure->minimum; max = mt9m001->exposure->maximum; mt9m001->exposure->val = (524 + (mt9m001->total_h - 1) * (max - min)) / 1048 + min; break; } return 0; } static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl) { struct mt9m001 *mt9m001 = container_of(ctrl->handler, struct mt9m001, hdl); struct v4l2_subdev *sd = &mt9m001->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); struct v4l2_ctrl *exp = mt9m001->exposure; int data; switch (ctrl->id) { case V4L2_CID_VFLIP: if (ctrl->val) data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000); else data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000); if (data < 0) return -EIO; return 0; case V4L2_CID_GAIN: /* See Datasheet Table 7, Gain settings. 
*/ if (ctrl->val <= ctrl->default_value) { /* Pack it into 0..1 step 0.125, register values 0..8 */ unsigned long range = ctrl->default_value - ctrl->minimum; data = ((ctrl->val - ctrl->minimum) * 8 + range / 2) / range; dev_dbg(&client->dev, "Setting gain %d\n", data); data = reg_write(client, MT9M001_GLOBAL_GAIN, data); if (data < 0) return -EIO; } else { /* Pack it into 1.125..15 variable step, register values 9..67 */ /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */ unsigned long range = ctrl->maximum - ctrl->default_value - 1; unsigned long gain = ((ctrl->val - ctrl->default_value - 1) * 111 + range / 2) / range + 9; if (gain <= 32) data = gain; else if (gain <= 64) data = ((gain - 32) * 16 + 16) / 32 + 80; else data = ((gain - 64) * 7 + 28) / 56 + 96; dev_dbg(&client->dev, "Setting gain from %d to %d\n", reg_read(client, MT9M001_GLOBAL_GAIN), data); data = reg_write(client, MT9M001_GLOBAL_GAIN, data); if (data < 0) return -EIO; } return 0; case V4L2_CID_EXPOSURE_AUTO: if (ctrl->val == V4L2_EXPOSURE_MANUAL) { unsigned long range = exp->maximum - exp->minimum; unsigned long shutter = ((exp->val - exp->minimum) * 1048 + range / 2) / range + 1; dev_dbg(&client->dev, "Setting shutter width from %d to %lu\n", reg_read(client, MT9M001_SHUTTER_WIDTH), shutter); if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0) return -EIO; } else { const u16 vblank = 25; mt9m001->total_h = mt9m001->rect.height + mt9m001->y_skip_top + vblank; if (reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h) < 0) return -EIO; } return 0; } return -EINVAL; } /* * Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ static int mt9m001_video_probe(struct soc_camera_link *icl, struct i2c_client *client) { struct mt9m001 *mt9m001 = to_mt9m001(client); s32 data; unsigned long flags; int ret; /* Enable the chip */ data = reg_write(client, MT9M001_CHIP_ENABLE, 1); dev_dbg(&client->dev, "write: %d\n", data); /* Read out the chip version register */ data = reg_read(client, MT9M001_CHIP_VERSION); /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */ switch (data) { case 0x8411: case 0x8421: mt9m001->model = V4L2_IDENT_MT9M001C12ST; mt9m001->fmts = mt9m001_colour_fmts; break; case 0x8431: mt9m001->model = V4L2_IDENT_MT9M001C12STM; mt9m001->fmts = mt9m001_monochrome_fmts; break; default: dev_err(&client->dev, "No MT9M001 chip detected, register read %x\n", data); return -ENODEV; } mt9m001->num_fmts = 0; /* * This is a 10bit sensor, so by default we only allow 10bit. * The platform may support different bus widths due to * different routing of the data lines. */ if (icl->query_bus_param) flags = icl->query_bus_param(icl); else flags = SOCAM_DATAWIDTH_10; if (flags & SOCAM_DATAWIDTH_10) mt9m001->num_fmts++; else mt9m001->fmts++; if (flags & SOCAM_DATAWIDTH_8) mt9m001->num_fmts++; mt9m001->fmt = &mt9m001->fmts[0]; dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, data == 0x8431 ? 
"C12STM" : "C12ST"); ret = mt9m001_init(client); if (ret < 0) dev_err(&client->dev, "Failed to initialise the camera\n"); /* mt9m001_init() has reset the chip, returning registers to defaults */ return v4l2_ctrl_handler_setup(&mt9m001->hdl); } static void mt9m001_video_remove(struct soc_camera_link *icl) { if (icl->free_bus) icl->free_bus(icl); } static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); *lines = mt9m001->y_skip_top; return 0; } static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = { .g_volatile_ctrl = mt9m001_g_volatile_ctrl, .s_ctrl = mt9m001_s_ctrl, }; static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { .g_chip_ident = mt9m001_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9m001_g_register, .s_register = mt9m001_s_register, #endif }; static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); if (index >= mt9m001->num_fmts) return -EINVAL; *code = mt9m001->fmts[index].code; return 0; } static int mt9m001_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); /* MT9M001 has all capture_format parameters fixed */ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER; cfg->type = V4L2_MBUS_PARALLEL; cfg->flags = soc_camera_apply_board_flags(icl, cfg); return 0; } static int mt9m001_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { const struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); struct mt9m001 *mt9m001 = to_mt9m001(client); unsigned int bps = 
soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample; if (icl->set_bus_param) return icl->set_bus_param(icl, 1 << (bps - 1)); /* * Without board specific bus width settings we only support the * sensors native bus width */ return bps == 10 ? 0 : -EINVAL; } static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_stream = mt9m001_s_stream, .s_mbus_fmt = mt9m001_s_fmt, .g_mbus_fmt = mt9m001_g_fmt, .try_mbus_fmt = mt9m001_try_fmt, .s_crop = mt9m001_s_crop, .g_crop = mt9m001_g_crop, .cropcap = mt9m001_cropcap, .enum_mbus_fmt = mt9m001_enum_fmt, .g_mbus_config = mt9m001_g_mbus_config, .s_mbus_config = mt9m001_s_mbus_config, }; static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { .g_skip_top_lines = mt9m001_g_skip_top_lines, }; static struct v4l2_subdev_ops mt9m001_subdev_ops = { .core = &mt9m001_subdev_core_ops, .video = &mt9m001_subdev_video_ops, .sensor = &mt9m001_subdev_sensor_ops, }; static int mt9m001_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9m001 *mt9m001; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); int ret; if (!icl) { dev_err(&client->dev, "MT9M001 driver needs platform data\n"); return -EINVAL; } if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); return -EIO; } mt9m001 = kzalloc(sizeof(struct mt9m001), GFP_KERNEL); if (!mt9m001) return -ENOMEM; v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops); v4l2_ctrl_handler_init(&mt9m001->hdl, 4); v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 64); mt9m001->exposure = v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops, V4L2_CID_EXPOSURE, 1, 255, 1, 255); /* * Simulated autoexposure. 
If enabled, we calculate shutter width * ourselves in the driver based on vertical blanking and frame width */ mt9m001->autoexposure = v4l2_ctrl_new_std_menu(&mt9m001->hdl, &mt9m001_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_AUTO); mt9m001->subdev.ctrl_handler = &mt9m001->hdl; if (mt9m001->hdl.error) { int err = mt9m001->hdl.error; kfree(mt9m001); return err; } v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure, V4L2_EXPOSURE_MANUAL, true); /* Second stage probe - when a capture adapter is there */ mt9m001->y_skip_top = 0; mt9m001->rect.left = MT9M001_COLUMN_SKIP; mt9m001->rect.top = MT9M001_ROW_SKIP; mt9m001->rect.width = MT9M001_MAX_WIDTH; mt9m001->rect.height = MT9M001_MAX_HEIGHT; ret = mt9m001_video_probe(icl, client); if (ret) { v4l2_ctrl_handler_free(&mt9m001->hdl); kfree(mt9m001); } return ret; } static int mt9m001_remove(struct i2c_client *client) { struct mt9m001 *mt9m001 = to_mt9m001(client); struct soc_camera_link *icl = soc_camera_i2c_to_link(client); v4l2_device_unregister_subdev(&mt9m001->subdev); v4l2_ctrl_handler_free(&mt9m001->hdl); mt9m001_video_remove(icl); kfree(mt9m001); return 0; } static const struct i2c_device_id mt9m001_id[] = { { "mt9m001", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mt9m001_id); static struct i2c_driver mt9m001_i2c_driver = { .driver = { .name = "mt9m001", }, .probe = mt9m001_probe, .remove = mt9m001_remove, .id_table = mt9m001_id, }; module_i2c_driver(mt9m001_i2c_driver); MODULE_DESCRIPTION("Micron MT9M001 Camera driver"); MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); MODULE_LICENSE("GPL");
gpl-2.0
gwindlord/android_kernel_lenovo_b8000
arch/arm/mach-pxa/viper.c
4863
22610
/* * linux/arch/arm/mach-pxa/viper.c * * Support for the Arcom VIPER SBC. * * Author: Ian Campbell * Created: Feb 03, 2003 * Copyright: Arcom Control Systems * * Maintained by Marc Zyngier <maz@misterjones.org> * <marc.zyngier@altran.com> * * Based on lubbock.c: * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/memory.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/gpio.h> #include <linux/jiffies.h> #include <linux/i2c-gpio.h> #include <linux/i2c/pxa-i2c.h> #include <linux/serial_8250.h> #include <linux/smc91x.h> #include <linux/pwm_backlight.h> #include <linux/usb/isp116x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/syscore_ops.h> #include <mach/pxa25x.h> #include <mach/audio.h> #include <mach/pxafb.h> #include <mach/regs-uart.h> #include <mach/arcom-pcmcia.h> #include <mach/viper.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/system_info.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include "generic.h" #include "devices.h" static unsigned int icr; static void viper_icr_set_bit(unsigned int bit) { icr |= bit; VIPER_ICR = icr; } static void viper_icr_clear_bit(unsigned int bit) { icr &= ~bit; VIPER_ICR = icr; } /* This function is used from the pcmcia module to reset the CF */ static void viper_cf_reset(int state) { if (state) viper_icr_set_bit(VIPER_ICR_CF_RST); else 
viper_icr_clear_bit(VIPER_ICR_CF_RST); } static struct arcom_pcmcia_pdata viper_pcmcia_info = { .cd_gpio = VIPER_CF_CD_GPIO, .rdy_gpio = VIPER_CF_RDY_GPIO, .pwr_gpio = VIPER_CF_POWER_GPIO, .reset = viper_cf_reset, }; static struct platform_device viper_pcmcia_device = { .name = "viper-pcmcia", .id = -1, .dev = { .platform_data = &viper_pcmcia_info, }, }; /* * The CPLD version register was not present on VIPER boards prior to * v2i1. On v1 boards where the version register is not present we * will just read back the previous value from the databus. * * Therefore we do two reads. The first time we write 0 to the * (read-only) register before reading and the second time we write * 0xff first. If the two reads do not match or they read back as 0xff * or 0x00 then we have version 1 hardware. */ static u8 viper_hw_version(void) { u8 v1, v2; unsigned long flags; local_irq_save(flags); VIPER_VERSION = 0; v1 = VIPER_VERSION; VIPER_VERSION = 0xff; v2 = VIPER_VERSION; v1 = (v1 != v2 || v1 == 0xff) ? 0 : v1; local_irq_restore(flags); return v1; } /* CPU system core operations. */ static int viper_cpu_suspend(void) { viper_icr_set_bit(VIPER_ICR_R_DIS); return 0; } static void viper_cpu_resume(void) { viper_icr_clear_bit(VIPER_ICR_R_DIS); } static struct syscore_ops viper_cpu_syscore_ops = { .suspend = viper_cpu_suspend, .resume = viper_cpu_resume, }; static unsigned int current_voltage_divisor; /* * If force is not true then step from existing to new divisor. If * force is true then jump straight to the new divisor. Stepping is * used because if the jump in voltage is too large, the VCC can dip * too low and the regulator cuts out. 
* * force can be used to initialize the divisor to a know state by * setting the value for the current clock speed, since we are already * running at that speed we know the voltage should be pretty close so * the jump won't be too large */ static void viper_set_core_cpu_voltage(unsigned long khz, int force) { int i = 0; unsigned int divisor = 0; const char *v; if (khz < 200000) { v = "1.0"; divisor = 0xfff; } else if (khz < 300000) { v = "1.1"; divisor = 0xde5; } else { v = "1.3"; divisor = 0x325; } pr_debug("viper: setting CPU core voltage to %sV at %d.%03dMHz\n", v, (int)khz / 1000, (int)khz % 1000); #define STEP 0x100 do { int step; if (force) step = divisor; else if (current_voltage_divisor < divisor - STEP) step = current_voltage_divisor + STEP; else if (current_voltage_divisor > divisor + STEP) step = current_voltage_divisor - STEP; else step = divisor; force = 0; gpio_set_value(VIPER_PSU_CLK_GPIO, 0); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 0); for (i = 1 << 11 ; i > 0 ; i >>= 1) { udelay(1); gpio_set_value(VIPER_PSU_DATA_GPIO, step & i); udelay(1); gpio_set_value(VIPER_PSU_CLK_GPIO, 1); udelay(1); gpio_set_value(VIPER_PSU_CLK_GPIO, 0); } udelay(1); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 1); udelay(1); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 0); current_voltage_divisor = step; } while (current_voltage_divisor != divisor); } /* Interrupt handling */ static unsigned long viper_irq_enabled_mask; static const int viper_isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12, 9, 14, 15 }; static const int viper_isa_irq_map[] = { 0, /* ISA irq #0, invalid */ 0, /* ISA irq #1, invalid */ 0, /* ISA irq #2, invalid */ 1 << 0, /* ISA irq #3 */ 1 << 1, /* ISA irq #4 */ 1 << 2, /* ISA irq #5 */ 1 << 3, /* ISA irq #6 */ 1 << 4, /* ISA irq #7 */ 0, /* ISA irq #8, invalid */ 1 << 8, /* ISA irq #9 */ 1 << 5, /* ISA irq #10 */ 1 << 6, /* ISA irq #11 */ 1 << 7, /* ISA irq #12 */ 0, /* ISA irq #13, invalid */ 1 << 9, /* ISA irq #14 */ 1 << 10, /* ISA irq #15 */ }; static inline int 
viper_irq_to_bitmask(unsigned int irq) { return viper_isa_irq_map[irq - PXA_ISA_IRQ(0)]; } static inline int viper_bit_to_irq(int bit) { return viper_isa_irqs[bit] + PXA_ISA_IRQ(0); } static void viper_ack_irq(struct irq_data *d) { int viper_irq = viper_irq_to_bitmask(d->irq); if (viper_irq & 0xff) VIPER_LO_IRQ_STATUS = viper_irq; else VIPER_HI_IRQ_STATUS = (viper_irq >> 8); } static void viper_mask_irq(struct irq_data *d) { viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(d->irq)); } static void viper_unmask_irq(struct irq_data *d) { viper_irq_enabled_mask |= viper_irq_to_bitmask(d->irq); } static inline unsigned long viper_irq_pending(void) { return (VIPER_HI_IRQ_STATUS << 8 | VIPER_LO_IRQ_STATUS) & viper_irq_enabled_mask; } static void viper_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending; pending = viper_irq_pending(); do { /* we're in a chained irq handler, * so ack the interrupt by hand */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = viper_bit_to_irq(__ffs(pending)); generic_handle_irq(irq); } pending = viper_irq_pending(); } while (pending); } static struct irq_chip viper_irq_chip = { .name = "ISA", .irq_ack = viper_ack_irq, .irq_mask = viper_mask_irq, .irq_unmask = viper_unmask_irq }; static void __init viper_init_irq(void) { int level; int isa_irq; pxa25x_init_irq(); /* setup ISA IRQs */ for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) { isa_irq = viper_bit_to_irq(level); irq_set_chip_and_handler(isa_irq, &viper_irq_chip, handle_edge_irq); set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); } irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), viper_irq_handler); irq_set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); } /* Flat Panel */ static struct pxafb_mode_info fb_mode_info[] = { { .pixclock = 157500, .xres = 320, .yres = 240, .bpp = 16, .hsync_len = 63, .left_margin = 7, .right_margin = 13, .vsync_len = 20, .upper_margin = 0, .lower_margin = 0, .sync = 0, }, }; static 
struct pxafb_mach_info fb_info = {
	.modes			= fb_mode_info,
	.num_modes		= 1,
	.lcd_conn		= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
};

/*
 * Claim and park the two GPIOs that gate the LCD panel and its
 * backlight.  Returns 0 on success or a negative errno; on failure
 * every GPIO claimed so far is released again (goto cleanup chain).
 */
static int viper_backlight_init(struct device *dev)
{
	int ret;

	/* GPIO9 and 10 control FB backlight. Initialise to off */
	ret = gpio_request(VIPER_BCKLIGHT_EN_GPIO, "Backlight");
	if (ret)
		goto err_request_bckl;

	ret = gpio_request(VIPER_LCD_EN_GPIO, "LCD");
	if (ret)
		goto err_request_lcd;

	ret = gpio_direction_output(VIPER_BCKLIGHT_EN_GPIO, 0);
	if (ret)
		goto err_dir;

	ret = gpio_direction_output(VIPER_LCD_EN_GPIO, 0);
	if (ret)
		goto err_dir;

	return 0;

err_dir:
	gpio_free(VIPER_LCD_EN_GPIO);
err_request_lcd:
	gpio_free(VIPER_BCKLIGHT_EN_GPIO);
err_request_bckl:
	dev_err(dev, "Failed to setup LCD GPIOs\n");

	return ret;
}

/*
 * Brightness-change hook: any non-zero brightness switches the panel
 * and backlight enables on; zero switches them off.  The (unmodified)
 * brightness is handed back to the PWM backlight core.
 */
static int viper_backlight_notify(struct device *dev, int brightness)
{
	gpio_set_value(VIPER_LCD_EN_GPIO, !!brightness);
	gpio_set_value(VIPER_BCKLIGHT_EN_GPIO, !!brightness);

	return brightness;
}

/* Release the GPIOs claimed in viper_backlight_init(). */
static void viper_backlight_exit(struct device *dev)
{
	gpio_free(VIPER_LCD_EN_GPIO);
	gpio_free(VIPER_BCKLIGHT_EN_GPIO);
}

/* pwm-backlight platform data: PWM0, period 1ms, full brightness default. */
static struct platform_pwm_backlight_data viper_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 100,
	.dft_brightness	= 100,
	.pwm_period_ns	= 1000000,
	.init		= viper_backlight_init,
	.notify		= viper_backlight_notify,
	.exit		= viper_backlight_exit,
};

static struct platform_device viper_backlight_device = {
	.name		= "pwm-backlight",
	.dev		= {
		.parent		= &pxa25x_device_pwm0.dev,
		.platform_data	= &viper_backlight_data,
	},
};

/* Ethernet */
/*
 * SMC91x resources: register window, edge-triggered GPIO interrupt, and
 * a 32-bit data window.  Note viper_init() drops the last resource on
 * v1 hardware (datacs line unusable there).
 */
static struct resource smc91x_resources[] = {
	[0] = {
		.name	= "smc91x-regs",
		.start	= VIPER_ETH_PHYS + 0x300,
		.end	= VIPER_ETH_PHYS + 0x30f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= PXA_GPIO_TO_IRQ(VIPER_ETH_GPIO),
		.end	= PXA_GPIO_TO_IRQ(VIPER_ETH_GPIO),
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
	[2] = {
		.name	= "smc91x-data32",
		.start	= VIPER_ETH_DATA_PHYS,
		.end	= VIPER_ETH_DATA_PHYS + 3,
		.flags	= IORESOURCE_MEM,
	},
};

static struct smc91x_platdata viper_smc91x_info = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
	.leda	= RPC_LED_100_10,
	.ledb	= RPC_LED_TX_RX,
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
	.dev		= {
		.platform_data	= &viper_smc91x_info,
	},
};

/* i2c */
/* Bit-banged i2c bus for the on-board RTC (DS1338 below). */
static struct i2c_gpio_platform_data i2c_bus_data = {
	.sda_pin = VIPER_RTC_I2C_SDA_GPIO,
	.scl_pin = VIPER_RTC_I2C_SCL_GPIO,
	.udelay  = 10,
	.timeout = HZ,
};

static struct platform_device i2c_bus_device = {
	.name		= "i2c-gpio",
	.id		= 1, /* pxa2xx-i2c is bus 0, so start at 1 */
	.dev = {
		.platform_data = &i2c_bus_data,
	}
};

static struct i2c_board_info __initdata viper_i2c_devices[] = {
	{
		I2C_BOARD_INFO("ds1338", 0x68),
	},
};

/*
 * Serial configuration:
 * You can either have the standard PXA ports driven by the PXA driver,
 * or all the ports (PXA + 16850) driven by the 8250 driver.
 * Choose your poison.
 */

static struct resource viper_serial_resources[] = {
#ifndef CONFIG_SERIAL_PXA
	{
		.start	= 0x40100000,
		.end	= 0x4010001f,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x40200000,
		.end	= 0x4020001f,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0x40700000,
		.end	= 0x4070001f,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= VIPER_UARTA_PHYS,
		.end	= VIPER_UARTA_PHYS + 0xf,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= VIPER_UARTB_PHYS,
		.end	= VIPER_UARTB_PHYS + 0xf,
		.flags	= IORESOURCE_MEM,
	},
#else
	/* PXA driver owns the internal ports: claim nothing here. */
	{ 0, },
#endif
};

static struct plat_serial8250_port serial_platform_data[] = {
#ifndef CONFIG_SERIAL_PXA
	/* Internal UARTs */
	{
		.membase	= (void *)&FFUART,
		.mapbase	= __PREG(FFUART),
		.irq		= IRQ_FFUART,
		.uartclk	= 921600 * 16,
		.regshift	= 2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
	},
	{
		.membase	= (void *)&BTUART,
		.mapbase	= __PREG(BTUART),
		.irq		= IRQ_BTUART,
		.uartclk	= 921600 * 16,
		.regshift	= 2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
	},
	{
		.membase	= (void *)&STUART,
		.mapbase	= __PREG(STUART),
		.irq		= IRQ_STUART,
		.uartclk	= 921600 * 16,
		.regshift	= 2,
		.flags
			= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
	},
	/* External UARTs */
	{
		.mapbase	= VIPER_UARTA_PHYS,
		.irq		= PXA_GPIO_TO_IRQ(VIPER_UARTA_GPIO),
		.irqflags	= IRQF_TRIGGER_RISING,
		.uartclk	= 1843200,
		.regshift	= 1,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP |
				  UPF_SKIP_TEST,
	},
	{
		.mapbase	= VIPER_UARTB_PHYS,
		.irq		= PXA_GPIO_TO_IRQ(VIPER_UARTB_GPIO),
		.irqflags	= IRQF_TRIGGER_RISING,
		.uartclk	= 1843200,
		.regshift	= 1,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP |
				  UPF_SKIP_TEST,
	},
#endif
	/* Zeroed sentinel terminates the table for the 8250 core. */
	{ },
};

static struct platform_device serial_device = {
	.name			= "serial8250",
	.id			= 0,
	.dev			= {
		.platform_data	= serial_platform_data,
	},
	.num_resources		= ARRAY_SIZE(viper_serial_resources),
	.resource		= viper_serial_resources,
};

/* USB */
/* Host-controller delay hook required by the isp116x driver. */
static void isp116x_delay(struct device *dev, int delay)
{
	ndelay(delay);
}

static struct resource isp116x_resources[] = {
	[0] = { /* DATA */
		.start	= VIPER_USB_PHYS + 0,
		.end	= VIPER_USB_PHYS + 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = { /* ADDR */
		.start	= VIPER_USB_PHYS + 2,
		.end	= VIPER_USB_PHYS + 3,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.start	= PXA_GPIO_TO_IRQ(VIPER_USB_GPIO),
		.end	= PXA_GPIO_TO_IRQ(VIPER_USB_GPIO),
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
static struct isp116x_platform_data isp116x_platform_data = {
	/* Enable internal resistors on downstream ports */
	.sel15Kres		= 1,
	/* On-chip overcurrent protection */
	.oc_enable		= 1,
	/* INT output polarity */
	.int_act_high		= 1,
	/* INT edge or level triggered */
	.int_edge_triggered	= 0,

	/* WAKEUP pin connected - NOT SUPPORTED  */
	/* .remote_wakeup_connected = 0, */
	/* Wakeup by devices on usb bus enabled */
	.remote_wakeup_enable	= 0,
	.delay			= isp116x_delay,
};

static struct platform_device isp116x_device = {
	.name			= "isp116x-hcd",
	.id			= -1,
	.num_resources		= ARRAY_SIZE(isp116x_resources),
	.resource		= isp116x_resources,
	.dev			= {
		.platform_data	=
&isp116x_platform_data,
	},
};

/* MTD */
static struct resource mtd_resources[] = {
	[0] = { /* RedBoot config + filesystem flash */
		.start	= VIPER_FLASH_PHYS,
		.end	= VIPER_FLASH_PHYS + SZ_32M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = { /* Boot flash */
		.start	= VIPER_BOOT_PHYS,
		.end	= VIPER_BOOT_PHYS + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[2] = { /*
		 * SRAM size is actually 256KB, 8bits, with a sparse mapping
		 * (each byte is on a 16bit boundary).
		 */
		.start	= _VIPER_SRAM_BASE,
		.end	= _VIPER_SRAM_BASE + SZ_512K - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/* The boot flash holds RedBoot only; keep it read-only. */
static struct mtd_partition viper_boot_flash_partition = {
	.name	= "RedBoot",
	.size	= SZ_1M,
	.offset	= 0,
	.mask_flags	= MTD_WRITEABLE,	/* force R/O */
};

/* [0]: main flash (partitions probed at runtime); [1]: boot flash. */
static struct physmap_flash_data viper_flash_data[] = {
	[0] = {
		.width		= 2,
		.parts		= NULL,
		.nr_parts	= 0,
	},
	[1] = {
		.width		= 2,
		.parts		= &viper_boot_flash_partition,
		.nr_parts	= 1,
	},
};

static struct platform_device viper_mtd_devices[] = {
	[0] = {
		.name		= "physmap-flash",
		.id		= 0,
		.dev		= {
			.platform_data	= &viper_flash_data[0],
		},
		.resource	= &mtd_resources[0],
		.num_resources	= 1,
	},
	[1] = {
		.name		= "physmap-flash",
		.id		= 1,
		.dev		= {
			.platform_data	= &viper_flash_data[1],
		},
		.resource	= &mtd_resources[1],
		.num_resources	= 1,
	},
};

/* Devices registered in one shot by viper_init(). */
static struct platform_device *viper_devs[] __initdata = {
	&smc91x_device,
	&i2c_bus_device,
	&serial_device,
	&isp116x_device,
	&viper_mtd_devices[0],
	&viper_mtd_devices[1],
	&viper_backlight_device,
	&viper_pcmcia_device,
};

/* Board pin multiplexing table, applied early in viper_init(). */
static mfp_cfg_t viper_pin_config[] __initdata = {
	/* Chip selects */
	GPIO15_nCS_1,
	GPIO78_nCS_2,
	GPIO79_nCS_3,
	GPIO80_nCS_4,
	GPIO33_nCS_5,

	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,

	/* FP Backlight */
	GPIO9_GPIO, 				/* VIPER_BCKLIGHT_EN_GPIO */
	GPIO10_GPIO,				/* VIPER_LCD_EN_GPIO */
	GPIO16_PWM0_OUT,

	/* Ethernet PHY Ready */
	GPIO18_RDY,

	/* Serial shutdown */
	GPIO12_GPIO | MFP_LPM_DRIVE_HIGH,	/* VIPER_UART_SHDN_GPIO */

	/* Compact-Flash / PC104 */
	GPIO48_nPOE,
	GPIO49_nPWE,
	GPIO50_nPIOR,
GPIO51_nPIOW, GPIO52_nPCE_1, GPIO53_nPCE_2, GPIO54_nPSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO8_GPIO, /* VIPER_CF_RDY_GPIO */ GPIO32_GPIO, /* VIPER_CF_CD_GPIO */ GPIO82_GPIO, /* VIPER_CF_POWER_GPIO */ /* Integrated UPS control */ GPIO20_GPIO, /* VIPER_UPS_GPIO */ /* Vcc regulator control */ GPIO6_GPIO, /* VIPER_PSU_DATA_GPIO */ GPIO11_GPIO, /* VIPER_PSU_CLK_GPIO */ GPIO19_GPIO, /* VIPER_PSU_nCS_LD_GPIO */ /* i2c busses */ GPIO26_GPIO, /* VIPER_TPM_I2C_SDA_GPIO */ GPIO27_GPIO, /* VIPER_TPM_I2C_SCL_GPIO */ GPIO83_GPIO, /* VIPER_RTC_I2C_SDA_GPIO */ GPIO84_GPIO, /* VIPER_RTC_I2C_SCL_GPIO */ /* PC/104 Interrupt */ GPIO1_GPIO | WAKEUP_ON_EDGE_RISE, /* VIPER_CPLD_GPIO */ }; static unsigned long viper_tpm; static int __init viper_tpm_setup(char *str) { strict_strtoul(str, 10, &viper_tpm); return 1; } __setup("tpm=", viper_tpm_setup); static void __init viper_tpm_init(void) { struct platform_device *tpm_device; struct i2c_gpio_platform_data i2c_tpm_data = { .sda_pin = VIPER_TPM_I2C_SDA_GPIO, .scl_pin = VIPER_TPM_I2C_SCL_GPIO, .udelay = 10, .timeout = HZ, }; char *errstr; /* Allocate TPM i2c bus if requested */ if (!viper_tpm) return; tpm_device = platform_device_alloc("i2c-gpio", 2); if (tpm_device) { if (!platform_device_add_data(tpm_device, &i2c_tpm_data, sizeof(i2c_tpm_data))) { if (platform_device_add(tpm_device)) { errstr = "register TPM i2c bus"; goto error_free_tpm; } } else { errstr = "allocate TPM i2c bus data"; goto error_free_tpm; } } else { errstr = "allocate TPM i2c device"; goto error_tpm; } return; error_free_tpm: kfree(tpm_device); error_tpm: pr_err("viper: Couldn't %s, giving up\n", errstr); } static void __init viper_init_vcore_gpios(void) { if (gpio_request(VIPER_PSU_DATA_GPIO, "PSU data")) goto err_request_data; if (gpio_request(VIPER_PSU_CLK_GPIO, "PSU clock")) goto err_request_clk; if (gpio_request(VIPER_PSU_nCS_LD_GPIO, "PSU cs")) goto err_request_cs; if (gpio_direction_output(VIPER_PSU_DATA_GPIO, 0) || 
	    gpio_direction_output(VIPER_PSU_CLK_GPIO, 0) ||
	    gpio_direction_output(VIPER_PSU_nCS_LD_GPIO, 0))
		goto err_dir;

	/* c/should assume redboot set the correct level ??? */
	viper_set_core_cpu_voltage(get_clk_frequency_khz(0), 1);

	return;

err_dir:
	gpio_free(VIPER_PSU_nCS_LD_GPIO);
err_request_cs:
	gpio_free(VIPER_PSU_CLK_GPIO);
err_request_clk:
	gpio_free(VIPER_PSU_DATA_GPIO);
err_request_data:
	pr_err("viper: Failed to setup vcore control GPIOs\n");
}

/*
 * Claim the UART shutdown GPIO and drive it low so that the external
 * serial ports are powered up (the pin is wired active-high shutdown).
 */
static void __init viper_init_serial_gpio(void)
{
	if (gpio_request(VIPER_UART_SHDN_GPIO, "UARTs shutdown"))
		goto err_request;

	if (gpio_direction_output(VIPER_UART_SHDN_GPIO, 0))
		goto err_dir;

	return;

err_dir:
	gpio_free(VIPER_UART_SHDN_GPIO);
err_request:
	pr_err("viper: Failed to setup UART shutdown GPIO\n");
}

#ifdef CONFIG_CPU_FREQ
/*
 * cpufreq transition hook: raise the core voltage BEFORE speeding up,
 * drop it only AFTER slowing down, so the CPU is never run faster than
 * its current supply allows.
 */
static int viper_cpufreq_notifier(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	/* TODO: Adjust timings??? */

	switch (val) {
	case CPUFREQ_PRECHANGE:
		if (freq->old < freq->new) {
			/* we are getting faster so raise the voltage
			 * before we change freq */
			viper_set_core_cpu_voltage(freq->new, 0);
		}
		break;
	case CPUFREQ_POSTCHANGE:
		if (freq->old > freq->new) {
			/* we are slowing down so drop the power
			 * after we change freq */
			viper_set_core_cpu_voltage(freq->new, 0);
		}
		break;
	case CPUFREQ_RESUMECHANGE:
		/* Coming out of suspend: re-sync voltage to frequency. */
		viper_set_core_cpu_voltage(freq->new, 0);
		break;
	default:
		/* ignore */
		break;
	}

	return 0;
}

static struct notifier_block viper_cpufreq_notifier_block = {
	.notifier_call  = viper_cpufreq_notifier
};

/* Hook the voltage-scaling notifier into cpufreq transitions. */
static void __init viper_init_cpufreq(void)
{
	if (cpufreq_register_notifier(&viper_cpufreq_notifier_block,
				      CPUFREQ_TRANSITION_NOTIFIER))
		pr_err("viper: Failed to setup cpufreq notifier\n");
}
#else
static inline void viper_init_cpufreq(void) {}
#endif

/* pm_power_off hook: tell the integrated UPS to cut power, then spin. */
static void viper_power_off(void)
{
	pr_notice("Shutting off UPS\n");
	gpio_set_value(VIPER_UPS_GPIO, 1);
	/* Spin to death...
 */
	while (1);
}

/*
 * Machine init: configure pin mux, register PXA and board devices,
 * detect the hardware revision, and bring up voltage control, cpufreq
 * scaling, the optional TPM bus and AC97.
 */
static void __init viper_init(void)
{
	u8 version;

	pm_power_off = viper_power_off;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(viper_pin_config));

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	/* Wake-up serial console */
	viper_init_serial_gpio();

	pxa_set_fb_info(NULL, &fb_info);

	/* v1 hardware cannot use the datacs line */
	version = viper_hw_version();
	if (version == 0)
		/* Drop the smc91x-data32 resource (last in the array). */
		smc91x_device.num_resources--;

	pxa_set_i2c_info(NULL);
	platform_add_devices(viper_devs, ARRAY_SIZE(viper_devs));

	viper_init_vcore_gpios();
	viper_init_cpufreq();

	register_syscore_ops(&viper_cpu_syscore_ops);

	if (version) {
		pr_info("viper: hardware v%di%d detected. "
			"CPLD revision %d.\n",
			VIPER_BOARD_VERSION(version),
			VIPER_BOARD_ISSUE(version),
			VIPER_CPLD_REVISION(version));
		/* Encode version/issue/CPLD revision into system_rev. */
		system_rev = (VIPER_BOARD_VERSION(version) << 8) |
			     (VIPER_BOARD_ISSUE(version) << 4) |
			     VIPER_CPLD_REVISION(version);
	} else {
		pr_info("viper: No version register.\n");
	}

	i2c_register_board_info(1, ARRAY_AND_SIZE(viper_i2c_devices));
	viper_tpm_init();

	pxa_set_ac97_info(NULL);
}

/* Static mappings for the CPLD and the PC/104 I/O window. */
static struct map_desc viper_io_desc[] __initdata = {
	{
		.virtual = VIPER_CPLD_BASE,
		.pfn     = __phys_to_pfn(VIPER_CPLD_PHYS),
		.length  = 0x00300000,
		.type    = MT_DEVICE,
	},
	{
		.virtual = VIPER_PC104IO_BASE,
		.pfn     = __phys_to_pfn(0x30000000),
		.length  = 0x00800000,
		.type    = MT_DEVICE,
	},
};

static void __init viper_map_io(void)
{
	pxa25x_map_io();

	iotable_init(viper_io_desc, ARRAY_SIZE(viper_io_desc));

	/* Wake-up on GPIO0 transition (CPLD interrupt line). */
	PCFR |= PCFR_OPDE;
}

MACHINE_START(VIPER, "Arcom/Eurotech VIPER SBC")
	/* Maintainer: Marc Zyngier <maz@misterjones.org> */
	.atag_offset	= 0x100,
	.map_io		= viper_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= viper_init_irq,
	.handle_irq	= pxa25x_handle_irq,
	.timer          = &pxa_timer,
	.init_machine	= viper_init,
	.restart	= pxa_restart,
MACHINE_END
gpl-2.0