repo_name
string
path
string
copies
string
size
string
content
string
license
string
tim-yang/linux-3.8
drivers/net/ethernet/cadence/at91_ether.c
159
12996
/* * Ethernet driver for the Atmel AT91RM9200 (Thunder) * * Copyright (C) 2003 SAN People (Pty) Ltd * * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. * Initial version by Rick Bronson 01/11/2003 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/ethtool.h> #include <linux/platform_data/macb.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gfp.h> #include <linux/phy.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_net.h> #include <linux/pinctrl/consumer.h> #include "macb.h" /* 1518 rounded up */ #define MAX_RBUFF_SZ 0x600 /* max number of receive buffers */ #define MAX_RX_DESCR 9 /* Initialize and start the Receiver and Transmit subsystems */ static int at91ether_start(struct net_device *dev) { struct macb *lp = netdev_priv(dev); dma_addr_t addr; u32 ctl; int i; lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, MAX_RX_DESCR * sizeof(struct macb_dma_desc), &lp->rx_ring_dma, GFP_KERNEL); if (!lp->rx_ring) { netdev_err(dev, "unable to alloc rx ring DMA buffer\n"); return -ENOMEM; } lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, MAX_RX_DESCR * MAX_RBUFF_SZ, &lp->rx_buffers_dma, GFP_KERNEL); if (!lp->rx_buffers) { netdev_err(dev, "unable to alloc rx data DMA buffer\n"); dma_free_coherent(&lp->pdev->dev, MAX_RX_DESCR * sizeof(struct macb_dma_desc), lp->rx_ring, lp->rx_ring_dma); lp->rx_ring = NULL; return -ENOMEM; } addr = lp->rx_buffers_dma; for (i = 0; i < MAX_RX_DESCR; i++) { lp->rx_ring[i].addr = addr; lp->rx_ring[i].ctrl = 0; addr += MAX_RBUFF_SZ; } /* Set 
the Wrap bit on the last descriptor */ lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); /* Reset buffer index */ lp->rx_tail = 0; /* Program address of descriptor list in Rx Buffer Queue register */ macb_writel(lp, RBQP, lp->rx_ring_dma); /* Enable Receive and Transmit */ ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); return 0; } /* Open the ethernet interface */ static int at91ether_open(struct net_device *dev) { struct macb *lp = netdev_priv(dev); u32 ctl; int ret; /* Clear internal statistics */ ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); macb_set_hwaddr(lp); ret = at91ether_start(dev); if (ret) return ret; /* Enable MAC interrupts */ macb_writel(lp, IER, MACB_BIT(RCOMP) | MACB_BIT(RXUBR) | MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); /* schedule a link state check */ phy_start(lp->phy_dev); netif_start_queue(dev); return 0; } /* Close the interface */ static int at91ether_close(struct net_device *dev) { struct macb *lp = netdev_priv(dev); u32 ctl; /* Disable Receiver and Transmitter */ ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); /* Disable MAC interrupts */ macb_writel(lp, IDR, MACB_BIT(RCOMP) | MACB_BIT(RXUBR) | MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); netif_stop_queue(dev); dma_free_coherent(&lp->pdev->dev, MAX_RX_DESCR * sizeof(struct macb_dma_desc), lp->rx_ring, lp->rx_ring_dma); lp->rx_ring = NULL; dma_free_coherent(&lp->pdev->dev, MAX_RX_DESCR * MAX_RBUFF_SZ, lp->rx_buffers, lp->rx_buffers_dma); lp->rx_buffers = NULL; return 0; } /* Transmit packet */ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macb *lp = netdev_priv(dev); if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { netif_stop_queue(dev); /* Store packet information (to free when Tx completed) */ lp->skb = skb; lp->skb_length = 
skb->len; lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); /* Set address of the data in the Transmit Address register */ macb_writel(lp, TAR, lp->skb_physaddr); /* Set length of the packet in the Transmit Control register */ macb_writel(lp, TCR, skb->len); } else { netdev_err(dev, "%s called, but device is busy!\n", __func__); return NETDEV_TX_BUSY; } return NETDEV_TX_OK; } /* Extract received frame from buffer descriptors and sent to upper layers. * (Called from interrupt context) */ static void at91ether_rx(struct net_device *dev) { struct macb *lp = netdev_priv(dev); unsigned char *p_recv; struct sk_buff *skb; unsigned int pktlen; while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ; pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); skb = netdev_alloc_skb(dev, pktlen + 2); if (skb) { skb_reserve(skb, 2); memcpy(skb_put(skb, pktlen), p_recv, pktlen); skb->protocol = eth_type_trans(skb, dev); lp->stats.rx_packets++; lp->stats.rx_bytes += pktlen; netif_rx(skb); } else { lp->stats.rx_dropped++; netdev_notice(dev, "Memory squeeze, dropping packet.\n"); } if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) lp->stats.multicast++; /* reset ownership bit */ lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); /* wrap after last buffer */ if (lp->rx_tail == MAX_RX_DESCR - 1) lp->rx_tail = 0; else lp->rx_tail++; } } /* MAC interrupt handler */ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct macb *lp = netdev_priv(dev); u32 intstatus, ctl; /* MAC Interrupt Status register indicates what interrupts are pending. * It is automatically cleared once read. 
*/ intstatus = macb_readl(lp, ISR); /* Receive complete */ if (intstatus & MACB_BIT(RCOMP)) at91ether_rx(dev); /* Transmit complete */ if (intstatus & MACB_BIT(TCOMP)) { /* The TCOM bit is set even if the transmission failed */ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) lp->stats.tx_errors++; if (lp->skb) { dev_kfree_skb_irq(lp->skb); lp->skb = NULL; dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); lp->stats.tx_packets++; lp->stats.tx_bytes += lp->skb_length; } netif_wake_queue(dev); } /* Work-around for EMAC Errata section 41.3.1 */ if (intstatus & MACB_BIT(RXUBR)) { ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); macb_writel(lp, NCR, ctl | MACB_BIT(RE)); } if (intstatus & MACB_BIT(ISR_ROVR)) netdev_err(dev, "ROVR error\n"); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void at91ether_poll_controller(struct net_device *dev) { unsigned long flags; local_irq_save(flags); at91ether_interrupt(dev->irq, dev); local_irq_restore(flags); } #endif static const struct net_device_ops at91ether_netdev_ops = { .ndo_open = at91ether_open, .ndo_stop = at91ether_close, .ndo_start_xmit = at91ether_start_xmit, .ndo_get_stats = macb_get_stats, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_do_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = at91ether_poll_controller, #endif }; #if defined(CONFIG_OF) static const struct of_device_id at91ether_dt_ids[] = { { .compatible = "cdns,at91rm9200-emac" }, { .compatible = "cdns,emac" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91ether_dt_ids); static int at91ether_get_phy_mode_dt(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; if (np) return of_get_phy_mode(np); return -ENODEV; } static int at91ether_get_hwaddr_dt(struct macb *bp) { struct device_node *np = bp->pdev->dev.of_node; if (np) { const char 
*mac = of_get_mac_address(np); if (mac) { memcpy(bp->dev->dev_addr, mac, ETH_ALEN); return 0; } } return -ENODEV; } #else static int at91ether_get_phy_mode_dt(struct platform_device *pdev) { return -ENODEV; } static int at91ether_get_hwaddr_dt(struct macb *bp) { return -ENODEV; } #endif /* Detect MAC & PHY and perform ethernet interface initialization */ static int __init at91ether_probe(struct platform_device *pdev) { struct macb_platform_data *board_data = pdev->dev.platform_data; struct resource *regs; struct net_device *dev; struct phy_device *phydev; struct pinctrl *pinctrl; struct macb *lp; int res; u32 reg; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) return -ENOENT; pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) { res = PTR_ERR(pinctrl); if (res == -EPROBE_DEFER) return res; dev_warn(&pdev->dev, "No pinctrl provided\n"); } dev = alloc_etherdev(sizeof(struct macb)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); lp->pdev = pdev; lp->dev = dev; spin_lock_init(&lp->lock); /* physical base address */ dev->base_addr = regs->start; lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!lp->regs) { res = -ENOMEM; goto err_free_dev; } /* Clock */ lp->pclk = devm_clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(lp->pclk)) { res = PTR_ERR(lp->pclk); goto err_free_dev; } clk_enable(lp->pclk); /* Install the interrupt handler */ dev->irq = platform_get_irq(pdev, 0); res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev); if (res) goto err_disable_clock; ether_setup(dev); dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &macb_ethtool_ops; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); res = at91ether_get_hwaddr_dt(lp); if (res < 0) macb_get_hwaddr(lp); res = at91ether_get_phy_mode_dt(pdev); if (res < 0) { if (board_data && board_data->is_rmii) lp->phy_interface = PHY_INTERFACE_MODE_RMII; else lp->phy_interface = PHY_INTERFACE_MODE_MII; } else 
{ lp->phy_interface = res; } macb_writel(lp, NCR, 0); reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); if (lp->phy_interface == PHY_INTERFACE_MODE_RMII) reg |= MACB_BIT(RM9200_RMII); macb_writel(lp, NCFGR, reg); /* Register the network interface */ res = register_netdev(dev); if (res) goto err_disable_clock; if (macb_mii_init(lp) != 0) goto err_out_unregister_netdev; /* will be enabled in open() */ netif_carrier_off(dev); phydev = lp->phy_dev; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); /* Display ethernet banner */ netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n", dev->base_addr, dev->irq, dev->dev_addr); return 0; err_out_unregister_netdev: unregister_netdev(dev); err_disable_clock: clk_disable(lp->pclk); err_free_dev: free_netdev(dev); return res; } static int at91ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct macb *lp = netdev_priv(dev); if (lp->phy_dev) phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); clk_disable(lp->pclk); free_netdev(dev); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) { struct net_device *net_dev = platform_get_drvdata(pdev); struct macb *lp = netdev_priv(net_dev); if (netif_running(net_dev)) { netif_stop_queue(net_dev); netif_device_detach(net_dev); clk_disable(lp->pclk); } return 0; } static int at91ether_resume(struct platform_device *pdev) { struct net_device *net_dev = platform_get_drvdata(pdev); struct macb *lp = netdev_priv(net_dev); if (netif_running(net_dev)) { clk_enable(lp->pclk); netif_device_attach(net_dev); netif_start_queue(net_dev); } return 0; } #else #define at91ether_suspend NULL #define at91ether_resume NULL #endif static struct platform_driver at91ether_driver = { .remove = 
at91ether_remove, .suspend = at91ether_suspend, .resume = at91ether_resume, .driver = { .name = "at91_ether", .owner = THIS_MODULE, .of_match_table = of_match_ptr(at91ether_dt_ids), }, }; static int __init at91ether_init(void) { return platform_driver_probe(&at91ether_driver, at91ether_probe); } static void __exit at91ether_exit(void) { platform_driver_unregister(&at91ether_driver); } module_init(at91ether_init) module_exit(at91ether_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); MODULE_AUTHOR("Andrew Victor"); MODULE_ALIAS("platform:at91_ether");
gpl-2.0
OSLL/linux
drivers/edac/cpc925_edac.c
1695
32410
/* * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. * * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: Cao Qingtao <qingtao.cao@windriver.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/edac.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include "edac_core.h" #include "edac_module.h" #define CPC925_EDAC_REVISION " Ver: 1.0.0" #define CPC925_EDAC_MOD_STR "cpc925_edac" #define cpc925_printk(level, fmt, arg...) \ edac_printk(level, "CPC925", fmt, ##arg) #define cpc925_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) /* * CPC925 registers are of 32 bits with bit0 defined at the * most significant bit and bit31 at that of least significant. */ #define CPC925_BITS_PER_REG 32 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) /* * EDAC device names for the error detections of * CPU Interface and Hypertransport Link. */ #define CPC925_CPU_ERR_DEV "cpu" #define CPC925_HT_LINK_DEV "htlink" /* Suppose DDR Refresh cycle is 15.6 microsecond */ #define CPC925_REF_FREQ 0xFA69 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ #define CPC925_NR_CSROWS 8 /* * All registers and bits definitions are taken from * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". 
*/ /* * CPU and Memory Controller Registers */ /************************************************************ * Processor Interface Exception Mask Register (APIMASK) ************************************************************/ #define REG_APIMASK_OFFSET 0x30070 enum apimask_bits { APIMASK_DART = CPC925_BIT(0), /* DART Exception */ APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | APIMASK_ADRS1), ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), }; #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) /************************************************************ * Processor Interface Exception Register (APIEXCP) ************************************************************/ #define REG_APIEXCP_OFFSET 0x30060 enum apiexcp_bits { APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIEXCP_ECC_UE_L = CPC925_BIT(10), 
/* UECC lower */ APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | APIEXCP_ADRS1), UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), }; /************************************************************ * Memory Bus Configuration Register (MBCR) ************************************************************/ #define REG_MBCR_OFFSET 0x2190 #define MBCR_64BITCFG_SHIFT 23 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) #define MBCR_64BITBUS_SHIFT 22 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) /************************************************************ * Memory Bank Mode Register (MBMR) ************************************************************/ #define REG_MBMR_OFFSET 0x21C0 #define MBMR_MODE_MAX_VALUE 0xF #define MBMR_MODE_SHIFT 25 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) #define MBMR_BBA_SHIFT 24 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) /************************************************************ * Memory Bank Boundary Address Register (MBBAR) ************************************************************/ #define REG_MBBAR_OFFSET 0x21D0 #define MBBAR_BBA_MAX_VALUE 0xFF #define MBBAR_BBA_SHIFT 24 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) /************************************************************ * Memory Scrub Control Register (MSCR) ************************************************************/ #define REG_MSCR_OFFSET 0x2400 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ #define MSCR_SI_SHIFT 16 /* si - bit8:15*/ #define MSCR_SI_MAX_VALUE 0xFF #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) /************************************************************ * Memory Scrub Range Start 
Register (MSRSR) ************************************************************/ #define REG_MSRSR_OFFSET 0x2410 /************************************************************ * Memory Scrub Range End Register (MSRER) ************************************************************/ #define REG_MSRER_OFFSET 0x2420 /************************************************************ * Memory Scrub Pattern Register (MSPR) ************************************************************/ #define REG_MSPR_OFFSET 0x2430 /************************************************************ * Memory Check Control Register (MCCR) ************************************************************/ #define REG_MCCR_OFFSET 0x2440 enum mccr_bits { MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ }; /************************************************************ * Memory Check Range End Register (MCRER) ************************************************************/ #define REG_MCRER_OFFSET 0x2450 /************************************************************ * Memory Error Address Register (MEAR) ************************************************************/ #define REG_MEAR_OFFSET 0x2460 #define MEAR_BCNT_MAX_VALUE 0x3 #define MEAR_BCNT_SHIFT 30 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) #define MEAR_RANK_MAX_VALUE 0x7 #define MEAR_RANK_SHIFT 27 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) #define MEAR_COL_MAX_VALUE 0x7FF #define MEAR_COL_SHIFT 16 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) #define MEAR_BANK_MAX_VALUE 0x3 #define MEAR_BANK_SHIFT 14 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) #define MEAR_ROW_MASK 0x00003FFF /************************************************************ * Memory Error Syndrome Register (MESR) ************************************************************/ #define REG_MESR_OFFSET 0x2470 #define MESR_ECC_SYN_H_MASK 0xFF00 #define MESR_ECC_SYN_L_MASK 0x00FF 
/************************************************************ * Memory Mode Control Register (MMCR) ************************************************************/ #define REG_MMCR_OFFSET 0x2500 enum mmcr_bits { MMCR_REG_DIMM_MODE = CPC925_BIT(3), }; /* * HyperTransport Link Registers */ /************************************************************ * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) ************************************************************/ #define REG_ERRCTRL_OFFSET 0x70140 enum errctrl_bits { /* nonfatal interrupts for */ ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), }; /************************************************************ * Link Configuration and Link Control Register (LINKCTRL) ************************************************************/ #define REG_LINKCTRL_OFFSET 0x70110 enum linkctrl_bits { LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), LINKCTRL_LINK_FAIL = CPC925_BIT(27), HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), }; /************************************************************ * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) ************************************************************/ #define REG_LINKERR_OFFSET 0x70120 enum linkerr_bits { LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ 
HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | LINKERR_PROT_ERR), }; /************************************************************ * Bridge Control Register (BRGCTRL) ************************************************************/ #define REG_BRGCTRL_OFFSET 0x70300 enum brgctrl_bits { BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ }; /* Private structure for edac memory controller */ struct cpc925_mc_pdata { void __iomem *vbase; unsigned long total_mem; const char *name; int edac_idx; }; /* Private structure for common edac device */ struct cpc925_dev_info { void __iomem *vbase; struct platform_device *pdev; char *ctl_name; int edac_idx; struct edac_device_ctl_info *edac_dev; void (*init)(struct cpc925_dev_info *dev_info); void (*exit)(struct cpc925_dev_info *dev_info); void (*check)(struct edac_device_ctl_info *edac_dev); }; /* Get total memory size from Open Firmware DTB */ static void get_total_mem(struct cpc925_mc_pdata *pdata) { struct device_node *np = NULL; const unsigned int *reg, *reg_end; int len, sw, aw; unsigned long start, size; np = of_find_node_by_type(NULL, "memory"); if (!np) return; aw = of_n_addr_cells(np); sw = of_n_size_cells(np); reg = (const unsigned int *)of_get_property(np, "reg", &len); reg_end = reg + len/4; pdata->total_mem = 0; do { start = of_read_number(reg, aw); reg += aw; size = of_read_number(reg, sw); reg += sw; edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size); pdata->total_mem += size; } while (reg < reg_end); of_node_put(np); edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem); } static void cpc925_init_csrows(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; struct dimm_info *dimm; enum dev_type dtype; int index, j; u32 mbmr, mbbar, bba, grain; unsigned long row_size, nr_pages, last_nr_pages = 0; get_total_mem(pdata); for (index = 0; index < mci->nr_csrows; index++) { mbmr = 
__raw_readl(pdata->vbase + REG_MBMR_OFFSET + 0x20 * index); mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + 0x20 + index); bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); if (bba == 0) continue; /* not populated */ csrow = mci->csrows[index]; row_size = bba * (1UL << 28); /* 256M */ csrow->first_page = last_nr_pages; nr_pages = row_size >> PAGE_SHIFT; csrow->last_page = csrow->first_page + nr_pages - 1; last_nr_pages = csrow->last_page + 1; switch (csrow->nr_channels) { case 1: /* Single channel */ grain = 32; /* four-beat burst of 32 bytes */ break; case 2: /* Dual channel */ default: grain = 64; /* four-beat burst of 64 bytes */ break; } switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { case 6: /* 0110, no way to differentiate X8 VS X16 */ case 5: /* 0101 */ case 8: /* 1000 */ dtype = DEV_X16; break; case 7: /* 0111 */ case 9: /* 1001 */ dtype = DEV_X8; break; default: dtype = DEV_UNKNOWN; break; } for (j = 0; j < csrow->nr_channels; j++) { dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / csrow->nr_channels; dimm->mtype = MEM_RDDR; dimm->edac_mode = EDAC_SECDED; dimm->grain = grain; dimm->dtype = dtype; } } } /* Enable memory controller ECC detection */ static void cpc925_mc_init(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apimask; u32 mccr; /* Enable various ECC error exceptions */ apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); if ((apimask & ECC_MASK_ENABLE) == 0) { apimask |= ECC_MASK_ENABLE; __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); } /* Enable ECC detection */ mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); if ((mccr & MCCR_ECC_EN) == 0) { mccr |= MCCR_ECC_EN; __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); } } /* Disable memory controller ECC detection */ static void cpc925_mc_exit(struct mem_ctl_info *mci) { /* * WARNING: * We are supposed to clear the ECC error detection bits, * and it will be no problem to 
do so. However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_mc_init() will * trigger machine check exception. * Also, it's ok to leave ECC error detection bits enabled, * since they are reset to 1 by default or by boot loader. */ return; } /* * Revert DDR column/row/bank addresses into page frame number and * offset in page. * * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs), * physical address(PA) bits to column address(CA) bits mappings are: * CA 0 1 2 3 4 5 6 7 8 9 10 * PA 59 58 57 56 55 54 53 52 51 50 49 * * physical address(PA) bits to bank address(BA) bits mappings are: * BA 0 1 * PA 43 44 * * physical address(PA) bits to row address(RA) bits mappings are: * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 */ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, unsigned long *pfn, unsigned long *offset, int *csrow) { u32 bcnt, rank, col, bank, row; u32 c; unsigned long pa; int i; bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; row = mear & MEAR_ROW_MASK; *csrow = rank; #ifdef CONFIG_EDAC_DEBUG if (mci->csrows[rank]->first_page == 0) { cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " "non-populated csrow, broken hardware?\n"); return; } #endif /* Revert csrow number */ pa = mci->csrows[rank]->first_page << PAGE_SHIFT; /* Revert column address */ col += bcnt; for (i = 0; i < 11; i++) { c = col & 0x1; col >>= 1; pa |= c << (14 - i); } /* Revert bank address */ pa |= bank << 19; /* Revert row address, in 4 steps */ for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (26 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (21 + i); } for (i = 0; i < 4; i++) { c = row & 0x1; row >>= 1; pa |= c << (18 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa 
|= c << (29 - i); } *offset = pa & (PAGE_SIZE - 1); *pfn = pa >> PAGE_SHIFT; edac_dbg(0, "ECC physical address 0x%lx\n", pa); } static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) { if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) return 0; if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) return 1; cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", syndrome); return 1; } /* Check memory controller registers for ECC errors */ static void cpc925_mc_check(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apiexcp; u32 mear; u32 mesr; u16 syndrome; unsigned long pfn = 0, offset = 0; int csrow = 0, channel = 0; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & ECC_EXCP_DETECTED) == 0) return; mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); /* Revert column/row addresses into page frame number, etc */ cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); if (apiexcp & CECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); channel = cpc925_mc_find_channel(mci, syndrome); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, offset, syndrome, csrow, channel, -1, mci->ctl_name, ""); } if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, pfn, offset, 0, csrow, -1, -1, mci->ctl_name, ""); } cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); 
cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", mesr); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", syndrome); } /******************** CPU err device********************************/ static u32 cpc925_cpu_mask_disabled(void) { struct device_node *cpus; struct device_node *cpunode = NULL; static u32 mask = 0; /* use cached value if available */ if (mask != 0) return mask; mask = APIMASK_ADI0 | APIMASK_ADI1; cpus = of_find_node_by_path("/cpus"); if (cpus == NULL) { cpc925_printk(KERN_DEBUG, "No /cpus node !\n"); return 0; } while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) { const u32 *reg = of_get_property(cpunode, "reg", NULL); if (strcmp(cpunode->type, "cpu")) { cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name); continue; } if (reg == NULL || *reg > 2) { cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name); continue; } mask &= ~APIMASK_ADI(*reg); } if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) { /* We assume that each CPU sits on it's own PI and that * for present CPUs the reg property equals to the PI * interface id */ cpc925_printk(KERN_WARNING, "Assuming PI id is equal to CPU MPIC id!\n"); } of_node_put(cpunode); of_node_put(cpus); return mask; } /* Enable CPU Errors detection */ static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) { u32 apimask; u32 cpumask; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); cpumask = cpc925_cpu_mask_disabled(); if (apimask & cpumask) { cpc925_printk(KERN_WARNING, "CPU(s) not present, " "but enabled in APIMASK, 
disabling\n"); apimask &= ~cpumask; } if ((apimask & CPU_MASK_ENABLE) == 0) apimask |= CPU_MASK_ENABLE; __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); } /* Disable CPU Errors detection */ static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) { /* * WARNING: * We are supposed to clear the CPU error detection bits, * and it will be no problem to do so. However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_cpu_init() will * trigger machine check exception. * Also, it's ok to leave CPU error detection bits enabled, * since they are reset to 1 by default. */ return; } /* Check for CPU Errors */ static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 apiexcp; u32 apimask; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & CPU_EXCP_DETECTED) == 0) return; if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0) return; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); cpc925_printk(KERN_INFO, "Processor Interface Fault\n" "Processor Interface register dump:\n"); cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); } /******************** HT Link err device****************************/ /* Enable HyperTransport Link Error detection */ static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { ht_errctrl |= HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } } /* Disable HyperTransport Link Error detection */ static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); ht_errctrl &= 
~HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } /* Check for HyperTransport Link errors */ static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); if (!((brgctrl & BRGCTRL_DETSERR) || (linkctrl & HT_LINKCTRL_DETECTED) || (errctrl & HT_ERRCTRL_DETECTED) || (linkerr & HT_LINKERR_DETECTED))) return; cpc925_printk(KERN_INFO, "HT Link Fault\n" "HT register dump:\n"); cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", brgctrl); cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", linkctrl); cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", errctrl); cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", linkerr); /* Clear by write 1 */ if (brgctrl & BRGCTRL_DETSERR) __raw_writel(BRGCTRL_DETSERR, dev_info->vbase + REG_BRGCTRL_OFFSET); if (linkctrl & HT_LINKCTRL_DETECTED) __raw_writel(HT_LINKCTRL_DETECTED, dev_info->vbase + REG_LINKCTRL_OFFSET); /* Initiate Secondary Bus Reset to clear the chain failure */ if (errctrl & ERRCTRL_CHN_FAL) __raw_writel(BRGCTRL_SECBUSRESET, dev_info->vbase + REG_BRGCTRL_OFFSET); if (errctrl & ERRCTRL_RSP_ERR) __raw_writel(ERRCTRL_RSP_ERR, dev_info->vbase + REG_ERRCTRL_OFFSET); if (linkerr & HT_LINKERR_DETECTED) __raw_writel(HT_LINKERR_DETECTED, dev_info->vbase + REG_LINKERR_OFFSET); edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); } static struct cpc925_dev_info cpc925_devs[] = { { .ctl_name = CPC925_CPU_ERR_DEV, .init = cpc925_cpu_init, .exit = cpc925_cpu_exit, .check = cpc925_cpu_check, }, { .ctl_name = CPC925_HT_LINK_DEV, .init = cpc925_htlink_init, .exit = cpc925_htlink_exit, .check = cpc925_htlink_check, }, { } }; /* * Add CPU Err detection and HyperTransport Link 
Err detection * as common "edac_device", they have no corresponding device * nodes in the Open Firmware DTB and we have to add platform * devices for them. Also, they will share the MMIO with that * of memory controller. */ static void cpc925_add_edac_devices(void __iomem *vbase) { struct cpc925_dev_info *dev_info; if (!vbase) { cpc925_printk(KERN_ERR, "MMIO not established yet\n"); return; } for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { dev_info->vbase = vbase; dev_info->pdev = platform_device_register_simple( dev_info->ctl_name, 0, NULL, 0); if (IS_ERR(dev_info->pdev)) { cpc925_printk(KERN_ERR, "Can't register platform device for %s\n", dev_info->ctl_name); continue; } /* * Don't have to allocate private structure but * make use of cpc925_devs[] instead. */ dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); if (!dev_info->edac_dev) { cpc925_printk(KERN_ERR, "No memory for edac device\n"); goto err1; } dev_info->edac_dev->pvt_info = dev_info; dev_info->edac_dev->dev = &dev_info->pdev->dev; dev_info->edac_dev->ctl_name = dev_info->ctl_name; dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = dev_info->check; if (dev_info->init) dev_info->init(dev_info); if (edac_device_add_device(dev_info->edac_dev) > 0) { cpc925_printk(KERN_ERR, "Unable to add edac device for %s\n", dev_info->ctl_name); goto err2; } edac_dbg(0, "Successfully added edac device for %s\n", dev_info->ctl_name); continue; err2: if (dev_info->exit) dev_info->exit(dev_info); edac_device_free_ctl_info(dev_info->edac_dev); err1: platform_device_unregister(dev_info->pdev); } } /* * Delete the common "edac_device" for CPU Err Detection * and HyperTransport Link Err Detection */ static void cpc925_del_edac_devices(void) { struct cpc925_dev_info 
*dev_info; for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { if (dev_info->edac_dev) { edac_device_del_device(dev_info->edac_dev->dev); edac_device_free_ctl_info(dev_info->edac_dev); platform_device_unregister(dev_info->pdev); } if (dev_info->exit) dev_info->exit(dev_info); edac_dbg(0, "Successfully deleted edac device for %s\n", dev_info->ctl_name); } } /* Convert current back-ground scrub rate into byte/sec bandwidth */ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; int bw; u32 mscr; u8 si; mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr); if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || (si == 0)) { cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); bw = 0; } else bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; return bw; } /* Return 0 for single channel; 1 for dual channel */ static int cpc925_mc_get_channels(void __iomem *vbase) { int dual = 0; u32 mbcr; mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); /* * Dual channel only when 128-bit wide physical bus * and 128-bit configuration. */ if (((mbcr & MBCR_64BITCFG_MASK) == 0) && ((mbcr & MBCR_64BITBUS_MASK) == 0)) dual = 1; edac_dbg(0, "%s channel\n", (dual > 0) ? 
"Dual" : "Single"); return dual; } static int cpc925_probe(struct platform_device *pdev) { static int edac_mc_idx; struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; void __iomem *vbase; struct cpc925_mc_pdata *pdata; struct resource *r; int res = 0, nr_channels; edac_dbg(0, "%s platform device found!\n", pdev->name); if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { res = -ENOMEM; goto out; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { cpc925_printk(KERN_ERR, "Unable to get resource\n"); res = -ENOENT; goto err1; } if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), pdev->name)) { cpc925_printk(KERN_ERR, "Unable to request mem region\n"); res = -EBUSY; goto err1; } vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!vbase) { cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); res = -ENOMEM; goto err2; } nr_channels = cpc925_mc_get_channels(vbase) + 1; layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = CPC925_NR_CSROWS; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; layers[1].is_virt_csrow = false; mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, sizeof(struct cpc925_mc_pdata)); if (!mci) { cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); res = -ENOMEM; goto err2; } pdata = mci->pvt_info; pdata->vbase = vbase; pdata->edac_idx = edac_mc_idx++; pdata->name = pdev->name; mci->pdev = &pdev->dev; platform_set_drvdata(pdev, mci); mci->dev_name = dev_name(&pdev->dev); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = CPC925_EDAC_MOD_STR; mci->mod_ver = CPC925_EDAC_REVISION; mci->ctl_name = pdev->name; if (edac_op_state == EDAC_OPSTATE_POLL) mci->edac_check = cpc925_mc_check; mci->ctl_page_to_phys = NULL; mci->scrub_mode = SCRUB_SW_SRC; mci->set_sdram_scrub_rate = NULL; mci->get_sdram_scrub_rate = 
cpc925_get_sdram_scrub_rate; cpc925_init_csrows(mci); /* Setup memory controller registers */ cpc925_mc_init(mci); if (edac_mc_add_mc(mci) > 0) { cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); goto err3; } cpc925_add_edac_devices(vbase); /* get this far and it's successful */ edac_dbg(0, "success\n"); res = 0; goto out; err3: cpc925_mc_exit(mci); edac_mc_free(mci); err2: devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); err1: devres_release_group(&pdev->dev, cpc925_probe); out: return res; } static int cpc925_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); /* * Delete common edac devices before edac mc, because * the former share the MMIO of the latter. */ cpc925_del_edac_devices(); cpc925_mc_exit(mci); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); return 0; } static struct platform_driver cpc925_edac_driver = { .probe = cpc925_probe, .remove = cpc925_remove, .driver = { .name = "cpc925_edac", } }; static int __init cpc925_edac_init(void) { int ret = 0; printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); /* Only support POLL mode so far */ edac_op_state = EDAC_OPSTATE_POLL; ret = platform_driver_register(&cpc925_edac_driver); if (ret) { printk(KERN_WARNING "Failed to register %s\n", CPC925_EDAC_MOD_STR); } return ret; } static void __exit cpc925_edac_exit(void) { platform_driver_unregister(&cpc925_edac_driver); } module_init(cpc925_edac_init); module_exit(cpc925_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
gpl-2.0
compulab/trimslice-android-kernel
drivers/infiniband/hw/ipath/ipath_sysfs.c
2975
30740
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/ctype.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" /** * ipath_parse_ushort - parse an unsigned short value in an arbitrary base * @str: the string containing the number * @valp: where to put the result * * returns the number of bytes consumed, or negative value on error */ int ipath_parse_ushort(const char *str, unsigned short *valp) { unsigned long val; char *end; int ret; if (!isdigit(str[0])) { ret = -EINVAL; goto bail; } val = simple_strtoul(str, &end, 0); if (val > 0xffff) { ret = -EINVAL; goto bail; } *valp = val; ret = end + 1 - str; if (ret == 0) ret = -EINVAL; bail: return ret; } static ssize_t show_version(struct device_driver *dev, char *buf) { /* The string printed here is already newline-terminated. */ return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version); } static ssize_t show_num_units(struct device_driver *dev, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", ipath_count_units(NULL, NULL, NULL)); } static ssize_t show_status(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); ssize_t ret; if (!dd->ipath_statusp) { ret = -EINVAL; goto bail; } ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long) *(dd->ipath_statusp)); bail: return ret; } static const char *ipath_status_str[] = { "Initted", "Disabled", "Admin_Disabled", "", /* This used to be the old "OIB_SMA" status. */ "", /* This used to be the old "SMA" status. 
*/ "Present", "IB_link_up", "IB_configured", "NoIBcable", "Fatal_Hardware_Error", NULL, }; static ssize_t show_status_str(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int i, any; u64 s; ssize_t ret; if (!dd->ipath_statusp) { ret = -EINVAL; goto bail; } s = *(dd->ipath_statusp); *buf = '\0'; for (any = i = 0; s && ipath_status_str[i]; i++) { if (s & 1) { if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) /* overflow */ break; if (strlcat(buf, ipath_status_str[i], PAGE_SIZE) >= PAGE_SIZE) break; any = 1; } s >>= 1; } if (any) strlcat(buf, "\n", PAGE_SIZE); ret = strlen(buf); bail: return ret; } static ssize_t show_boardversion(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); /* The string printed here is already newline-terminated. */ return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion); } static ssize_t show_localbus_info(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); /* The string printed here is already newline-terminated. 
*/ return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info); } static ssize_t show_lmc(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc); } static ssize_t store_lmc(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); u16 lmc = 0; int ret; ret = ipath_parse_ushort(buf, &lmc); if (ret < 0) goto invalid; if (lmc > 7) { ret = -EINVAL; goto invalid; } ipath_set_lid(dd, dd->ipath_lid, lmc); goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc); bail: return ret; } static ssize_t show_lid(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid); } static ssize_t store_lid(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); u16 lid = 0; int ret; ret = ipath_parse_ushort(buf, &lid); if (ret < 0) goto invalid; if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) { ret = -EINVAL; goto invalid; } ipath_set_lid(dd, lid, dd->ipath_lmc); goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid); bail: return ret; } static ssize_t show_mlid(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid); } static ssize_t store_mlid(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); u16 mlid; int ret; ret = ipath_parse_ushort(buf, &mlid); if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) goto invalid; dd->ipath_mlid = mlid; goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid MLID\n"); bail: return ret; } static ssize_t show_guid(struct device *dev, struct 
device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); u8 *guid; guid = (u8 *) & (dd->ipath_guid); return scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", guid[0], guid[1], guid[2], guid[3], guid[4], guid[5], guid[6], guid[7]); } static ssize_t store_guid(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); ssize_t ret; unsigned short guid[8]; __be64 new_guid; u8 *ng; int i; if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx", &guid[0], &guid[1], &guid[2], &guid[3], &guid[4], &guid[5], &guid[6], &guid[7]) != 8) goto invalid; ng = (u8 *) &new_guid; for (i = 0; i < 8; i++) { if (guid[i] > 0xff) goto invalid; ng[i] = guid[i]; } if (new_guid == 0) goto invalid; dd->ipath_guid = new_guid; dd->ipath_nguid = 1; if (dd->verbs_dev) dd->verbs_dev->ibdev.node_guid = new_guid; ret = strlen(buf); goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid GUID\n"); ret = -EINVAL; bail: return ret; } static ssize_t show_nguid(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid); } static ssize_t show_nports(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); /* Return the number of user ports available. 
*/ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1); } static ssize_t show_serial(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); buf[sizeof dd->ipath_serial] = '\0'; memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial); strcat(buf, "\n"); return strlen(buf); } static ssize_t show_unit(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit); } static ssize_t show_jint_max_packets(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets); } static ssize_t store_jint_max_packets(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); u16 v = 0; int ret; ret = ipath_parse_ushort(buf, &v); if (ret < 0) ipath_dev_err(dd, "invalid jint_max_packets.\n"); else dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v); return ret; } static ssize_t show_jint_idle_ticks(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks); } static ssize_t store_jint_idle_ticks(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); u16 v = 0; int ret; ret = ipath_parse_ushort(buf, &v); if (ret < 0) ipath_dev_err(dd, "invalid jint_idle_ticks.\n"); else dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets); return ret; } #define DEVICE_COUNTER(name, attr) \ static ssize_t show_counter_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct ipath_devdata *dd = dev_get_drvdata(dev); \ return scnprintf(\ buf, PAGE_SIZE, "%llu\n", (unsigned long long) \ ipath_snap_cntr( 
\ dd, offsetof(struct infinipath_counters, \ attr) / sizeof(u64))); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL); DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt); DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt); DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt); DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt); DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt); DEVICE_COUNTER(lb_ints, LBIntCnt); DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt); DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt); DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt); DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt); DEVICE_COUNTER(rx_dwords, RxDwordCnt); DEVICE_COUNTER(rx_ebps, RxEBPCnt); DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt); DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt); DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt); DEVICE_COUNTER(rx_len_errs, RxLenErrCnt); DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt); DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt); DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt); DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt); DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt); DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt); DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt); DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt); DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt); DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt); DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt); DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt); DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt); DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt); DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt); DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt); DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt); DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt); DEVICE_COUNTER(tx_dwords, TxDwordCnt); DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt); DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt); DEVICE_COUNTER(tx_len_errs, 
TxLenErrCnt); DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt); DEVICE_COUNTER(tx_underruns, TxUnderrunCnt); DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt); static struct attribute *dev_counter_attributes[] = { &dev_attr_ib_link_downeds.attr, &dev_attr_ib_link_err_recoveries.attr, &dev_attr_ib_status_changes.attr, &dev_attr_ib_symbol_errs.attr, &dev_attr_lb_flow_stalls.attr, &dev_attr_lb_ints.attr, &dev_attr_rx_bad_formats.attr, &dev_attr_rx_buf_ovfls.attr, &dev_attr_rx_data_pkts.attr, &dev_attr_rx_dropped_pkts.attr, &dev_attr_rx_dwords.attr, &dev_attr_rx_ebps.attr, &dev_attr_rx_flow_ctrl_errs.attr, &dev_attr_rx_flow_pkts.attr, &dev_attr_rx_icrc_errs.attr, &dev_attr_rx_len_errs.attr, &dev_attr_rx_link_problems.attr, &dev_attr_rx_lpcrc_errs.attr, &dev_attr_rx_max_min_len_errs.attr, &dev_attr_rx_p0_hdr_egr_ovfls.attr, &dev_attr_rx_p1_hdr_egr_ovfls.attr, &dev_attr_rx_p2_hdr_egr_ovfls.attr, &dev_attr_rx_p3_hdr_egr_ovfls.attr, &dev_attr_rx_p4_hdr_egr_ovfls.attr, &dev_attr_rx_p5_hdr_egr_ovfls.attr, &dev_attr_rx_p6_hdr_egr_ovfls.attr, &dev_attr_rx_p7_hdr_egr_ovfls.attr, &dev_attr_rx_p8_hdr_egr_ovfls.attr, &dev_attr_rx_pkey_mismatches.attr, &dev_attr_rx_tid_full_errs.attr, &dev_attr_rx_tid_valid_errs.attr, &dev_attr_rx_vcrc_errs.attr, &dev_attr_tx_data_pkts.attr, &dev_attr_tx_dropped_pkts.attr, &dev_attr_tx_dwords.attr, &dev_attr_tx_flow_pkts.attr, &dev_attr_tx_flow_stalls.attr, &dev_attr_tx_len_errs.attr, &dev_attr_tx_max_min_len_errs.attr, &dev_attr_tx_underruns.attr, &dev_attr_tx_unsup_vl_errs.attr, NULL }; static struct attribute_group dev_counter_attr_group = { .name = "counters", .attrs = dev_counter_attributes }; static ssize_t store_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; if (count < 5 || memcmp(buf, "reset", 5)) { ret = -EINVAL; goto bail; } if (dd->ipath_flags & IPATH_DISABLED) { /* * post-reset init would re-enable interrupts, etc. 
* so don't allow reset on disabled devices. Not * perfect error, but about the best choice. */ dev_info(dev,"Unit %d is disabled, can't reset\n", dd->ipath_unit); ret = -EINVAL; goto bail; } ret = ipath_reset_device(dd->ipath_unit); bail: return ret<0 ? ret : count; } static ssize_t store_link_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 state; ret = ipath_parse_ushort(buf, &state); if (ret < 0) goto invalid; r = ipath_set_linkstate(dd, state); if (r < 0) { ret = r; goto bail; } goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid link state\n"); bail: return ret; } static ssize_t show_mtu(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu); } static ssize_t store_mtu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); ssize_t ret; u16 mtu = 0; int r; ret = ipath_parse_ushort(buf, &mtu); if (ret < 0) goto invalid; r = ipath_set_mtu(dd, mtu); if (r < 0) ret = r; goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid MTU\n"); bail: return ret; } static ssize_t show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%u\n", (dd->ipath_flags & IPATH_DISABLED) ? 
0 : 1); } static ssize_t store_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); ssize_t ret; u16 enable = 0; ret = ipath_parse_ushort(buf, &enable); if (ret < 0) { ipath_dev_err(dd, "attempt to use non-numeric on enable\n"); goto bail; } if (enable) { if (!(dd->ipath_flags & IPATH_DISABLED)) goto bail; dev_info(dev, "Enabling unit %d\n", dd->ipath_unit); /* same as post-reset */ ret = ipath_init_chip(dd, 1); if (ret) ipath_dev_err(dd, "Failed to enable unit %d\n", dd->ipath_unit); else { dd->ipath_flags &= ~IPATH_DISABLED; *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED; } } else if (!(dd->ipath_flags & IPATH_DISABLED)) { dev_info(dev, "Disabling unit %d\n", dd->ipath_unit); ipath_shutdown_device(dd); dd->ipath_flags |= IPATH_DISABLED; *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED; } bail: return ret; } static ssize_t store_rx_pol_inv(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret < 0) goto invalid; r = ipath_set_rx_pol_inv(dd, val); if (r < 0) { ret = r; goto bail; } goto bail; invalid: ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n"); bail: return ret; } static ssize_t store_led_override(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret > 0) ipath_set_led_override(dd, val); else ipath_dev_err(dd, "attempt to set invalid LED override\n"); return ret; } static ssize_t show_logged_errs(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int idx, count; /* force consistency with actual EEPROM */ if (ipath_update_eeprom_log(dd) != 0) return -ENXIO; count = 0; for (idx = 0; idx < IPATH_EEP_LOG_CNT; 
++idx) { count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", dd->ipath_eep_st_errs[idx], idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' '); } return count; } /* * New sysfs entries to control various IB config. These all turn into * accesses via ipath_f_get/set_ib_cfg. * * Get/Set heartbeat enable. Or of 1=enabled, 2=auto */ static ssize_t show_hrtbt_enb(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_hrtbt_enb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret >= 0 && val > 3) ret = -EINVAL; if (ret < 0) { ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); goto bail; } /* * Set the "intentional" heartbeat enable per either of * "Enable" and "Auto", as these are normally set together. * This bit is consulted when leaving loopback mode, * because entering loopback mode overrides it and automatically * disables heartbeat. */ r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val); if (r < 0) ret = r; else if (val == IPATH_IB_HRTBT_OFF) dd->ipath_flags |= IPATH_NO_HRTBT; else dd->ipath_flags &= ~IPATH_NO_HRTBT; bail: return ret; } /* * Get/Set Link-widths enabled. 
Or of 1=1x, 2=4x (this is human/IB centric, * _not_ the particular encoding of any given chip) */ static ssize_t show_lwid_enb(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_lwid_enb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret >= 0 && (val == 0 || val > 3)) ret = -EINVAL; if (ret < 0) { ipath_dev_err(dd, "attempt to set invalid Link Width (enable)\n"); goto bail; } r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val); if (r < 0) ret = r; bail: return ret; } /* Get current link width */ static ssize_t show_lwid(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } /* * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR. 
*/ static ssize_t show_spd_enb(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_spd_enb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR))) ret = -EINVAL; if (ret < 0) { ipath_dev_err(dd, "attempt to set invalid Link Speed (enable)\n"); goto bail; } r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val); if (r < 0) ret = r; bail: return ret; } /* Get current link speed */ static ssize_t show_spd(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } /* * Get/Set RX polarity-invert enable. 0=no, 1=yes. */ static ssize_t show_rx_polinv_enb(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_rx_polinv_enb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret >= 0 && val > 1) { ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n"); ret = -EINVAL; goto bail; } r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val); if (r < 0) ret = r; bail: return ret; } /* * Get/Set RX lane-reversal enable. 0=no, 1=yes. 
*/ static ssize_t show_lanerev_enb(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB); if (ret >= 0) ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_lanerev_enb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, r; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret >= 0 && val > 1) { ret = -EINVAL; ipath_dev_err(dd, "attempt to set invalid Lane reversal (enable)\n"); goto bail; } r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val); if (r < 0) ret = r; bail: return ret; } static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL); static DRIVER_ATTR(version, S_IRUGO, show_version, NULL); static struct attribute *driver_attributes[] = { &driver_attr_num_units.attr, &driver_attr_version.attr, NULL }; static struct attribute_group driver_attr_group = { .attrs = driver_attributes }; static ssize_t store_tempsense(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret, stat; u16 val; ret = ipath_parse_ushort(buf, &val); if (ret <= 0) { ipath_dev_err(dd, "attempt to set invalid tempsense config\n"); goto bail; } /* If anything but the highest limit, enable T_CRIT_A "interrupt" */ stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0); if (stat) { ipath_dev_err(dd, "Unable to set tempsense config\n"); ret = -1; goto bail; } stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF)); if (stat) { ipath_dev_err(dd, "Unable to set local Tcrit\n"); ret = -1; goto bail; } stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8)); if (stat) { ipath_dev_err(dd, "Unable to set remote Tcrit\n"); ret = -1; goto bail; } bail: return ret; } /* * dump tempsense regs. in decimal, to ease shell-scripts. 
*/ static ssize_t show_tempsense(struct device *dev, struct device_attribute *attr, char *buf) { struct ipath_devdata *dd = dev_get_drvdata(dev); int ret; int idx; u8 regvals[8]; ret = -ENXIO; for (idx = 0; idx < 8; ++idx) { if (idx == 6) continue; ret = ipath_tempsense_read(dd, idx); if (ret < 0) break; regvals[idx] = ret; } if (idx == 8) ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n", *(signed char *)(regvals), *(signed char *)(regvals + 1), regvals[2], regvals[3], *(signed char *)(regvals + 5), *(signed char *)(regvals + 7)); return ret; } const struct attribute_group *ipath_driver_attr_groups[] = { &driver_attr_group, NULL, }; static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid); static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc); static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid); static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state); static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid); static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu); static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled); static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL); static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL); static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset); static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL); static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv); static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override); static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO, show_jint_max_packets, store_jint_max_packets); static DEVICE_ATTR(jint_idle_ticks, 
S_IWUSR | S_IRUGO, show_jint_idle_ticks, store_jint_idle_ticks); static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO, show_tempsense, store_tempsense); static struct attribute *dev_attributes[] = { &dev_attr_guid.attr, &dev_attr_lmc.attr, &dev_attr_lid.attr, &dev_attr_link_state.attr, &dev_attr_mlid.attr, &dev_attr_mtu.attr, &dev_attr_nguid.attr, &dev_attr_nports.attr, &dev_attr_serial.attr, &dev_attr_status.attr, &dev_attr_status_str.attr, &dev_attr_boardversion.attr, &dev_attr_unit.attr, &dev_attr_enabled.attr, &dev_attr_rx_pol_inv.attr, &dev_attr_led_override.attr, &dev_attr_logged_errors.attr, &dev_attr_tempsense.attr, &dev_attr_localbus_info.attr, NULL }; static struct attribute_group dev_attr_group = { .attrs = dev_attributes }; static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb, store_hrtbt_enb); static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb, store_lwid_enb); static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL); static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb, store_spd_enb); static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL); static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb, store_rx_polinv_enb); static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb, store_lanerev_enb); static struct attribute *dev_ibcfg_attributes[] = { &dev_attr_hrtbt_enable.attr, &dev_attr_link_width_enable.attr, &dev_attr_link_width.attr, &dev_attr_link_speed_enable.attr, &dev_attr_link_speed.attr, &dev_attr_rx_pol_inv_enable.attr, &dev_attr_rx_lane_rev_enable.attr, NULL }; static struct attribute_group dev_ibcfg_attr_group = { .attrs = dev_ibcfg_attributes }; /** * ipath_expose_reset - create a device reset file * @dev: the device structure * * Only expose a file that lets us reset the device after someone * enters diag mode. A device reset is quite likely to crash the * machine entirely, so we don't want to normally make it * available. * * Called with ipath_mutex held. 
*/ int ipath_expose_reset(struct device *dev) { static int exposed; int ret; if (!exposed) { ret = device_create_file(dev, &dev_attr_reset); exposed = 1; } else ret = 0; return ret; } int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd) { int ret; ret = sysfs_create_group(&dev->kobj, &dev_attr_group); if (ret) goto bail; ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group); if (ret) goto bail_attrs; if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) { ret = device_create_file(dev, &dev_attr_jint_idle_ticks); if (ret) goto bail_counter; ret = device_create_file(dev, &dev_attr_jint_max_packets); if (ret) goto bail_idle; ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group); if (ret) goto bail_max; } return 0; bail_max: device_remove_file(dev, &dev_attr_jint_max_packets); bail_idle: device_remove_file(dev, &dev_attr_jint_idle_ticks); bail_counter: sysfs_remove_group(&dev->kobj, &dev_counter_attr_group); bail_attrs: sysfs_remove_group(&dev->kobj, &dev_attr_group); bail: return ret; } void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd) { sysfs_remove_group(&dev->kobj, &dev_counter_attr_group); if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) { sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group); device_remove_file(dev, &dev_attr_jint_idle_ticks); device_remove_file(dev, &dev_attr_jint_max_packets); } sysfs_remove_group(&dev->kobj, &dev_attr_group); device_remove_file(dev, &dev_attr_reset); }
gpl-2.0
drod2169/drodspeed-bfs
arch/arm/mach-msm/board-mahimahi-smb329.c
5023
4070
/* drivers/i2c/chips/smb329.c * * SMB329B Switch Charger (SUMMIT Microelectronics) * * Copyright (C) 2009 HTC Corporation * Author: Justin Lin <Justin_Lin@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <asm/atomic.h> #include "board-mahimahi-smb329.h" static struct smb329_data { struct i2c_client *client; uint8_t version; struct work_struct work; struct mutex state_lock; int chg_state; } smb329; static int smb329_i2c_write(uint8_t *value, uint8_t reg, uint8_t num_bytes) { int ret; struct i2c_msg msg; /* write the first byte of buffer as the register address */ value[0] = reg; msg.addr = smb329.client->addr; msg.len = num_bytes + 1; msg.flags = 0; msg.buf = value; ret = i2c_transfer(smb329.client->adapter, &msg, 1); return (ret >= 0) ? 0 : ret; } static int smb329_i2c_read(uint8_t *value, uint8_t reg, uint8_t num_bytes) { int ret; struct i2c_msg msg[2]; /* setup the address to read */ msg[0].addr = smb329.client->addr; msg[0].len = 1; msg[0].flags = 0; msg[0].buf = &reg; /* setup the read buffer */ msg[1].addr = smb329.client->addr; msg[1].flags = I2C_M_RD; msg[1].len = num_bytes; msg[1].buf = value; ret = i2c_transfer(smb329.client->adapter, msg, 2); return (ret >= 0) ? 
0 : ret; } static int smb329_i2c_write_byte(uint8_t value, uint8_t reg) { int ret; uint8_t buf[2] = { 0 }; buf[1] = value; ret = smb329_i2c_write(buf, reg, 1); if (ret) pr_err("smb329: write byte error (%d)\n", ret); return ret; } static int smb329_i2c_read_byte(uint8_t *value, uint8_t reg) { int ret = smb329_i2c_read(value, reg, 1); if (ret) pr_err("smb329: read byte error (%d)\n", ret); return ret; } int smb329_set_charger_ctrl(uint32_t ctl) { mutex_lock(&smb329.state_lock); smb329.chg_state = ctl; schedule_work(&smb329.work); mutex_unlock(&smb329.state_lock); return 0; } static void smb329_work_func(struct work_struct *work) { mutex_lock(&smb329.state_lock); switch (smb329.chg_state) { case SMB329_ENABLE_FAST_CHG: pr_info("smb329: charger on (fast)\n"); smb329_i2c_write_byte(0x84, 0x31); smb329_i2c_write_byte(0x08, 0x05); if ((smb329.version & 0x18) == 0x0) smb329_i2c_write_byte(0xA9, 0x00); break; case SMB329_DISABLE_CHG: case SMB329_ENABLE_SLOW_CHG: pr_info("smb329: charger off/slow\n"); smb329_i2c_write_byte(0x88, 0x31); smb329_i2c_write_byte(0x08, 0x05); break; default: pr_err("smb329: unknown charger state %d\n", smb329.chg_state); } mutex_unlock(&smb329.state_lock); } static int smb329_probe(struct i2c_client *client, const struct i2c_device_id *id) { if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { dev_dbg(&client->dev, "[SMB329]:I2C fail\n"); return -EIO; } smb329.client = client; mutex_init(&smb329.state_lock); INIT_WORK(&smb329.work, smb329_work_func); smb329_i2c_read_byte(&smb329.version, 0x3B); pr_info("smb329 version: 0x%02x\n", smb329.version); return 0; } static const struct i2c_device_id smb329_id[] = { { "smb329", 0 }, { }, }; static struct i2c_driver smb329_driver = { .driver.name = "smb329", .id_table = smb329_id, .probe = smb329_probe, }; static int __init smb329_init(void) { int ret = i2c_add_driver(&smb329_driver); if (ret) pr_err("smb329_init: failed\n"); return ret; } module_init(smb329_init); MODULE_AUTHOR("Justin Lin 
<Justin_Lin@htc.com>"); MODULE_DESCRIPTION("SUMMIT Microelectronics SMB329B switch charger"); MODULE_LICENSE("GPL");
gpl-2.0
zarboz/m8wlv
drivers/media/dvb/frontends/nxt200x.c
5023
30294
/* * Support for NXT2002 and NXT2004 - VSB/QAM * * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com> * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net> * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net> * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* * NOTES ABOUT THIS DRIVER * * This Linux driver supports: * B2C2/BBTI Technisat Air2PC - ATSC (NXT2002) * AverTVHD MCE A180 (NXT2004) * ATI HDTV Wonder (NXT2004) * * This driver needs external firmware. Please use the command * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" or * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to * download/extract the appropriate firmware, and then copy it to * /usr/lib/hotplug/firmware/ or /lib/firmware/ * (depending on configuration of firmware hotplug). 
*/ #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw" #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw" #define CRC_CCIT_MASK 0x1021 #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include "dvb_frontend.h" #include "nxt200x.h" struct nxt200x_state { struct i2c_adapter* i2c; const struct nxt200x_config* config; struct dvb_frontend frontend; /* demodulator private data */ nxt_chip_type demod_chip; u8 initialised:1; }; static int debug; #define dprintk(args...) \ do { \ if (debug) printk(KERN_DEBUG "nxt200x: " args); \ } while (0) static int i2c_writebytes (struct nxt200x_state* state, u8 addr, u8 *buf, u8 len) { int err; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = len }; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk (KERN_WARNING "nxt200x: %s: i2c write error (addr 0x%02x, err == %i)\n", __func__, addr, err); return -EREMOTEIO; } return 0; } static int i2c_readbytes(struct nxt200x_state *state, u8 addr, u8 *buf, u8 len) { int err; struct i2c_msg msg = { .addr = addr, .flags = I2C_M_RD, .buf = buf, .len = len }; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk (KERN_WARNING "nxt200x: %s: i2c read error (addr 0x%02x, err == %i)\n", __func__, addr, err); return -EREMOTEIO; } return 0; } static int nxt200x_writebytes (struct nxt200x_state* state, u8 reg, const u8 *buf, u8 len) { u8 buf2 [len+1]; int err; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf2, .len = len + 1 }; buf2[0] = reg; memcpy(&buf2[1], buf, len); if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk (KERN_WARNING "nxt200x: %s: i2c write error (addr 0x%02x, err == %i)\n", __func__, state->config->demod_address, err); return -EREMOTEIO; } return 0; } static int nxt200x_readbytes(struct nxt200x_state *state, u8 reg, u8 *buf, u8 len) { u8 reg2 [] = { reg }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 
0, .buf = reg2, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = buf, .len = len } }; int err; if ((err = i2c_transfer (state->i2c, msg, 2)) != 2) { printk (KERN_WARNING "nxt200x: %s: i2c read error (addr 0x%02x, err == %i)\n", __func__, state->config->demod_address, err); return -EREMOTEIO; } return 0; } static u16 nxt200x_crc(u16 crc, u8 c) { u8 i; u16 input = (u16) c & 0xFF; input<<=8; for(i=0; i<8; i++) { if((crc^input) & 0x8000) crc=(crc<<1)^CRC_CCIT_MASK; else crc<<=1; input<<=1; } return crc; } static int nxt200x_writereg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len) { u8 attr, len2, buf; dprintk("%s\n", __func__); /* set mutli register register */ nxt200x_writebytes(state, 0x35, &reg, 1); /* send the actual data */ nxt200x_writebytes(state, 0x36, data, len); switch (state->demod_chip) { case NXT2002: len2 = len; buf = 0x02; break; case NXT2004: /* probably not right, but gives correct values */ attr = 0x02; if (reg & 0x80) { attr = attr << 1; if (reg & 0x04) attr = attr >> 1; } /* set write bit */ len2 = ((attr << 4) | 0x10) | len; buf = 0x80; break; default: return -EINVAL; break; } /* set multi register length */ nxt200x_writebytes(state, 0x34, &len2, 1); /* toggle the multireg write bit */ nxt200x_writebytes(state, 0x21, &buf, 1); nxt200x_readbytes(state, 0x21, &buf, 1); switch (state->demod_chip) { case NXT2002: if ((buf & 0x02) == 0) return 0; break; case NXT2004: if (buf == 0) return 0; break; default: return -EINVAL; break; } printk(KERN_WARNING "nxt200x: Error writing multireg register 0x%02X\n",reg); return 0; } static int nxt200x_readreg_multibyte (struct nxt200x_state* state, u8 reg, u8* data, u8 len) { int i; u8 buf, len2, attr; dprintk("%s\n", __func__); /* set mutli register register */ nxt200x_writebytes(state, 0x35, &reg, 1); switch (state->demod_chip) { case NXT2002: /* set multi register length */ len2 = len & 0x80; nxt200x_writebytes(state, 0x34, &len2, 1); /* read the actual data */ 
nxt200x_readbytes(state, reg, data, len); return 0; break; case NXT2004: /* probably not right, but gives correct values */ attr = 0x02; if (reg & 0x80) { attr = attr << 1; if (reg & 0x04) attr = attr >> 1; } /* set multi register length */ len2 = (attr << 4) | len; nxt200x_writebytes(state, 0x34, &len2, 1); /* toggle the multireg bit*/ buf = 0x80; nxt200x_writebytes(state, 0x21, &buf, 1); /* read the actual data */ for(i = 0; i < len; i++) { nxt200x_readbytes(state, 0x36 + i, &data[i], 1); } return 0; break; default: return -EINVAL; break; } } static void nxt200x_microcontroller_stop (struct nxt200x_state* state) { u8 buf, stopval, counter = 0; dprintk("%s\n", __func__); /* set correct stop value */ switch (state->demod_chip) { case NXT2002: stopval = 0x40; break; case NXT2004: stopval = 0x10; break; default: stopval = 0; break; } buf = 0x80; nxt200x_writebytes(state, 0x22, &buf, 1); while (counter < 20) { nxt200x_readbytes(state, 0x31, &buf, 1); if (buf & stopval) return; msleep(10); counter++; } printk(KERN_WARNING "nxt200x: Timeout waiting for nxt200x to stop. 
This is ok after firmware upload.\n"); return; } static void nxt200x_microcontroller_start (struct nxt200x_state* state) { u8 buf; dprintk("%s\n", __func__); buf = 0x00; nxt200x_writebytes(state, 0x22, &buf, 1); } static void nxt2004_microcontroller_init (struct nxt200x_state* state) { u8 buf[9]; u8 counter = 0; dprintk("%s\n", __func__); buf[0] = 0x00; nxt200x_writebytes(state, 0x2b, buf, 1); buf[0] = 0x70; nxt200x_writebytes(state, 0x34, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x35, buf, 1); buf[0] = 0x01; buf[1] = 0x23; buf[2] = 0x45; buf[3] = 0x67; buf[4] = 0x89; buf[5] = 0xAB; buf[6] = 0xCD; buf[7] = 0xEF; buf[8] = 0xC0; nxt200x_writebytes(state, 0x36, buf, 9); buf[0] = 0x80; nxt200x_writebytes(state, 0x21, buf, 1); while (counter < 20) { nxt200x_readbytes(state, 0x21, buf, 1); if (buf[0] == 0) return; msleep(10); counter++; } printk(KERN_WARNING "nxt200x: Timeout waiting for nxt2004 to init.\n"); return; } static int nxt200x_writetuner (struct nxt200x_state* state, u8* data) { u8 buf, count = 0; dprintk("%s\n", __func__); dprintk("Tuner Bytes: %02X %02X %02X %02X\n", data[1], data[2], data[3], data[4]); /* if NXT2004, write directly to tuner. if NXT2002, write through NXT chip. 
* direct write is required for Philips TUV1236D and ALPS TDHU2 */ switch (state->demod_chip) { case NXT2004: if (i2c_writebytes(state, data[0], data+1, 4)) printk(KERN_WARNING "nxt200x: error writing to tuner\n"); /* wait until we have a lock */ while (count < 20) { i2c_readbytes(state, data[0], &buf, 1); if (buf & 0x40) return 0; msleep(100); count++; } printk("nxt2004: timeout waiting for tuner lock\n"); break; case NXT2002: /* set the i2c transfer speed to the tuner */ buf = 0x03; nxt200x_writebytes(state, 0x20, &buf, 1); /* setup to transfer 4 bytes via i2c */ buf = 0x04; nxt200x_writebytes(state, 0x34, &buf, 1); /* write actual tuner bytes */ nxt200x_writebytes(state, 0x36, data+1, 4); /* set tuner i2c address */ buf = data[0] << 1; nxt200x_writebytes(state, 0x35, &buf, 1); /* write UC Opmode to begin transfer */ buf = 0x80; nxt200x_writebytes(state, 0x21, &buf, 1); while (count < 20) { nxt200x_readbytes(state, 0x21, &buf, 1); if ((buf & 0x80)== 0x00) return 0; msleep(100); count++; } printk("nxt2002: timeout error writing tuner\n"); break; default: return -EINVAL; break; } return 0; } static void nxt200x_agc_reset(struct nxt200x_state* state) { u8 buf; dprintk("%s\n", __func__); switch (state->demod_chip) { case NXT2002: buf = 0x08; nxt200x_writebytes(state, 0x08, &buf, 1); buf = 0x00; nxt200x_writebytes(state, 0x08, &buf, 1); break; case NXT2004: nxt200x_readreg_multibyte(state, 0x08, &buf, 1); buf = 0x08; nxt200x_writereg_multibyte(state, 0x08, &buf, 1); buf = 0x00; nxt200x_writereg_multibyte(state, 0x08, &buf, 1); break; default: break; } return; } static int nxt2002_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) { struct nxt200x_state* state = fe->demodulator_priv; u8 buf[3], written = 0, chunkpos = 0; u16 rambase, position, crc = 0; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes\n", fw->size); /* Get the RAM base for this nxt2002 */ nxt200x_readbytes(state, 0x10, buf, 1); if (buf[0] & 0x10) rambase = 0x1000; else rambase = 
0x0000; dprintk("rambase on this nxt2002 is %04X\n", rambase); /* Hold the micro in reset while loading firmware */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf, 1); for (position = 0; position < fw->size; position++) { if (written == 0) { crc = 0; chunkpos = 0x28; buf[0] = ((rambase + position) >> 8); buf[1] = (rambase + position) & 0xFF; buf[2] = 0x81; /* write starting address */ nxt200x_writebytes(state, 0x29, buf, 3); } written++; chunkpos++; if ((written % 4) == 0) nxt200x_writebytes(state, chunkpos, &fw->data[position-3], 4); crc = nxt200x_crc(crc, fw->data[position]); if ((written == 255) || (position+1 == fw->size)) { /* write remaining bytes of firmware */ nxt200x_writebytes(state, chunkpos+4-(written %4), &fw->data[position-(written %4) + 1], written %4); buf[0] = crc << 8; buf[1] = crc & 0xFF; /* write crc */ nxt200x_writebytes(state, 0x2C, buf, 2); /* do a read to stop things */ nxt200x_readbytes(state, 0x2A, buf, 1); /* set transfer mode to complete */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf, 1); written = 0; } } return 0; }; static int nxt2004_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) { struct nxt200x_state* state = fe->demodulator_priv; u8 buf[3]; u16 rambase, position, crc=0; dprintk("%s\n", __func__); dprintk("Firmware is %zu bytes\n", fw->size); /* set rambase */ rambase = 0x1000; /* hold the micro in reset while loading firmware */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf,1); /* calculate firmware CRC */ for (position = 0; position < fw->size; position++) { crc = nxt200x_crc(crc, fw->data[position]); } buf[0] = rambase >> 8; buf[1] = rambase & 0xFF; buf[2] = 0x81; /* write starting address */ nxt200x_writebytes(state,0x29,buf,3); for (position = 0; position < fw->size;) { nxt200x_writebytes(state, 0x2C, &fw->data[position], fw->size-position > 255 ? 255 : fw->size-position); position += (fw->size-position > 255 ? 
255 : fw->size-position); } buf[0] = crc >> 8; buf[1] = crc & 0xFF; dprintk("firmware crc is 0x%02X 0x%02X\n", buf[0], buf[1]); /* write crc */ nxt200x_writebytes(state, 0x2C, buf,2); /* do a read to stop things */ nxt200x_readbytes(state, 0x2C, buf, 1); /* set transfer mode to complete */ buf[0] = 0x80; nxt200x_writebytes(state, 0x2B, buf,1); return 0; }; static int nxt200x_setup_frontend_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct nxt200x_state* state = fe->demodulator_priv; u8 buf[5]; /* stop the micro first */ nxt200x_microcontroller_stop(state); if (state->demod_chip == NXT2004) { /* make sure demod is set to digital */ buf[0] = 0x04; nxt200x_writebytes(state, 0x14, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x17, buf, 1); } /* set additional params */ switch (p->modulation) { case QAM_64: case QAM_256: /* Set punctured clock for QAM */ /* This is just a guess since I am unable to test it */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 1); break; case VSB_8: /* Set non-punctured clock for VSB */ if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); break; default: return -EINVAL; break; } if (fe->ops.tuner_ops.calc_regs) { /* get tuning information */ fe->ops.tuner_ops.calc_regs(fe, buf, 5); /* write frequency information */ nxt200x_writetuner(state, buf); } /* reset the agc now that tuning has been completed */ nxt200x_agc_reset(state); /* set target power level */ switch (p->modulation) { case QAM_64: case QAM_256: buf[0] = 0x74; break; case VSB_8: buf[0] = 0x70; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x42, buf, 1); /* configure sdm */ switch (state->demod_chip) { case NXT2002: buf[0] = 0x87; break; case NXT2004: buf[0] = 0x07; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x57, buf, 1); /* write sdm1 input */ buf[0] = 0x10; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: 
nxt200x_writereg_multibyte(state, 0x58, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x58, buf, 2); break; default: return -EINVAL; break; } /* write sdmx input */ switch (p->modulation) { case QAM_64: buf[0] = 0x68; break; case QAM_256: buf[0] = 0x64; break; case VSB_8: buf[0] = 0x60; break; default: return -EINVAL; break; } buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x5C, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x5C, buf, 2); break; default: return -EINVAL; break; } /* write adc power lpf fc */ buf[0] = 0x05; nxt200x_writebytes(state, 0x43, buf, 1); if (state->demod_chip == NXT2004) { /* write ??? */ buf[0] = 0x00; buf[1] = 0x00; nxt200x_writebytes(state, 0x46, buf, 2); } /* write accumulator2 input */ buf[0] = 0x80; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x4B, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x4B, buf, 2); break; default: return -EINVAL; break; } /* write kg1 */ buf[0] = 0x00; nxt200x_writebytes(state, 0x4D, buf, 1); /* write sdm12 lpf fc */ buf[0] = 0x44; nxt200x_writebytes(state, 0x55, buf, 1); /* write agc control reg */ buf[0] = 0x04; nxt200x_writebytes(state, 0x41, buf, 1); if (state->demod_chip == NXT2004) { nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x24; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x04; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x44; nxt200x_writereg_multibyte(state, 0x80, buf, 1); } /* write agc ucgp0 */ switch (p->modulation) { case QAM_64: buf[0] = 0x02; break; case QAM_256: buf[0] = 0x03; break; case VSB_8: buf[0] = 0x00; break; default: return -EINVAL; break; } nxt200x_writebytes(state, 0x30, buf, 1); /* write agc control reg */ buf[0] = 0x00; nxt200x_writebytes(state, 0x41, buf, 1); /* write accumulator2 input */ buf[0] = 0x80; buf[1] = 0x00; switch (state->demod_chip) { case NXT2002: nxt200x_writereg_multibyte(state, 0x49, buf, 2); nxt200x_writereg_multibyte(state, 0x4B, buf, 2); break; case NXT2004: nxt200x_writebytes(state, 0x49, buf, 2); nxt200x_writebytes(state, 0x4B, buf, 2); break; default: return -EINVAL; break; } /* write agc control reg */ buf[0] = 0x04; nxt200x_writebytes(state, 0x41, buf, 1); nxt200x_microcontroller_start(state); if (state->demod_chip == NXT2004) { nxt2004_microcontroller_init(state); /* ???? 
*/ buf[0] = 0xF0; buf[1] = 0x00; nxt200x_writebytes(state, 0x5C, buf, 2); } /* adjacent channel detection should be done here, but I don't have any stations with this need so I cannot test it */ return 0; } static int nxt200x_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct nxt200x_state* state = fe->demodulator_priv; u8 lock; nxt200x_readbytes(state, 0x31, &lock, 1); *status = 0; if (lock & 0x20) { *status |= FE_HAS_SIGNAL; *status |= FE_HAS_CARRIER; *status |= FE_HAS_VITERBI; *status |= FE_HAS_SYNC; *status |= FE_HAS_LOCK; } return 0; } static int nxt200x_read_ber(struct dvb_frontend* fe, u32* ber) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[3]; nxt200x_readreg_multibyte(state, 0xE6, b, 3); *ber = ((b[0] << 8) + b[1]) * 8; return 0; } static int nxt200x_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[2]; u16 temp = 0; /* setup to read cluster variance */ b[0] = 0x00; nxt200x_writebytes(state, 0xA1, b, 1); /* get multreg val */ nxt200x_readreg_multibyte(state, 0xA6, b, 2); temp = (b[0] << 8) | b[1]; *strength = ((0x7FFF - temp) & 0x0FFF) * 16; return 0; } static int nxt200x_read_snr(struct dvb_frontend* fe, u16* snr) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[2]; u16 temp = 0, temp2; u32 snrdb = 0; /* setup to read cluster variance */ b[0] = 0x00; nxt200x_writebytes(state, 0xA1, b, 1); /* get multreg val from 0xA6 */ nxt200x_readreg_multibyte(state, 0xA6, b, 2); temp = (b[0] << 8) | b[1]; temp2 = 0x7FFF - temp; /* snr will be in db */ if (temp2 > 0x7F00) snrdb = 1000*24 + ( 1000*(30-24) * ( temp2 - 0x7F00 ) / ( 0x7FFF - 0x7F00 ) ); else if (temp2 > 0x7EC0) snrdb = 1000*18 + ( 1000*(24-18) * ( temp2 - 0x7EC0 ) / ( 0x7F00 - 0x7EC0 ) ); else if (temp2 > 0x7C00) snrdb = 1000*12 + ( 1000*(18-12) * ( temp2 - 0x7C00 ) / ( 0x7EC0 - 0x7C00 ) ); else snrdb = 1000*0 + ( 1000*(12-0) * ( temp2 - 0 ) / ( 0x7C00 - 0 ) ); /* the value reported back from the 
frontend will be FFFF=32db 0000=0db */ *snr = snrdb * (0xFFFF/32000); return 0; } static int nxt200x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct nxt200x_state* state = fe->demodulator_priv; u8 b[3]; nxt200x_readreg_multibyte(state, 0xE6, b, 3); *ucblocks = b[2]; return 0; } static int nxt200x_sleep(struct dvb_frontend* fe) { return 0; } static int nxt2002_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; const struct firmware *fw; int ret; u8 buf[2]; /* request the firmware, this will block until someone uploads it */ printk("nxt2002: Waiting for firmware upload (%s)...\n", NXT2002_DEFAULT_FIRMWARE); ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE, state->i2c->dev.parent); printk("nxt2002: Waiting for firmware upload(2)...\n"); if (ret) { printk("nxt2002: No firmware uploaded (timeout or file not found?)\n"); return ret; } ret = nxt2002_load_firmware(fe, fw); release_firmware(fw); if (ret) { printk("nxt2002: Writing firmware to device failed\n"); return ret; } printk("nxt2002: Firmware upload complete\n"); /* Put the micro into reset */ nxt200x_microcontroller_stop(state); /* ensure transfer is complete */ buf[0]=0x00; nxt200x_writebytes(state, 0x2B, buf, 1); /* Put the micro into reset for real this time */ nxt200x_microcontroller_stop(state); /* soft reset everything (agc,frontend,eq,fec)*/ buf[0] = 0x0F; nxt200x_writebytes(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x08, buf, 1); /* write agc sdm configure */ buf[0] = 0xF1; nxt200x_writebytes(state, 0x57, buf, 1); /* write mod output format */ buf[0] = 0x20; nxt200x_writebytes(state, 0x09, buf, 1); /* write fec mpeg mode */ buf[0] = 0x7E; buf[1] = 0x00; nxt200x_writebytes(state, 0xE9, buf, 2); /* write mux selection */ buf[0] = 0x00; nxt200x_writebytes(state, 0xCC, buf, 1); return 0; } static int nxt2004_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; const struct firmware *fw; int ret; u8 buf[3]; /* 
??? */ buf[0]=0x00; nxt200x_writebytes(state, 0x1E, buf, 1); /* request the firmware, this will block until someone uploads it */ printk("nxt2004: Waiting for firmware upload (%s)...\n", NXT2004_DEFAULT_FIRMWARE); ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE, state->i2c->dev.parent); printk("nxt2004: Waiting for firmware upload(2)...\n"); if (ret) { printk("nxt2004: No firmware uploaded (timeout or file not found?)\n"); return ret; } ret = nxt2004_load_firmware(fe, fw); release_firmware(fw); if (ret) { printk("nxt2004: Writing firmware to device failed\n"); return ret; } printk("nxt2004: Firmware upload complete\n"); /* ensure transfer is complete */ buf[0] = 0x01; nxt200x_writebytes(state, 0x19, buf, 1); nxt2004_microcontroller_init(state); nxt200x_microcontroller_stop(state); nxt200x_microcontroller_stop(state); nxt2004_microcontroller_init(state); nxt200x_microcontroller_stop(state); /* soft reset everything (agc,frontend,eq,fec)*/ buf[0] = 0xFF; nxt200x_writereg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); /* write agc sdm configure */ buf[0] = 0xD7; nxt200x_writebytes(state, 0x57, buf, 1); /* ???*/ buf[0] = 0x07; buf[1] = 0xfe; nxt200x_writebytes(state, 0x35, buf, 2); buf[0] = 0x12; nxt200x_writebytes(state, 0x34, buf, 1); buf[0] = 0x80; nxt200x_writebytes(state, 0x21, buf, 1); /* ???*/ buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); /* ???*/ buf[0] = 0x01; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* write fec mpeg mode */ buf[0] = 0x7E; buf[1] = 0x00; nxt200x_writebytes(state, 0xE9, buf, 2); /* write mux selection */ buf[0] = 0x00; nxt200x_writebytes(state, 0xCC, buf, 1); /* ???*/ nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); /* ???*/ nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x01; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x70; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x31; buf[1] = 0x5E; buf[2] = 0x66; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x40; nxt200x_writereg_multibyte(state, 0x80, buf, 1); nxt200x_readbytes(state, 0x10, buf, 1); buf[0] = 0x10; nxt200x_writebytes(state, 0x10, buf, 1); nxt200x_readbytes(state, 0x0A, buf, 1); buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); nxt2004_microcontroller_init(state); buf[0] = 0x21; nxt200x_writebytes(state, 0x0A, buf, 1); buf[0] = 0x7E; nxt200x_writebytes(state, 0xE9, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0xEA, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* soft reset? 
*/ nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x10; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x08, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x08, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x04; nxt200x_writereg_multibyte(state, 0x80, buf, 1); buf[0] = 0x00; nxt200x_writereg_multibyte(state, 0x81, buf, 1); buf[0] = 0x80; buf[1] = 0x00; buf[2] = 0x00; nxt200x_writereg_multibyte(state, 0x82, buf, 3); nxt200x_readreg_multibyte(state, 0x88, buf, 1); buf[0] = 0x11; nxt200x_writereg_multibyte(state, 0x88, buf, 1); nxt200x_readreg_multibyte(state, 0x80, buf, 1); buf[0] = 0x44; nxt200x_writereg_multibyte(state, 0x80, buf, 1); /* initialize tuner */ nxt200x_readbytes(state, 0x10, buf, 1); buf[0] = 0x12; nxt200x_writebytes(state, 0x10, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x13, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x16, buf, 1); buf[0] = 0x04; nxt200x_writebytes(state, 0x14, buf, 1); buf[0] = 0x00; nxt200x_writebytes(state, 0x14, buf, 1); nxt200x_writebytes(state, 0x17, buf, 1); nxt200x_writebytes(state, 0x14, buf, 1); nxt200x_writebytes(state, 0x17, buf, 1); return 0; } static int nxt200x_init(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; int ret = 0; if (!state->initialised) { switch (state->demod_chip) { case NXT2002: ret = nxt2002_init(fe); break; case NXT2004: ret = nxt2004_init(fe); break; default: return -EINVAL; break; } state->initialised = 1; } return ret; } static int nxt200x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 500; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void nxt200x_release(struct dvb_frontend* fe) { struct nxt200x_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops nxt200x_ops; struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config, struct 
i2c_adapter* i2c) { struct nxt200x_state* state = NULL; u8 buf [] = {0,0,0,0,0}; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct nxt200x_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->initialised = 0; /* read card id */ nxt200x_readbytes(state, 0x00, buf, 5); dprintk("NXT info: %02X %02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); /* set demod chip */ switch (buf[0]) { case 0x04: state->demod_chip = NXT2002; printk("nxt200x: NXT2002 Detected\n"); break; case 0x05: state->demod_chip = NXT2004; printk("nxt200x: NXT2004 Detected\n"); break; default: goto error; } /* make sure demod chip is supported */ switch (state->demod_chip) { case NXT2002: if (buf[0] != 0x04) goto error; /* device id */ if (buf[1] != 0x02) goto error; /* fab id */ if (buf[2] != 0x11) goto error; /* month */ if (buf[3] != 0x20) goto error; /* year msb */ if (buf[4] != 0x00) goto error; /* year lsb */ break; case NXT2004: if (buf[0] != 0x05) goto error; /* device id */ break; default: goto error; } /* create dvb_frontend */ memcpy(&state->frontend.ops, &nxt200x_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); printk("Unknown/Unsupported NXT chip: %02X %02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return NULL; } static struct dvb_frontend_ops nxt200x_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Nextwave NXT200X VSB/QAM frontend", .frequency_min = 54000000, .frequency_max = 860000000, .frequency_stepsize = 166666, /* stepsize is just a guess */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_8VSB | FE_CAN_QAM_64 | FE_CAN_QAM_256 }, .release = nxt200x_release, .init = nxt200x_init, .sleep = nxt200x_sleep, .set_frontend = nxt200x_setup_frontend_parameters, .get_tune_settings = 
nxt200x_get_tune_settings, .read_status = nxt200x_read_status, .read_ber = nxt200x_read_ber, .read_signal_strength = nxt200x_read_signal_strength, .read_snr = nxt200x_read_snr, .read_ucblocks = nxt200x_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver"); MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(nxt200x_attach);
gpl-2.0
gandalf-3d/mordorKernel-note3
drivers/gpu/drm/radeon/cayman_blit_shaders.c
7327
8320
/* * Copyright 2010 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include <linux/bug.h> #include <linux/types.h> #include <linux/kernel.h> /* * evergreen cards need to use the 3D engine to blit data which requires * quite a bit of hw state setup. Rather than pull the whole 3D driver * (which normally generates the 3D state) into the DRM, we opt to use * statically generated state tables. The regsiter state and shaders * were hand generated to support blitting functionality. See the 3D * driver or documentation for descriptions of the registers and * shader instructions. 
*/ const u32 cayman_default_state[] = { 0xc0066900, 0x00000000, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000000, /* DB_COUNT_CONTROL */ 0x00000000, /* DB_DEPTH_VIEW */ 0x0000002a, /* DB_RENDER_OVERRIDE */ 0x00000000, /* DB_RENDER_OVERRIDE2 */ 0x00000000, /* DB_HTILE_DATA_BASE */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0036900, 0x0000000f, 0x00000000, /* DB_DEPTH_INFO */ 0x00000000, /* DB_Z_INFO */ 0x00000000, /* DB_STENCIL_INFO */ 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00d6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, /* PA_SC_CLIPRECT_0_BR */ 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0xaaaaaaaa, /* PA_SC_EDGERULE */ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0226900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ 0xc0016900, 0x000000d4, 0x00000000, /* SX_MISC */ 0xc0026900, 0x000000d9, 0x00000000, /* CP_RINGID */ 0x00000000, /* CP_VMID */ 0xc0096900, 0x00000100, 0x00ffffff, /* VGT_MAX_VTX_INDX */ 0x00000000, /* VGT_MIN_VTX_INDX */ 0x00000000, /* VGT_INDX_OFFSET */ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, /* CB_BLEND_GREEN */ 0x00000000, /* CB_BLEND_BLUE */ 0x00000000, /* CB_BLEND_ALPHA */ 0xc0016900, 0x00000187, 0x00000100, /* SPI_VS_OUT_ID_0 */ 0xc0026900, 
0x00000191, 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */ 0xc0016900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0xc0106900, 0x000001b3, 0x20000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00100000, /* SPI_BARYC_CNTL */ 0x00000000, /* SPI_PS_IN_CONTROL_2 */ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */ 0x00000000, /* SPI_GPR_MGMT */ 0x00000000, /* SPI_LDS_MGMT */ 0x00000000, /* SPI_STACK_MGMT */ 0x00000000, /* SPI_WAVE_MGMT_1 */ 0x00000000, /* SPI_WAVE_MGMT_2 */ 0xc0016900, 0x000001e0, 0x00000000, /* CB_BLEND0_CONTROL */ 0xc00e6900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0x00000000, /* DB_EQAA */ 0x00cc0010, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CONTROL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000004, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0xc0026900, 0x00000229, 0x00000000, /* SQ_PGM_START_FS */ 0x00000000, 0xc0016900, 0x0000023b, 0x00000000, /* SQ_LDS_ALLOC_PS */ 0xc0066900, 0x00000240, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000247, 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE */ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, /* VGT_GS_MODE */ 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MODE_CNTL_0 */ 0x00000000, /* PA_SC_MODE_CNTL_1 */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ 0xc0026900, 0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, 0xc0026900, 0x000002ad, 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, 0xc0016900, 0x000002d5, 0x00000000, /* VGT_SHADER_STAGES_EN */ 0xc0016900, 0x000002dc, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc0066900, 0x000002de, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0026900, 0x000002e5, 0x00000000, /* VGT_STRMOUT_CONFIG */ 0x00000000, 0xc01b6900, 0x000002f5, 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */ 0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x00000005, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */ 0xffffffff, 0xc0026900, 0x00000316, 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 0x00000010, /* */ }; const u32 cayman_vs[] = { 0x00000004, 0x80400400, 0x0000a03c, 0x95000688, 0x00004000, 0x15000688, 0x00000000, 0x88000000, 0x04000000, 0x67961001, #ifdef __BIG_ENDIAN 0x00020000, #else 0x00000000, #endif 0x00000000, 0x04000000, 0x67961000, #ifdef __BIG_ENDIAN 0x00020008, #else 0x00000008, #endif 0x00000000, }; const u32 cayman_ps[] = { 0x00000004, 0xa00c0000, 0x00000008, 0x80400000, 0x00000000, 0x95000688, 0x00000000, 0x88000000, 0x00380400, 0x00146b10, 0x00380000, 0x20146b10, 
0x00380400, 0x40146b00, 0x80380000, 0x60146b00, 0x00000010, 0x000d1000, 0xb0800000, 0x00000000, }; const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps); const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs); const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
gpl-2.0
sinutech/sinuos-kernel
drivers/staging/speakup/speakup_decext.c
7583
7240
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "serialio.h" #include "speakup.h" #define DRV_VERSION "2.14" #define SYNTH_CLEAR 0x03 #define PROCSPEECH 0x0b static unsigned char last_char; static inline u_char get_last_char(void) { u_char avail = inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR; if (avail) last_char = inb_p(speakup_info.port_tts + UART_RX); return last_char; } static inline bool synth_full(void) { return get_last_char() == 0x13; } static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int in_escape; static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 222]" } }, { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } }, { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } }, { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } }, { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/decext. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_decext = { .name = "decext", .version = DRV_VERSION, .long_name = "Dectalk External", .init = "[:pe -380]", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .flags = SF_DEC, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = serial_synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "decext", }, }; static void do_catch_up(struct spk_synth *synth) { u_char ch; static u_char last = '\0'; unsigned long flags; unsigned long jiff_max; struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val = 0; int delay_time_val = 0; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (synth_full() || !spk_serial_out(ch)) { 
schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) spk_serial_out(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) spk_serial_out(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; } if (!in_escape) spk_serial_out(PROCSPEECH); } static void synth_flush(struct spk_synth *synth) { in_escape = 0; spk_synth_immediate(synth, "\033P;10z\033\\"); } module_param_named(ser, synth_decext.ser, int, S_IRUGO); module_param_named(start, synth_decext.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init decext_init(void) { return synth_add(&synth_decext); } static void __exit decext_exit(void) { synth_remove(&synth_decext); } module_init(decext_init); module_exit(decext_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DECtalk External synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
ska/linux-fsl
arch/mips/math-emu/ieee754.c
7839
3963
/* ieee754 floating point arithmetic * single and double precision * * BUGS * not much dp done * doesn't generate IEEE754_INEXACT * */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754int.h" #include "ieee754sp.h" #include "ieee754dp.h" #define DP_EBIAS 1023 #define DP_EMIN (-1022) #define DP_EMAX 1023 #define SP_EBIAS 127 #define SP_EMIN (-126) #define SP_EMAX 127 /* special constants */ #if (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN) || defined(__MIPSEL__) #define SPSTR(s, b, m) {m, b, s} #define DPSTR(s, b, mh, ml) {ml, mh, b, s} #endif #ifdef __MIPSEB__ #define SPSTR(s, b, m) {s, b, m} #define DPSTR(s, b, mh, ml) {s, b, mh, ml} #endif const struct ieee754dp_konst __ieee754dp_spcvals[] = { DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */ DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */ DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */ DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */ DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */ DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */ DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */ DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + 
indef quiet Nan */ DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */ DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */ DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */ DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */ DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */ DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */ }; const struct ieee754sp_konst __ieee754sp_spcvals[] = { SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 0), /* + zero */ SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 0), /* - zero */ SPSTR(0, SP_EBIAS, 0), /* + 1.0 */ SPSTR(1, SP_EBIAS, 0), /* - 1.0 */ SPSTR(0, 3 + SP_EBIAS, 0x200000), /* + 10.0 */ SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */ SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */ SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */ SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */ SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */ SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */ SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */ SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */ SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */ SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 1), /* - min denormal */ SPSTR(0, 31 + SP_EBIAS, 0), /* + 1.0e31 */ SPSTR(0, 63 + SP_EBIAS, 0), /* + 1.0e63 */ }; int ieee754si_xcpt(int r, const char *op, ...) { struct ieee754xctx ax; if (!TSTX()) return r; ax.op = op; ax.rt = IEEE754_RT_SI; ax.rv.si = r; va_start(ax.ap, op); ieee754_xcpt(&ax); va_end(ax.ap); return ax.rv.si; } s64 ieee754di_xcpt(s64 r, const char *op, ...) { struct ieee754xctx ax; if (!TSTX()) return r; ax.op = op; ax.rt = IEEE754_RT_DI; ax.rv.di = r; va_start(ax.ap, op); ieee754_xcpt(&ax); va_end(ax.ap); return ax.rv.di; }
gpl-2.0
qpzm1258/shooterct-ics-3.0.16
arch/mips/math-emu/dp_sub.c
7839
4950
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y) { COMPXDP; COMPYDP; EXPLODEXDP; EXPLODEYDP; CLEARCX; FLUSHXDP; FLUSHYDP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, 
IEEE754_CLASS_QNAN): return y; case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; /* Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): if (xs != ys) return x; SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): return ieee754dp_inf(ys ^ 1); case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return x; /* Zero handling */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): if (xs != ys) return x; else return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): /* quick fix up */ DPSIGN(y) ^= 1; return y; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; /* FALL THROUGH */ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): /* normalize ym,ye */ DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): /* normalize xm,xe */ DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } /* flip sign of y and handle as add */ ys ^= 1; assert(xm & DP_HIDDEN_BIT); assert(ym & DP_HIDDEN_BIT); /* provide guard,round and stick bit dpace */ xm <<= 3; ym <<= 3; if (xe > ye) { /* have to shift y fraction right to align */ int s = xe - ye; ym = XDPSRS(ym, s); ye += s; } else if (ye > xe) { /* have to shift x fraction right to align */ int s = ye - xe; xm = XDPSRS(xm, s); xe += s; } 
assert(xe == ye); assert(xe <= DP_EMAX); if (xs == ys) { /* generate 28 bit result of adding two 27 bit numbers */ xm = xm + ym; xe = xe; xs = xs; if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ xm = XDPSRS1(xm); /* shift preserving sticky */ xe++; } } else { if (xm >= ym) { xm = xm - ym; xe = xe; xs = xs; } else { xm = ym - xm; xe = xe; xs = ys; } if (xm == 0) { if (ieee754_csr.rm == IEEE754_RD) return ieee754dp_zero(1); /* round negative inf. => sign = -1 */ else return ieee754dp_zero(0); /* other round modes => sign = 1 */ } /* normalize to rounding precision */ while ((xm >> (DP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } DPNORMRET2(xs, xe, xm, "sub", x, y); }
gpl-2.0
mastero9017/hammerhead-5.0
drivers/ide/pdc202xx_old.c
9119
10348
/*
 *  Copyright (C) 1998-2002		Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2006-2007, 2009	MontaVista Software, Inc.
 *  Copyright (C) 2007-2010		Bartlomiej Zolnierkiewicz
 *
 *  Portions Copyright (C) 1999 Promise Technology, Inc.
 *  Author: Frank Tiernan (frankt@promise.com)
 *  Released under terms of General Public License
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/io.h>

#define DRV_NAME "pdc202xx_old"

/*
 * Program the per-drive timing registers A/B/C (PCI config space,
 * offset 0x60 + 4 * drive unit) for the transfer mode selected in
 * drive->dma_mode.  Handles both PIO and (U)DMA modes: the TA/TB/TC
 * nibbles below are the chip's timing values for each mode.
 */
static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 drive_pci = 0x60 + (drive->dn << 2);
	const u8 speed = drive->dma_mode;

	u8 AP = 0, BP = 0, CP = 0;
	u8 TA = 0, TB = 0, TC = 0;

	/* read current contents of timing registers A, B and C */
	pci_read_config_byte(dev, drive_pci,     &AP);
	pci_read_config_byte(dev, drive_pci + 1, &BP);
	pci_read_config_byte(dev, drive_pci + 2, &CP);

	switch(speed) {
		case XFER_UDMA_5:
		case XFER_UDMA_4:	TB = 0x20; TC = 0x01; break;
		case XFER_UDMA_2:	TB = 0x20; TC = 0x01; break;
		case XFER_UDMA_3:
		case XFER_UDMA_1:	TB = 0x40; TC = 0x02; break;
		case XFER_UDMA_0:
		case XFER_MW_DMA_2:	TB = 0x60; TC = 0x03; break;
		case XFER_MW_DMA_1:	TB = 0x60; TC = 0x04; break;
		case XFER_MW_DMA_0:	TB = 0xE0; TC = 0x0F; break;
		case XFER_PIO_4:	TA = 0x01; TB = 0x04; break;
		case XFER_PIO_3:	TA = 0x02; TB = 0x06; break;
		case XFER_PIO_2:	TA = 0x03; TB = 0x08; break;
		case XFER_PIO_1:	TA = 0x05; TB = 0x0C; break;
		case XFER_PIO_0:
		default:		TA = 0x09; TB = 0x13; break;
	}

	if (speed < XFER_SW_DMA_0) {
		/* PIO modes: registers A and B carry the timing */
		/*
		 * preserve SYNC_INT / ERDDY_EN bits while clearing
		 * Prefetch_EN / IORDY_EN / PA[3:0] bits of register A
		 */
		AP &= ~0x3f;
		if (ide_pio_need_iordy(drive, speed - XFER_PIO_0))
			AP |= 0x20;	/* set IORDY_EN bit */
		if (drive->media == ide_disk)
			AP |= 0x10;	/* set Prefetch_EN bit */
		/* clear PB[4:0] bits of register B */
		BP &= ~0x1f;
		pci_write_config_byte(dev, drive_pci,     AP | TA);
		pci_write_config_byte(dev, drive_pci + 1, BP | TB);
	} else {
		/* DMA modes: registers B and C carry the timing */
		/* clear MB[2:0] bits of register B */
		BP &= ~0xe0;
		/* clear MC[3:0] bits of register C */
		CP &= ~0x0f;
		pci_write_config_byte(dev, drive_pci + 1, BP | TB);
		pci_write_config_byte(dev, drive_pci + 2, CP | TC);
	}
}

/*
 * The hardware uses the same timing registers for PIO and DMA, so PIO
 * setup simply funnels through pdc202xx_set_mode() with dma_mode
 * temporarily set from pio_mode.
 */
static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	drive->dma_mode = drive->pio_mode;
	pdc202xx_set_mode(hwif, drive);
}

/*
 * Check the extra status byte at BAR4 + 0x1d to see whether this
 * channel raised the pending interrupt.  Returns 1 if interrupting,
 * 0 otherwise.
 */
static int pdc202xx_test_irq(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long high_16 = pci_resource_start(dev, 4);
	u8 sc1d = inb(high_16 + 0x1d);

	if (hwif->channel) {
		/*
		 * bit 7: error, bit 6: interrupting,
		 * bit 5: FIFO full, bit 4: FIFO empty
		 */
		return (sc1d & 0x40) ? 1 : 0;
	} else {
		/*
		 * bit 3: error, bit 2: interrupting,
		 * bit 1: FIFO full, bit 0: FIFO empty
		 */
		return (sc1d & 0x04) ? 1 : 0;
	}
}

/*
 * Read the cable-sense bit for this channel from PCI config word 0x50
 * (bit 10 for the primary channel, bit 11 for the secondary) to tell
 * 40-wire from 80-wire cables.
 */
static u8 pdc2026x_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);

	pci_read_config_word(dev, 0x50, &CIS);

	return (CIS & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}

/*
 * Set the control register to use the 66MHz system
 * clock for UDMA 3/4/5 mode operation when necessary.
 *
 * FIXME: this register is shared by both channels, some locking is needed
 *
 * It may also be possible to leave the 66MHz clock on
 * and readjust the timing parameters.
 */
static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
{
	unsigned long clock_reg = hwif->extra_base + 0x01;
	u8 clock = inb(clock_reg);

	outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
}

/* Counterpart of the above: switch this channel back to the 33MHz clock. */
static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
{
	unsigned long clock_reg = hwif->extra_base + 0x01;
	u8 clock = inb(clock_reg);

	outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
}

/*
 * Bring the port up with the 66MHz clock disabled; it is enabled only
 * around transfers that need it (see pdc202xx_dma_start/_end below).
 */
static void pdc2026x_init_hwif(ide_hwif_t *hwif)
{
	pdc_old_disable_66MHz_clock(hwif);
}

/*
 * Start a DMA transfer.  For UDMA > 2 the 66MHz clock is switched on
 * first.  For ATAPI devices and LBA48 disk requests the chip needs the
 * transfer word count and direction programmed into the per-channel
 * "extra" register before ide_dma_start().
 */
static void pdc202xx_dma_start(ide_drive_t *drive)
{
	if (drive->current_speed > XFER_UDMA_2)
		pdc_old_enable_66MHz_clock(drive->hwif);

	if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
		ide_hwif_t *hwif	= drive->hwif;
		struct request *rq	= hwif->rq;
		unsigned long high_16	= hwif->extra_base - 16;
		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
		u32 word_count	= 0;
		u8 clock = inb(high_16 + 0x11);

		outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
		/* sectors -> words, shifted into the count field */
		word_count = (blk_rq_sectors(rq) << 8);
		word_count = (rq_data_dir(rq) == READ) ?
					word_count | 0x05000000 :
					word_count | 0x06000000;
		outl(word_count, atapi_reg);
	}
	ide_dma_start(drive);
}

/*
 * Finish a DMA transfer: undo the extra-register and clock setup done
 * in pdc202xx_dma_start(), then complete via ide_dma_end().
 */
static int pdc202xx_dma_end(ide_drive_t *drive)
{
	if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
		ide_hwif_t *hwif	= drive->hwif;
		unsigned long high_16	= hwif->extra_base - 16;
		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
		u8 clock		= 0;

		outl(0, atapi_reg); /* zero out extra */
		clock = inb(high_16 + 0x11);
		outb(clock & ~(hwif->channel ? 0x08:0x02), high_16 + 0x11);
	}
	if (drive->current_speed > XFER_UDMA_2)
		pdc_old_disable_66MHz_clock(drive->hwif);
	return ide_dma_end(drive);
}

/*
 * One-time chip init: report the (U)DMA burst bit and master/PCI mode
 * of both channels, and force the burst bit on if it is off.
 * Always returns 0 (no dmabase just skips the setup).
 */
static int init_chipset_pdc202xx(struct pci_dev *dev)
{
	unsigned long dmabase = pci_resource_start(dev, 4);
	u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;

	if (dmabase == 0)
		goto out;

	udma_speed_flag	= inb(dmabase | 0x1f);
	primary_mode	= inb(dmabase | 0x1a);
	secondary_mode	= inb(dmabase | 0x1b);
	printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
		"Primary %s Mode " \
		"Secondary %s Mode.\n", pci_name(dev),
		(udma_speed_flag & 1) ? "EN" : "DIS",
		(primary_mode & 1) ? "MASTER" : "PCI",
		(secondary_mode & 1) ? "MASTER" : "PCI" );

	if (!(udma_speed_flag & 1)) {
		printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
			pci_name(dev), udma_speed_flag,
			(udma_speed_flag|1));
		outb(udma_speed_flag | 1, dmabase | 0x1f);
		printk("%sACTIVE\n",
			(inb(dmabase | 0x1f) & 1) ? "" : "IN");
	}
out:
	return 0;
}

/*
 * On PDC20246/PDC2026{2,3} boards not in native IDE class, the IRQ line
 * is mirrored at config offset 0xbc; make sure both copies agree.
 */
static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
					   const char *name)
{
	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
		u8 irq = 0, irq2 = 0;

		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
		/* 0xbc */
		pci_read_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, &irq2);
		if (irq != irq2) {
			pci_write_config_byte(dev,
				(PCI_INTERRUPT_LINE)|0x80, irq);	/* 0xbc */
			printk(KERN_INFO "%s %s: PCI config space interrupt "
				"mirror fixed\n", name, pci_name(dev));
		}
	}
}

#define IDE_HFLAGS_PDC202XX \
	(IDE_HFLAG_ERROR_STOPS_FIFO | \
	 IDE_HFLAG_OFF_BOARD)

/* PDC20246: no cable detection support */
static const struct ide_port_ops pdc20246_port_ops = {
	.set_pio_mode		= pdc202xx_set_pio_mode,
	.set_dma_mode		= pdc202xx_set_mode,
	.test_irq		= pdc202xx_test_irq,
};

/* PDC2026x: adds 80-wire cable detection */
static const struct ide_port_ops pdc2026x_port_ops = {
	.set_pio_mode		= pdc202xx_set_pio_mode,
	.set_dma_mode		= pdc202xx_set_mode,
	.test_irq		= pdc202xx_test_irq,
	.cable_detect		= pdc2026x_cable_detect,
};

static const struct ide_dma_ops pdc2026x_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= pdc202xx_dma_start,
	.dma_end		= pdc202xx_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};

#define DECLARE_PDC2026X_DEV(udma, sectors) \
	{ \
		.name		= DRV_NAME, \
		.init_chipset	= init_chipset_pdc202xx, \
		.init_hwif	= pdc2026x_init_hwif, \
		.port_ops	= &pdc2026x_port_ops, \
		.dma_ops	= &pdc2026x_dma_ops, \
		.host_flags	= IDE_HFLAGS_PDC202XX, \
		.pio_mask	= ATA_PIO4, \
		.mwdma_mask	= ATA_MWDMA2, \
		.udma_mask	= udma, \
		.max_sectors	= sectors, \
	}

static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
	{	/* 0: PDC20246 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_pdc202xx,
		.port_ops	= &pdc20246_port_ops,
		.dma_ops	= &sff_dma_ops,
		.host_flags	= IDE_HFLAGS_PDC202XX,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA2,
	},

	/* 1: PDC2026{2,3} */
	DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
	/* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
	DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
};

/**
 *	pdc202xx_init_one	-	called when a PDC202xx is found
 *	@dev: the pdc202xx device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 */
static int __devinit pdc202xx_init_one(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	const struct ide_port_info *d;
	u8 idx = id->driver_data;

	d = &pdc202xx_chipsets[idx];

	/* idx 0 and 1 (PDC20246/2026{2,3}) need the IRQ-mirror fixup */
	if (idx < 2)
		pdc202ata4_fixup_irq(dev, d->name);

	if (dev->vendor == PCI_DEVICE_ID_PROMISE_20265) {
		struct pci_dev *bridge = dev->bus->self;

		/* a PDC20265 behind an Intel i960 bridge is part of an
		 * I2O RAID controller and must not be claimed here */
		if (bridge &&
		    bridge->vendor == PCI_VENDOR_ID_INTEL &&
		    (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
		     bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
			printk(KERN_INFO DRV_NAME " %s: skipping Promise "
				"PDC20265 attached to I2O RAID controller\n",
				pci_name(dev));
			return -ENODEV;
		}
	}

	return ide_pci_init_one(dev, d, NULL);
}

/* driver_data is the index into pdc202xx_chipsets[] */
static const struct pci_device_id pdc202xx_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl);

static struct pci_driver pdc202xx_pci_driver = {
	.name		= "Promise_Old_IDE",
	.id_table	= pdc202xx_pci_tbl,
	.probe		= pdc202xx_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init pdc202xx_ide_init(void)
{
	return ide_pci_register_driver(&pdc202xx_pci_driver);
}

static void __exit pdc202xx_ide_exit(void)
{
	pci_unregister_driver(&pdc202xx_pci_driver);
}

module_init(pdc202xx_ide_init);
module_exit(pdc202xx_ide_exit);

MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_samsung_jf
arch/tile/kernel/tile-desc_32.c
9375
113746
/* TILEPro opcode information. * * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * * * * */ /* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */ #define BFD_RELOC(x) -1 /* Special registers. */ #define TREG_LR 55 #define TREG_SN 56 #define TREG_ZERO 63 #include <linux/stddef.h> #include <asm/tile-desc.h> const struct tilepro_opcode tilepro_opcodes[395] = { { "bpt", TILEPRO_OPC_BPT, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "info", TILEPRO_OPC_INFO, 0xf, 1, TREG_ZERO, 1, { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } }, }, { "infol", TILEPRO_OPC_INFOL, 0x3, 1, TREG_ZERO, 1, { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } }, }, { "j", TILEPRO_OPC_J, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, }, { "jal", TILEPRO_OPC_JAL, 0x2, 1, TREG_LR, 1, { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, }, { "move", TILEPRO_OPC_MOVE, 0xf, 2, TREG_ZERO, 1, { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } }, }, { "move.sn", TILEPRO_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1, { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "movei", TILEPRO_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1, { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } }, }, { "movei.sn", TILEPRO_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1, { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } }, }, { "moveli", TILEPRO_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "moveli.sn", TILEPRO_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "movelis", TILEPRO_OPC_MOVELIS, 
0x3, 2, TREG_SN, 1, { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, }, { "prefetch", TILEPRO_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } }, }, { "raise", TILEPRO_OPC_RAISE, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "add", TILEPRO_OPC_ADD, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "add.sn", TILEPRO_OPC_ADD_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addb", TILEPRO_OPC_ADDB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addb.sn", TILEPRO_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addbs_u", TILEPRO_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addbs_u.sn", TILEPRO_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addh", TILEPRO_OPC_ADDH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addh.sn", TILEPRO_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addhs", TILEPRO_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addhs.sn", TILEPRO_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "addi", TILEPRO_OPC_ADDI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "addi.sn", TILEPRO_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addib", TILEPRO_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addib.sn", TILEPRO_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addih", TILEPRO_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { 
"addih.sn", TILEPRO_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "addli", TILEPRO_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addli.sn", TILEPRO_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "addlis", TILEPRO_OPC_ADDLIS, 0x3, 3, TREG_SN, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "adds", TILEPRO_OPC_ADDS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "adds.sn", TILEPRO_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "adiffb_u", TILEPRO_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffb_u.sn", TILEPRO_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffh", TILEPRO_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "adiffh.sn", TILEPRO_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "and", TILEPRO_OPC_AND, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "and.sn", TILEPRO_OPC_AND_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "andi", TILEPRO_OPC_ANDI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "andi.sn", TILEPRO_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "auli", TILEPRO_OPC_AULI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, }, { "avgb_u", TILEPRO_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "avgb_u.sn", TILEPRO_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "avgh", TILEPRO_OPC_AVGH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, 
{ 0, } }, }, { "avgh.sn", TILEPRO_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bbns", TILEPRO_OPC_BBNS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbns.sn", TILEPRO_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbnst", TILEPRO_OPC_BBNST, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbnst.sn", TILEPRO_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbs", TILEPRO_OPC_BBS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbs.sn", TILEPRO_OPC_BBS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbst", TILEPRO_OPC_BBST, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bbst.sn", TILEPRO_OPC_BBST_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgez", TILEPRO_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgez.sn", TILEPRO_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgezt", TILEPRO_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgezt.sn", TILEPRO_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgz", TILEPRO_OPC_BGZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgz.sn", TILEPRO_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgzt", TILEPRO_OPC_BGZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bgzt.sn", TILEPRO_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bitx", TILEPRO_OPC_BITX, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "bitx.sn", TILEPRO_OPC_BITX_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "blez", 
TILEPRO_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blez.sn", TILEPRO_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blezt", TILEPRO_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blezt.sn", TILEPRO_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blz", TILEPRO_OPC_BLZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blz.sn", TILEPRO_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blzt", TILEPRO_OPC_BLZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "blzt.sn", TILEPRO_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnz", TILEPRO_OPC_BNZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnz.sn", TILEPRO_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnzt", TILEPRO_OPC_BNZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bnzt.sn", TILEPRO_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bytex", TILEPRO_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "bytex.sn", TILEPRO_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "bz", TILEPRO_OPC_BZ, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bz.sn", TILEPRO_OPC_BZ_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bzt", TILEPRO_OPC_BZT, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "bzt.sn", TILEPRO_OPC_BZT_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, }, { "clz", TILEPRO_OPC_CLZ, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "clz.sn", TILEPRO_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 
0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_32", TILEPRO_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_32.sn", TILEPRO_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_8", TILEPRO_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "crc32_8.sn", TILEPRO_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "ctz", TILEPRO_OPC_CTZ, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "ctz.sn", TILEPRO_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "drain", TILEPRO_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "dtlbpr", TILEPRO_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "dword_align", TILEPRO_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "dword_align.sn", TILEPRO_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "finv", TILEPRO_OPC_FINV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "flush", TILEPRO_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "fnop", TILEPRO_OPC_FNOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "icoh", TILEPRO_OPC_ICOH, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "ill", TILEPRO_OPC_ILL, 0xa, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { }, { 0, } }, }, { "inthb", TILEPRO_OPC_INTHB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthb.sn", TILEPRO_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthh", TILEPRO_OPC_INTHH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inthh.sn", TILEPRO_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 
}, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlb", TILEPRO_OPC_INTLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlb.sn", TILEPRO_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlh", TILEPRO_OPC_INTLH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "intlh.sn", TILEPRO_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "inv", TILEPRO_OPC_INV, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "iret", TILEPRO_OPC_IRET, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "jalb", TILEPRO_OPC_JALB, 0x2, 1, TREG_LR, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jalf", TILEPRO_OPC_JALF, 0x2, 1, TREG_LR, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jalr", TILEPRO_OPC_JALR, 0x2, 1, TREG_LR, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jalrp", TILEPRO_OPC_JALRP, 0x2, 1, TREG_LR, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jb", TILEPRO_OPC_JB, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jf", TILEPRO_OPC_JF, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, }, { "jr", TILEPRO_OPC_JR, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "jrp", TILEPRO_OPC_JRP, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "lb", TILEPRO_OPC_LB, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lb.sn", TILEPRO_OPC_LB_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lb_u", TILEPRO_OPC_LB_U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lb_u.sn", TILEPRO_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd", TILEPRO_OPC_LBADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd.sn", TILEPRO_OPC_LBADD_SN, 
0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd_u", TILEPRO_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lbadd_u.sn", TILEPRO_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lh", TILEPRO_OPC_LH, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lh.sn", TILEPRO_OPC_LH_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lh_u", TILEPRO_OPC_LH_U, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lh_u.sn", TILEPRO_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd", TILEPRO_OPC_LHADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd.sn", TILEPRO_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd_u", TILEPRO_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lhadd_u.sn", TILEPRO_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lnk", TILEPRO_OPC_LNK, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "lnk.sn", TILEPRO_OPC_LNK_SN, 0x2, 1, TREG_SN, 1, { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, }, { "lw", TILEPRO_OPC_LW, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, }, { "lw.sn", TILEPRO_OPC_LW_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lw_na", TILEPRO_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lw_na.sn", TILEPRO_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd", TILEPRO_OPC_LWADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd.sn", TILEPRO_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd_na", TILEPRO_OPC_LWADD_NA, 0x2, 3, 
TREG_ZERO, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "lwadd_na.sn", TILEPRO_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1, { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxb_u", TILEPRO_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxb_u.sn", TILEPRO_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxh", TILEPRO_OPC_MAXH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxh.sn", TILEPRO_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "maxib_u", TILEPRO_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxib_u.sn", TILEPRO_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxih", TILEPRO_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "maxih.sn", TILEPRO_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "mf", TILEPRO_OPC_MF, 0x2, 0, TREG_ZERO, 1, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "mfspr", TILEPRO_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } }, }, { "minb_u", TILEPRO_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minb_u.sn", TILEPRO_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minh", TILEPRO_OPC_MINH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minh.sn", TILEPRO_OPC_MINH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "minib_u", TILEPRO_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minib_u.sn", TILEPRO_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minih", 
TILEPRO_OPC_MINIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "minih.sn", TILEPRO_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "mm", TILEPRO_OPC_MM, 0x3, 5, TREG_ZERO, 1, { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } }, }, { "mnz", TILEPRO_OPC_MNZ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "mnz.sn", TILEPRO_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzb", TILEPRO_OPC_MNZB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzb.sn", TILEPRO_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzh", TILEPRO_OPC_MNZH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mnzh.sn", TILEPRO_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mtspr", TILEPRO_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_ss", TILEPRO_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulhh_ss.sn", TILEPRO_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_su", TILEPRO_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_su.sn", TILEPRO_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhh_uu", TILEPRO_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulhh_uu.sn", TILEPRO_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_ss", TILEPRO_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhha_ss.sn", TILEPRO_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 
1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_su", TILEPRO_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_su.sn", TILEPRO_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhha_uu", TILEPRO_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhha_uu.sn", TILEPRO_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhhsa_uu", TILEPRO_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhhsa_uu.sn", TILEPRO_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_ss", TILEPRO_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_ss.sn", TILEPRO_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_su", TILEPRO_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_su.sn", TILEPRO_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_us", TILEPRO_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_us.sn", TILEPRO_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_uu", TILEPRO_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhl_uu.sn", TILEPRO_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_ss", TILEPRO_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_ss.sn", TILEPRO_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_su", TILEPRO_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, 
}, { 0, } }, }, { "mulhla_su.sn", TILEPRO_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_us", TILEPRO_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_us.sn", TILEPRO_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_uu", TILEPRO_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhla_uu.sn", TILEPRO_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulhlsa_uu", TILEPRO_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mulhlsa_uu.sn", TILEPRO_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_ss", TILEPRO_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulll_ss.sn", TILEPRO_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_su", TILEPRO_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_su.sn", TILEPRO_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulll_uu", TILEPRO_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, }, { "mulll_uu.sn", TILEPRO_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_ss", TILEPRO_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mullla_ss.sn", TILEPRO_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_su", TILEPRO_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mullla_su.sn", TILEPRO_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, 
} }, }, { "mullla_uu", TILEPRO_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mullla_uu.sn", TILEPRO_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulllsa_uu", TILEPRO_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mulllsa_uu.sn", TILEPRO_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mvnz", TILEPRO_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mvnz.sn", TILEPRO_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mvz", TILEPRO_OPC_MVZ, 0x5, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, }, { "mvz.sn", TILEPRO_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "mz", TILEPRO_OPC_MZ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "mz.sn", TILEPRO_OPC_MZ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzb", TILEPRO_OPC_MZB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzb.sn", TILEPRO_OPC_MZB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzh", TILEPRO_OPC_MZH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "mzh.sn", TILEPRO_OPC_MZH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "nap", TILEPRO_OPC_NAP, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "nop", TILEPRO_OPC_NOP, 0xf, 0, TREG_ZERO, 1, { { }, { }, { }, { }, { 0, } }, }, { "nor", TILEPRO_OPC_NOR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "nor.sn", TILEPRO_OPC_NOR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, 
}, { "or", TILEPRO_OPC_OR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "or.sn", TILEPRO_OPC_OR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "ori", TILEPRO_OPC_ORI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "ori.sn", TILEPRO_OPC_ORI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "packbs_u", TILEPRO_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packbs_u.sn", TILEPRO_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhb", TILEPRO_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhb.sn", TILEPRO_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhs", TILEPRO_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packhs.sn", TILEPRO_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packlb", TILEPRO_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "packlb.sn", TILEPRO_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "pcnt", TILEPRO_OPC_PCNT, 0x5, 2, TREG_ZERO, 1, { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, }, { "pcnt.sn", TILEPRO_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1, { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "rl", TILEPRO_OPC_RL, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "rl.sn", TILEPRO_OPC_RL_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "rli", TILEPRO_OPC_RLI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "rli.sn", TILEPRO_OPC_RLI_SN, 0x3, 
3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "s1a", TILEPRO_OPC_S1A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s1a.sn", TILEPRO_OPC_S1A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "s2a", TILEPRO_OPC_S2A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s2a.sn", TILEPRO_OPC_S2A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "s3a", TILEPRO_OPC_S3A, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "s3a.sn", TILEPRO_OPC_S3A_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sadab_u", TILEPRO_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadab_u.sn", TILEPRO_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah", TILEPRO_OPC_SADAH, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah.sn", TILEPRO_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah_u", TILEPRO_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadah_u.sn", TILEPRO_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1, { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadb_u", TILEPRO_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadb_u.sn", TILEPRO_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh", TILEPRO_OPC_SADH, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh.sn", TILEPRO_OPC_SADH_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh_u", TILEPRO_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sadh_u.sn", 
TILEPRO_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1, { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "sb", TILEPRO_OPC_SB, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "sbadd", TILEPRO_OPC_SBADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "seq", TILEPRO_OPC_SEQ, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "seq.sn", TILEPRO_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqb", TILEPRO_OPC_SEQB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqb.sn", TILEPRO_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqh", TILEPRO_OPC_SEQH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqh.sn", TILEPRO_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "seqi", TILEPRO_OPC_SEQI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "seqi.sn", TILEPRO_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqib", TILEPRO_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqib.sn", TILEPRO_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqih", TILEPRO_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "seqih.sn", TILEPRO_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sh", TILEPRO_OPC_SH, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "shadd", TILEPRO_OPC_SHADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "shl", TILEPRO_OPC_SHL, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } 
}, }, { "shl.sn", TILEPRO_OPC_SHL_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlb", TILEPRO_OPC_SHLB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlb.sn", TILEPRO_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlh", TILEPRO_OPC_SHLH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shlh.sn", TILEPRO_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shli", TILEPRO_OPC_SHLI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "shli.sn", TILEPRO_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlib", TILEPRO_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlib.sn", TILEPRO_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlih", TILEPRO_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shlih.sn", TILEPRO_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shr", TILEPRO_OPC_SHR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "shr.sn", TILEPRO_OPC_SHR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrb", TILEPRO_OPC_SHRB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrb.sn", TILEPRO_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrh", TILEPRO_OPC_SHRH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shrh.sn", TILEPRO_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "shri", TILEPRO_OPC_SHRI, 0xf, 3, TREG_ZERO, 1, { { 
7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "shri.sn", TILEPRO_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrib", TILEPRO_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrib.sn", TILEPRO_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrih", TILEPRO_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "shrih.sn", TILEPRO_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "slt", TILEPRO_OPC_SLT, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slt.sn", TILEPRO_OPC_SLT_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slt_u", TILEPRO_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slt_u.sn", TILEPRO_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb", TILEPRO_OPC_SLTB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb.sn", TILEPRO_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb_u", TILEPRO_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sltb_u.sn", TILEPRO_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slte", TILEPRO_OPC_SLTE, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slte.sn", TILEPRO_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slte_u", TILEPRO_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "slte_u.sn", TILEPRO_OPC_SLTE_U_SN, 0x3, 3, 
TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb", TILEPRO_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb.sn", TILEPRO_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb_u", TILEPRO_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteb_u.sn", TILEPRO_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh", TILEPRO_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh.sn", TILEPRO_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh_u", TILEPRO_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slteh_u.sn", TILEPRO_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth", TILEPRO_OPC_SLTH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth.sn", TILEPRO_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth_u", TILEPRO_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slth_u.sn", TILEPRO_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "slti", TILEPRO_OPC_SLTI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "slti.sn", TILEPRO_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "slti_u", TILEPRO_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, }, { "slti_u.sn", TILEPRO_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib", TILEPRO_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, 
{ 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib.sn", TILEPRO_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib_u", TILEPRO_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltib_u.sn", TILEPRO_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih", TILEPRO_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih.sn", TILEPRO_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih_u", TILEPRO_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sltih_u.sn", TILEPRO_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "sne", TILEPRO_OPC_SNE, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sne.sn", TILEPRO_OPC_SNE_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneb", TILEPRO_OPC_SNEB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneb.sn", TILEPRO_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneh", TILEPRO_OPC_SNEH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sneh.sn", TILEPRO_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sra", TILEPRO_OPC_SRA, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sra.sn", TILEPRO_OPC_SRA_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srab", TILEPRO_OPC_SRAB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srab.sn", TILEPRO_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srah", 
TILEPRO_OPC_SRAH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srah.sn", TILEPRO_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "srai", TILEPRO_OPC_SRAI, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, }, { "srai.sn", TILEPRO_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraib", TILEPRO_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraib.sn", TILEPRO_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraih", TILEPRO_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sraih.sn", TILEPRO_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, }, { "sub", TILEPRO_OPC_SUB, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "sub.sn", TILEPRO_OPC_SUB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subb", TILEPRO_OPC_SUBB, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subb.sn", TILEPRO_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subbs_u", TILEPRO_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subbs_u.sn", TILEPRO_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subh", TILEPRO_OPC_SUBH, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subh.sn", TILEPRO_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subhs", TILEPRO_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subhs.sn", TILEPRO_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1, { 
{ 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subs", TILEPRO_OPC_SUBS, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "subs.sn", TILEPRO_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "sw", TILEPRO_OPC_SW, 0x12, 2, TREG_ZERO, 1, { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, }, { "swadd", TILEPRO_OPC_SWADD, 0x2, 3, TREG_ZERO, 1, { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, }, { "swint0", TILEPRO_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint1", TILEPRO_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint2", TILEPRO_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "swint3", TILEPRO_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0, { { 0, }, { }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb0", TILEPRO_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb0.sn", TILEPRO_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb1", TILEPRO_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb1.sn", TILEPRO_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb2", TILEPRO_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb2.sn", TILEPRO_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tblidxb3", TILEPRO_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1, { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, }, { "tblidxb3.sn", TILEPRO_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1, { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, }, { "tns", TILEPRO_OPC_TNS, 0x2, 2, TREG_ZERO, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "tns.sn", TILEPRO_OPC_TNS_SN, 0x2, 2, TREG_SN, 1, { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, }, { "wh64", 
TILEPRO_OPC_WH64, 0x2, 1, TREG_ZERO, 1, { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, }, { "xor", TILEPRO_OPC_XOR, 0xf, 3, TREG_ZERO, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, }, { "xor.sn", TILEPRO_OPC_XOR_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, }, { "xori", TILEPRO_OPC_XORI, 0x3, 3, TREG_ZERO, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { "xori.sn", TILEPRO_OPC_XORI_SN, 0x3, 3, TREG_SN, 1, { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, }, { NULL, TILEPRO_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } }, } }; #define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6)) #define CHILD(array_index) (TILEPRO_OPC_NONE + (array_index)) static const unsigned short decode_X0_fsm[1153] = { BITFIELD(22, 9) /* index 0 */, CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613), CHILD(630), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(663), CHILD(680), CHILD(697), CHILD(714), CHILD(746), CHILD(763), CHILD(780), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), 
CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), 
CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(873), CHILD(878), CHILD(883), CHILD(903), CHILD(908), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(913), CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(953), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(988), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(993), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, 
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1076), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(18, 4) /* index 513 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD, TILEPRO_OPC_ADIFFB_U, TILEPRO_OPC_ADIFFH, TILEPRO_OPC_AND, TILEPRO_OPC_AVGB_U, TILEPRO_OPC_AVGH, TILEPRO_OPC_CRC32_32, TILEPRO_OPC_CRC32_8, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_MAXB_U, BITFIELD(18, 4) /* index 530 */, TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_SU, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULHHSA_UU, TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_SU, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULHLA_SS, TILEPRO_OPC_MULHLA_SU, TILEPRO_OPC_MULHLA_US, BITFIELD(18, 4) /* index 547 */, TILEPRO_OPC_MULHLA_UU, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_MULHL_SS, TILEPRO_OPC_MULHL_SU, TILEPRO_OPC_MULHL_US, TILEPRO_OPC_MULHL_UU, TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_SU, TILEPRO_OPC_MULLLA_UU, TILEPRO_OPC_MULLLSA_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_SU, TILEPRO_OPC_MULLL_UU, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZB, BITFIELD(18, 4) /* index 564 */, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(581), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, TILEPRO_OPC_SADAB_U, TILEPRO_OPC_SADAH, TILEPRO_OPC_SADAH_U, TILEPRO_OPC_SADB_U, 
TILEPRO_OPC_SADH, TILEPRO_OPC_SADH_U, BITFIELD(12, 2) /* index 581 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(586), BITFIELD(14, 2) /* index 586 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(591), BITFIELD(16, 2) /* index 591 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(18, 4) /* index 596 */, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE, BITFIELD(18, 4) /* index 613 */, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE, TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB, TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, TILEPRO_OPC_XOR, TILEPRO_OPC_DWORD_ALIGN, BITFIELD(18, 3) /* index 630 */, CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654), CHILD(657), CHILD(660), BITFIELD(21, 1) /* index 639 */, TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 642 */, TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 645 */, TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 648 */, TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 651 */, TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 654 */, TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 657 */, TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 660 */, TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE, BITFIELD(18, 4) /* index 663 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN, TILEPRO_OPC_ADD_SN, TILEPRO_OPC_ADIFFB_U_SN, TILEPRO_OPC_ADIFFH_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_AVGB_U_SN, TILEPRO_OPC_AVGH_SN, TILEPRO_OPC_CRC32_32_SN, TILEPRO_OPC_CRC32_8_SN, TILEPRO_OPC_INTHB_SN, TILEPRO_OPC_INTHH_SN, 
TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN, TILEPRO_OPC_MAXB_U_SN, BITFIELD(18, 4) /* index 680 */, TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MULHHA_SS_SN, TILEPRO_OPC_MULHHA_SU_SN, TILEPRO_OPC_MULHHA_UU_SN, TILEPRO_OPC_MULHHSA_UU_SN, TILEPRO_OPC_MULHH_SS_SN, TILEPRO_OPC_MULHH_SU_SN, TILEPRO_OPC_MULHH_UU_SN, TILEPRO_OPC_MULHLA_SS_SN, TILEPRO_OPC_MULHLA_SU_SN, TILEPRO_OPC_MULHLA_US_SN, BITFIELD(18, 4) /* index 697 */, TILEPRO_OPC_MULHLA_UU_SN, TILEPRO_OPC_MULHLSA_UU_SN, TILEPRO_OPC_MULHL_SS_SN, TILEPRO_OPC_MULHL_SU_SN, TILEPRO_OPC_MULHL_US_SN, TILEPRO_OPC_MULHL_UU_SN, TILEPRO_OPC_MULLLA_SS_SN, TILEPRO_OPC_MULLLA_SU_SN, TILEPRO_OPC_MULLLA_UU_SN, TILEPRO_OPC_MULLLSA_UU_SN, TILEPRO_OPC_MULLL_SS_SN, TILEPRO_OPC_MULLL_SU_SN, TILEPRO_OPC_MULLL_UU_SN, TILEPRO_OPC_MVNZ_SN, TILEPRO_OPC_MVZ_SN, TILEPRO_OPC_MZB_SN, BITFIELD(18, 4) /* index 714 */, TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(731), TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN, TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN, TILEPRO_OPC_SADAB_U_SN, TILEPRO_OPC_SADAH_SN, TILEPRO_OPC_SADAH_U_SN, TILEPRO_OPC_SADB_U_SN, TILEPRO_OPC_SADH_SN, TILEPRO_OPC_SADH_U_SN, BITFIELD(12, 2) /* index 731 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(736), BITFIELD(14, 2) /* index 736 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(741), BITFIELD(16, 2) /* index 741 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_MOVE_SN, BITFIELD(18, 4) /* index 746 */, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN, BITFIELD(18, 4) /* index 763 
*/, TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN, TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN, TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN, TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN, TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, TILEPRO_OPC_XOR_SN, TILEPRO_OPC_DWORD_ALIGN_SN, BITFIELD(18, 3) /* index 780 */, CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804), CHILD(807), CHILD(810), BITFIELD(21, 1) /* index 789 */, TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 792 */, TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 795 */, TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 798 */, TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 801 */, TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 804 */, TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 807 */, TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE, BITFIELD(21, 1) /* index 810 */, TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(6, 2) /* index 813 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(818), BITFIELD(8, 2) /* index 818 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(823), BITFIELD(10, 2) /* index 823 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_MOVELI_SN, BITFIELD(6, 2) /* index 828 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(833), BITFIELD(8, 2) /* index 833 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(838), BITFIELD(10, 2) /* index 838 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI, BITFIELD(0, 2) /* index 843 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(848), BITFIELD(2, 2) /* index 848 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(853), BITFIELD(4, 2) /* index 853 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, 
TILEPRO_OPC_AULI, CHILD(858), BITFIELD(6, 2) /* index 858 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(863), BITFIELD(8, 2) /* index 863 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(868), BITFIELD(10, 2) /* index 868 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL, BITFIELD(20, 2) /* index 873 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI, BITFIELD(20, 2) /* index 878 */, TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, BITFIELD(20, 2) /* index 883 */, CHILD(888), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, BITFIELD(6, 2) /* index 888 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(893), BITFIELD(8, 2) /* index 893 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(898), BITFIELD(10, 2) /* index 898 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(20, 2) /* index 903 */, TILEPRO_OPC_SLTIB, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U, BITFIELD(20, 2) /* index 908 */, TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(20, 2) /* index 913 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN, TILEPRO_OPC_ADDI_SN, BITFIELD(20, 2) /* index 918 */, TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, BITFIELD(20, 2) /* index 923 */, CHILD(928), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN, BITFIELD(6, 2) /* index 928 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(933), BITFIELD(8, 2) /* index 933 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(938), BITFIELD(10, 2) /* index 938 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_MOVEI_SN, BITFIELD(20, 2) /* index 943 */, TILEPRO_OPC_SLTIB_SN, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN, BITFIELD(20, 2) /* index 948 */, 
TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(20, 2) /* index 953 */, TILEPRO_OPC_NONE, CHILD(958), TILEPRO_OPC_XORI, TILEPRO_OPC_NONE, BITFIELD(0, 2) /* index 958 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(963), BITFIELD(2, 2) /* index 963 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(968), BITFIELD(4, 2) /* index 968 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(973), BITFIELD(6, 2) /* index 973 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(978), BITFIELD(8, 2) /* index 978 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(983), BITFIELD(10, 2) /* index 983 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(20, 2) /* index 988 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_XORI_SN, TILEPRO_OPC_NONE, BITFIELD(17, 5) /* index 993 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLIB, TILEPRO_OPC_SHLIH, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRIB, TILEPRO_OPC_SHRIH, TILEPRO_OPC_SHRI, TILEPRO_OPC_SRAIB, TILEPRO_OPC_SRAIH, TILEPRO_OPC_SRAI, CHILD(1026), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 4) /* index 1026 */, TILEPRO_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052), CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067), CHILD(1070), CHILD(1073), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1043 */, TILEPRO_OPC_BITX, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1046 */, TILEPRO_OPC_BYTEX, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1049 */, TILEPRO_OPC_CLZ, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 
1052 */, TILEPRO_OPC_CTZ, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1055 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1058 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1061 */, TILEPRO_OPC_PCNT, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1064 */, TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1067 */, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1070 */, TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1073 */, TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, BITFIELD(17, 5) /* index 1076 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_SRAI_SN, CHILD(1109), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 4) /* index 1109 */, TILEPRO_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135), CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144), CHILD(1147), CHILD(1150), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1126 */, TILEPRO_OPC_BITX_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1129 */, TILEPRO_OPC_BYTEX_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1132 */, TILEPRO_OPC_CLZ_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1135 */, TILEPRO_OPC_CTZ_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1138 */, TILEPRO_OPC_PCNT_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1141 */, TILEPRO_OPC_TBLIDXB0_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1144 */, TILEPRO_OPC_TBLIDXB1_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 
1147 */, TILEPRO_OPC_TBLIDXB2_SN, TILEPRO_OPC_NONE, BITFIELD(16, 1) /* index 1150 */, TILEPRO_OPC_TBLIDXB3_SN, TILEPRO_OPC_NONE, };
/*
 * decode_X1_fsm: instruction-decode state machine for the X1 pipe.
 *
 * The table is a flattened decision tree over the instruction bits.
 * Each node begins with a BITFIELD(start, size) entry — presumably a
 * macro, defined earlier in this file, that encodes "examine `size`
 * bits of the instruction word starting at bit `start`" (NOTE(review):
 * semantics inferred from the table layout; confirm against the macro
 * definitions) — followed by one entry per possible value of that
 * field.  Each such entry is either a TILEPRO_OPC_* leaf (the decoded
 * opcode) or a CHILD(n) link to the node that begins at table offset n;
 * the generated "index n" comments mark those offsets.  The root node,
 * BITFIELD(54, 9), is followed by 512 entries, so the first child node
 * lands at index 513, consistent with the offsets below.
 *
 * This table appears to be machine-generated — do not edit by hand.
 */
static const unsigned short decode_X1_fsm[1540] = { BITFIELD(54, 9) /* index 0 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(513), CHILD(561), CHILD(594), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(641), CHILD(689), CHILD(722), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(941), CHILD(950), CHILD(974), CHILD(983), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(992), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1334), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(49, 5) /* index 513 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD, TILEPRO_OPC_AND, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR, TILEPRO_OPC_LNK, TILEPRO_OPC_MAXB_U, TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZB, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(546), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, BITFIELD(43, 2) /* index 546 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(551), BITFIELD(45, 2) /* index 551 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(556), BITFIELD(47, 2) /* index 556 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(49, 5) /* index 561 */, TILEPRO_OPC_SB, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE, TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB, TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, BITFIELD(49, 4) /* index 594 */, CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626), CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 611 */, TILEPRO_OPC_SW, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 614 */, TILEPRO_OPC_XOR, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 617 */, TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 620 */, TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 623 */, TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 626 */, TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 629 */, TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 632 */, TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 635 */, TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 638 */, TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE, BITFIELD(49, 5) /* index 641 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN, TILEPRO_OPC_ADD_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_INTHB_SN, TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR, TILEPRO_OPC_LNK_SN, TILEPRO_OPC_MAXB_U_SN, TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MZB_SN, TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(674), TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN, TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN, BITFIELD(43, 2) /* index 674 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(679), BITFIELD(45, 2) /* index 679 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(684), BITFIELD(47, 2) /* index 684 */, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_MOVE_SN, BITFIELD(49, 5) /* index 689 */, TILEPRO_OPC_SB, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SH,
TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN, TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN, TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN, TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN, TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN, TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, BITFIELD(49, 4) /* index 722 */, CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751), CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 739 */, TILEPRO_OPC_XOR_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 742 */, TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 745 */, TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 748 */, TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 751 */, TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 754 */, TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 757 */, TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 760 */, TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 763 */, TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE, BITFIELD(37, 2) /* index 766 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(771), BITFIELD(39, 2) /* index 771 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, CHILD(776), BITFIELD(41, 2) /* index 776 */, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_MOVELI_SN, BITFIELD(37, 2) /* index 781 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(786), BITFIELD(39, 2) /* index 786 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(791), BITFIELD(41, 2) /* index 791 */, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI,
TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI, BITFIELD(31, 2) /* index 796 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(801), BITFIELD(33, 2) /* index 801 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(806), BITFIELD(35, 2) /* index 806 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(811), BITFIELD(37, 2) /* index 811 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(816), BITFIELD(39, 2) /* index 816 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(821), BITFIELD(41, 2) /* index 821 */, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL, BITFIELD(31, 4) /* index 826 */, TILEPRO_OPC_BZ, TILEPRO_OPC_BZT, TILEPRO_OPC_BNZ, TILEPRO_OPC_BNZT, TILEPRO_OPC_BGZ, TILEPRO_OPC_BGZT, TILEPRO_OPC_BGEZ, TILEPRO_OPC_BGEZT, TILEPRO_OPC_BLZ, TILEPRO_OPC_BLZT, TILEPRO_OPC_BLEZ, TILEPRO_OPC_BLEZT, TILEPRO_OPC_BBS, TILEPRO_OPC_BBST, TILEPRO_OPC_BBNS, TILEPRO_OPC_BBNST, BITFIELD(31, 4) /* index 843 */, TILEPRO_OPC_BZ_SN, TILEPRO_OPC_BZT_SN, TILEPRO_OPC_BNZ_SN, TILEPRO_OPC_BNZT_SN, TILEPRO_OPC_BGZ_SN, TILEPRO_OPC_BGZT_SN, TILEPRO_OPC_BGEZ_SN, TILEPRO_OPC_BGEZT_SN, TILEPRO_OPC_BLZ_SN, TILEPRO_OPC_BLZT_SN, TILEPRO_OPC_BLEZ_SN, TILEPRO_OPC_BLEZT_SN, TILEPRO_OPC_BBS_SN, TILEPRO_OPC_BBST_SN, TILEPRO_OPC_BBNS_SN, TILEPRO_OPC_BBNST_SN, BITFIELD(51, 3) /* index 860 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI, CHILD(869), TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MFSPR, BITFIELD(31, 2) /* index 869 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(874), BITFIELD(33, 2) /* index 874 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(879), BITFIELD(35, 2) /* index 879 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(884), BITFIELD(37, 2) /* index 884 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(889), BITFIELD(39, 2) /* index 889 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(894),
BITFIELD(41, 2) /* index 894 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(51, 3) /* index 899 */, TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, TILEPRO_OPC_MTSPR, CHILD(908), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, TILEPRO_OPC_SLTIB, BITFIELD(37, 2) /* index 908 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(913), BITFIELD(39, 2) /* index 913 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(918), BITFIELD(41, 2) /* index 918 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(51, 3) /* index 923 */, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U, TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_XORI, TILEPRO_OPC_LBADD, TILEPRO_OPC_LBADD_U, BITFIELD(51, 3) /* index 932 */, TILEPRO_OPC_LHADD, TILEPRO_OPC_LHADD_U, TILEPRO_OPC_LWADD, TILEPRO_OPC_LWADD_NA, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD, TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE, BITFIELD(51, 3) /* index 941 */, TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN, TILEPRO_OPC_ADDI_SN, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MFSPR, BITFIELD(51, 3) /* index 950 */, TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, TILEPRO_OPC_MTSPR, CHILD(959), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN, TILEPRO_OPC_SLTIB_SN, BITFIELD(37, 2) /* index 959 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(964), BITFIELD(39, 2) /* index 964 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(969), BITFIELD(41, 2) /* index 969 */, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_MOVEI_SN, BITFIELD(51, 3) /* index 974 */, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN, TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_XORI_SN, TILEPRO_OPC_LBADD_SN, TILEPRO_OPC_LBADD_U_SN, BITFIELD(51, 3) /* index 983 */, TILEPRO_OPC_LHADD_SN, TILEPRO_OPC_LHADD_U_SN, TILEPRO_OPC_LWADD_SN,
TILEPRO_OPC_LWADD_NA_SN, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD, TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE, BITFIELD(46, 7) /* index 992 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127), CHILD(1127), CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142), CHILD(1142), CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1151), CHILD(1242), CHILD(1290), CHILD(1323), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1121 */, TILEPRO_OPC_RLI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1124 */, TILEPRO_OPC_SHLIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1127 */, TILEPRO_OPC_SHLIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1130 */, TILEPRO_OPC_SHLI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1133 */, TILEPRO_OPC_SHRIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1136 */, TILEPRO_OPC_SHRIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1139 */, TILEPRO_OPC_SHRI, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1142 */, TILEPRO_OPC_SRAIB, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1145 */, TILEPRO_OPC_SRAIH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1148 */, TILEPRO_OPC_SRAI, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1151 */, TILEPRO_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169), CHILD(1172), CHILD(1175), CHILD(1178), BITFIELD(53, 1) /* index 1160 */, TILEPRO_OPC_DRAIN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1163 */, TILEPRO_OPC_DTLBPR, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1166 */, TILEPRO_OPC_FINV, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1169 */, TILEPRO_OPC_FLUSH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1172 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1175 */, TILEPRO_OPC_ICOH, TILEPRO_OPC_NONE, BITFIELD(31, 2) /* index 1178 */, CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239), BITFIELD(53, 1) /* index 1183 */, CHILD(1186), TILEPRO_OPC_NONE, BITFIELD(33, 2) /* index 1186 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1191), BITFIELD(35, 2) /* index 1191 */, TILEPRO_OPC_ILL, CHILD(1196), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(37, 2) /* index 1196 */, TILEPRO_OPC_ILL, CHILD(1201), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(39, 2) /* index 1201 */,
TILEPRO_OPC_ILL, CHILD(1206), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(41, 2) /* index 1206 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_BPT, TILEPRO_OPC_ILL, BITFIELD(53, 1) /* index 1211 */, CHILD(1214), TILEPRO_OPC_NONE, BITFIELD(33, 2) /* index 1214 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1219), BITFIELD(35, 2) /* index 1219 */, TILEPRO_OPC_ILL, CHILD(1224), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(37, 2) /* index 1224 */, TILEPRO_OPC_ILL, CHILD(1229), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(39, 2) /* index 1229 */, TILEPRO_OPC_ILL, CHILD(1234), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, BITFIELD(41, 2) /* index 1234 */, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_RAISE, TILEPRO_OPC_ILL, BITFIELD(53, 1) /* index 1239 */, TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1242 */, CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278), CHILD(1281), CHILD(1284), CHILD(1287), BITFIELD(53, 1) /* index 1251 */, TILEPRO_OPC_INV, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1254 */, TILEPRO_OPC_IRET, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1257 */, CHILD(1260), TILEPRO_OPC_NONE, BITFIELD(31, 2) /* index 1260 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1265), BITFIELD(33, 2) /* index 1265 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1270), BITFIELD(35, 2) /* index 1270 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH, BITFIELD(53, 1) /* index 1275 */, TILEPRO_OPC_LB_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1278 */, TILEPRO_OPC_LH, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1281 */, TILEPRO_OPC_LH_U, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1284 */, TILEPRO_OPC_LW, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1287 */, TILEPRO_OPC_MF, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1290 */, CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), CHILD(1314), CHILD(1317), CHILD(1320), BITFIELD(53, 1) /* index 1299 */, TILEPRO_OPC_NAP, TILEPRO_OPC_NONE, BITFIELD(53, 1)
/* index 1302 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1305 */, TILEPRO_OPC_SWINT0, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1308 */, TILEPRO_OPC_SWINT1, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1311 */, TILEPRO_OPC_SWINT2, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1314 */, TILEPRO_OPC_SWINT3, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1317 */, TILEPRO_OPC_TNS, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1320 */, TILEPRO_OPC_WH64, TILEPRO_OPC_NONE, BITFIELD(43, 2) /* index 1323 */, CHILD(1328), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(45, 1) /* index 1328 */, CHILD(1331), TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1331 */, TILEPRO_OPC_LW_NA, TILEPRO_OPC_NONE, BITFIELD(46, 7) /* index 1334 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469), CHILD(1469), CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484), CHILD(1484), CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1151), CHILD(1493), CHILD(1517), CHILD(1529), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1463 */, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1466 */, TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1469 */, TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1472 */, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1475 */, TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1478 */, TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1481 */, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1484 */, TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1487 */, TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1490 */, TILEPRO_OPC_SRAI_SN, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1493 */, CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508), CHILD(1511), CHILD(1514), CHILD(1287), BITFIELD(53, 1) /* index 1502 */, TILEPRO_OPC_LB_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1505 */, TILEPRO_OPC_LB_U_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1508 */, TILEPRO_OPC_LH_SN, TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1511 */, TILEPRO_OPC_LH_U_SN,
TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1514 */, TILEPRO_OPC_LW_SN, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 1517 */, CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), CHILD(1314), CHILD(1526), CHILD(1320), BITFIELD(53, 1) /* index 1526 */, TILEPRO_OPC_TNS_SN, TILEPRO_OPC_NONE, BITFIELD(43, 2) /* index 1529 */, CHILD(1534), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(45, 1) /* index 1534 */, CHILD(1537), TILEPRO_OPC_NONE, BITFIELD(53, 1) /* index 1537 */, TILEPRO_OPC_LW_NA_SN, TILEPRO_OPC_NONE, };
/*
 * decode_Y0_fsm: decode table for the Y0 pipe, using the same flattened
 * BITFIELD()/CHILD() decision-tree encoding described above for
 * decode_X1_fsm.  Also appears machine-generated — do not edit by hand.
 */
static const unsigned short decode_Y0_fsm[168] = { BITFIELD(27, 4) /* index 0 */, TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), CHILD(57), CHILD(62), CHILD(67), TILEPRO_OPC_ADDI, CHILD(72), CHILD(102), TILEPRO_OPC_SEQI, CHILD(117), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, BITFIELD(18, 2) /* index 17 */, TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB, BITFIELD(18, 2) /* index 22 */, TILEPRO_OPC_MNZ, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZ, BITFIELD(18, 2) /* index 27 */, TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR, BITFIELD(12, 2) /* index 32 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37), BITFIELD(14, 2) /* index 37 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42), BITFIELD(16, 2) /* index 42 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(18, 2) /* index 47 */, TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA, BITFIELD(18, 2) /* index 52 */, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, BITFIELD(18, 2) /* index 57 */, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE, BITFIELD(18, 2) /* index 62 */, TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_UU, BITFIELD(18, 2) /* index 67 */, TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_UU, BITFIELD(0, 2) /* index 72 */,
TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77), BITFIELD(2, 2) /* index 77 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82), BITFIELD(4, 2) /* index 82 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87), BITFIELD(6, 2) /* index 87 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(92), BITFIELD(8, 2) /* index 92 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(97), BITFIELD(10, 2) /* index 97 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(6, 2) /* index 102 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(107), BITFIELD(8, 2) /* index 107 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(112), BITFIELD(10, 2) /* index 112 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(15, 5) /* index 117 */, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, CHILD(150), CHILD(159), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(12, 3) /* index 150 */, TILEPRO_OPC_NONE, TILEPRO_OPC_BITX, TILEPRO_OPC_BYTEX, TILEPRO_OPC_CLZ, TILEPRO_OPC_CTZ, TILEPRO_OPC_FNOP, TILEPRO_OPC_NOP, TILEPRO_OPC_PCNT, BITFIELD(12, 3) /* index 159 */, TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, }; static const unsigned short decode_Y1_fsm[140] = { BITFIELD(59, 4) /* index 0 */, TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), CHILD(57), TILEPRO_OPC_ADDI, 
CHILD(62), CHILD(92), TILEPRO_OPC_SEQI, CHILD(107), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(49, 2) /* index 17 */, TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB, BITFIELD(49, 2) /* index 22 */, TILEPRO_OPC_NONE, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZ, TILEPRO_OPC_NONE, BITFIELD(49, 2) /* index 27 */, TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR, BITFIELD(43, 2) /* index 32 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37), BITFIELD(45, 2) /* index 37 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42), BITFIELD(47, 2) /* index 42 */, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE, BITFIELD(49, 2) /* index 47 */, TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA, BITFIELD(49, 2) /* index 52 */, TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U, BITFIELD(49, 2) /* index 57 */, TILEPRO_OPC_NONE, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE, BITFIELD(31, 2) /* index 62 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(67), BITFIELD(33, 2) /* index 67 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(72), BITFIELD(35, 2) /* index 72 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77), BITFIELD(37, 2) /* index 77 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82), BITFIELD(39, 2) /* index 82 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87), BITFIELD(41, 2) /* index 87 */, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO, BITFIELD(37, 2) /* index 92 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(97), BITFIELD(39, 2) /* index 97 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(102), BITFIELD(41, 2) /* index 102 */, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI, BITFIELD(48, 3) /* index 107 */, TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI, 
TILEPRO_OPC_SRAI, CHILD(116), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(43, 3) /* index 116 */, TILEPRO_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 125 */, TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 130 */, TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, BITFIELD(46, 2) /* index 135 */, TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, }; static const unsigned short decode_Y2_fsm[24] = { BITFIELD(56, 3) /* index 0 */, CHILD(9), TILEPRO_OPC_LB_U, TILEPRO_OPC_LH, TILEPRO_OPC_LH_U, TILEPRO_OPC_LW, TILEPRO_OPC_SB, TILEPRO_OPC_SH, TILEPRO_OPC_SW, BITFIELD(20, 2) /* index 9 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(14), BITFIELD(22, 2) /* index 14 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(19), BITFIELD(24, 2) /* index 19 */, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH, }; #undef BITFIELD #undef CHILD const unsigned short * const tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS] = { decode_X0_fsm, decode_X1_fsm, decode_Y0_fsm, decode_Y1_fsm, decode_Y2_fsm }; const struct tilepro_operand tilepro_operands[43] = { { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X0), 8, 1, 0, 0, 0, 0, create_Imm8_X0, get_Imm8_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Imm8_X1, get_Imm8_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y0), 8, 1, 0, 0, 0, 0, create_Imm8_Y0, get_Imm8_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y1), 8, 1, 0, 0, 0, 0, create_Imm8_Y1, get_Imm8_Y1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X0), 16, 1, 0, 0, 0, 0, create_Imm16_X0, get_Imm16_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X1), 16, 1, 0, 0, 0, 0, create_Imm16_X1, get_Imm16_X1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_JOFFLONG_X1), 29, 1, 0, 
0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_JOffLong_X1, get_JOffLong_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X0, get_SrcA_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_X1, get_Dest_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y0, get_SrcA_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_Dest_Y1, get_Dest_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y1, get_SrcA_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcA_Y2, get_SrcA_Y2 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X0, get_SrcB_X0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_X1, get_SrcB_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y0, get_SrcB_Y0 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcB_Y1, get_SrcB_Y1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_BROFF_X1), 17, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_BrOff_X1, get_BrOff_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_X0, get_Dest_X0 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE), 28, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, create_JOff_X1, get_JOff_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 0, 1, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_SrcA_X1, get_SrcA_X1 }, { TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MF_IMM15_X1), 15, 0, 0, 0, 0, 0, create_MF_Imm15_X1, get_MF_Imm15_X1 }, { 
TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X0), 5, 0, 0, 0, 0, 0, create_MMStart_X0, get_MMStart_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X0), 5, 0, 0, 0, 0, 0, create_MMEnd_X0, get_MMEnd_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X1), 5, 0, 0, 0, 0, 0, create_MMStart_X1, get_MMStart_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X1), 5, 0, 0, 0, 0, 0, create_MMEnd_X1, get_MMEnd_X1 }, { TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MT_IMM15_X1), 15, 0, 0, 0, 0, 0, create_MT_Imm15_X1, get_MT_Imm15_X1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 1, 0, 0, create_Dest_Y0, get_Dest_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X0), 5, 0, 0, 0, 0, 0, create_ShAmt_X0, get_ShAmt_X0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X1), 5, 0, 0, 0, 0, 0, create_ShAmt_X1, get_ShAmt_X1 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y0), 5, 0, 0, 0, 0, 0, create_ShAmt_Y0, get_ShAmt_Y0 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y1), 5, 0, 0, 0, 0, 0, create_ShAmt_Y1, get_ShAmt_Y1 }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 6, 0, 1, 0, 0, 0, create_SrcBDest_Y2, get_SrcBDest_Y2 }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_DEST_IMM8_X1), 8, 1, 0, 0, 0, 0, create_Dest_Imm8_X1, get_Dest_Imm8_X1 }, { TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE), 10, 1, 0, 0, 1, TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES, create_BrOff_SN, get_BrOff_SN }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 8, 0, 0, 0, 0, 0, create_Imm8_SN, get_Imm8_SN }, { TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), 8, 1, 0, 0, 0, 0, create_Imm8_SN, get_Imm8_SN }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 2, 0, 0, 1, 0, 0, create_Dest_SN, get_Dest_SN }, { TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE), 2, 0, 1, 0, 0, 0, create_Src_SN, get_Src_SN } }; /* Given a set of bundle bits and a specific pipe, returns which * instruction the bundle contains in that pipe. 
 */
const struct tilepro_opcode *
find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe)
{
	/* Each pipe has its own table-driven decoder FSM; walk it until a
	 * leaf (an opcode) is reached.
	 */
	const unsigned short *table = tilepro_bundle_decoder_fsms[pipe];
	int index = 0;

	while (1) {
		/* Node layout: entry 0 is a bitspec whose low 6 bits give
		 * the shift into the bundle and whose remaining bits give
		 * the mask; entries 1..mask+1 are the fanout.
		 */
		unsigned short bitspec = table[index];
		unsigned int bitfield =
			((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
		unsigned short next = table[index + 1 + bitfield];

		/* Values <= TILEPRO_OPC_NONE are terminal opcodes; larger
		 * values are CHILD() links biased by TILEPRO_OPC_NONE.
		 */
		if (next <= TILEPRO_OPC_NONE)
			return &tilepro_opcodes[next];

		index = next - TILEPRO_OPC_NONE;
	}
}

/* Decode every instruction in a bundle into @decoded, one entry per
 * occupied pipe (X0/X1 or Y0/Y1/Y2), resolving operands to final values.
 * Returns the number of instructions decoded.
 */
int
parse_insn_tilepro(tilepro_bundle_bits bits,
		   unsigned int pc,
		   struct tilepro_decoded_instruction
		   decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE])
{
	int num_instructions = 0;
	int pipe;
	int min_pipe, max_pipe;

	/* The Y-encoding bit selects between the 2-wide X pipes and the
	 * 3-wide Y pipes for this bundle.
	 */
	if ((bits & TILEPRO_BUNDLE_Y_ENCODING_MASK) == 0) {
		min_pipe = TILEPRO_PIPELINE_X0;
		max_pipe = TILEPRO_PIPELINE_X1;
	} else {
		min_pipe = TILEPRO_PIPELINE_Y0;
		max_pipe = TILEPRO_PIPELINE_Y2;
	}

	/* For each pipe, find an instruction that fits. */
	for (pipe = min_pipe; pipe <= max_pipe; pipe++) {
		const struct tilepro_opcode *opc;
		struct tilepro_decoded_instruction *d;
		int i;

		d = &decoded[num_instructions++];
		opc = find_opcode (bits, (tilepro_pipeline)pipe);
		d->opcode = opc;

		/* Decode each operand, sign extending, etc. as appropriate. */
		for (i = 0; i < opc->num_operands; i++) {
			const struct tilepro_operand *op =
				&tilepro_operands[opc->operands[pipe][i]];
			int opval = op->extract (bits);

			if (op->is_signed) {
				/* Sign-extend the operand via a shift pair
				 * up to the full width of int.
				 */
				int shift =
					(int)((sizeof(int) * 8) - op->num_bits);
				opval = (opval << shift) >> shift;
			}

			/* Adjust PC-relative scaled branch offsets. */
			if (op->type == TILEPRO_OP_TYPE_ADDRESS) {
				opval *= TILEPRO_BUNDLE_SIZE_IN_BYTES;
				opval += (int)pc;
			}

			/* Record the final value. */
			d->operands[i] = op;
			d->operand_values[i] = opval;
		}
	}

	return num_instructions;
}
gpl-2.0
ebildude123/Geass-Kernel-TF300T
arch/sparc/kernel/led.c
12959
3186
#include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/auxio.h> #define LED_MAX_LENGTH 8 /* maximum chars written to proc file */ static inline void led_toggle(void) { unsigned char val = get_auxio(); unsigned char on, off; if (val & AUXIO_LED) { on = 0; off = AUXIO_LED; } else { on = AUXIO_LED; off = 0; } set_auxio(on, off); } static struct timer_list led_blink_timer; static void led_blink(unsigned long timeout) { led_toggle(); /* reschedule */ if (!timeout) { /* blink according to load */ led_blink_timer.expires = jiffies + ((1 + (avenrun[0] >> FSHIFT)) * HZ); led_blink_timer.data = 0; } else { /* blink at user specified interval */ led_blink_timer.expires = jiffies + (timeout * HZ); led_blink_timer.data = timeout; } add_timer(&led_blink_timer); } static int led_proc_show(struct seq_file *m, void *v) { if (get_auxio() & AUXIO_LED) seq_puts(m, "on\n"); else seq_puts(m, "off\n"); return 0; } static int led_proc_open(struct inode *inode, struct file *file) { return single_open(file, led_proc_show, NULL); } static ssize_t led_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char *buf = NULL; if (count > LED_MAX_LENGTH) count = LED_MAX_LENGTH; buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, buffer, count)) { kfree(buf); return -EFAULT; } buf[count] = '\0'; /* work around \n when echo'ing into proc */ if (buf[count - 1] == '\n') buf[count - 1] = '\0'; /* before we change anything we want to stop any running timers, * otherwise calls such as on will have no persistent effect */ del_timer_sync(&led_blink_timer); if (!strcmp(buf, "on")) { auxio_set_led(AUXIO_LED_ON); } else if (!strcmp(buf, "toggle")) { led_toggle(); } else if ((*buf > '0') && (*buf <= '9')) { 
led_blink(simple_strtoul(buf, NULL, 10)); } else if (!strcmp(buf, "load")) { led_blink(0); } else { auxio_set_led(AUXIO_LED_OFF); } kfree(buf); return count; } static const struct file_operations led_proc_fops = { .owner = THIS_MODULE, .open = led_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = led_proc_write, }; static struct proc_dir_entry *led; #define LED_VERSION "0.1" static int __init led_init(void) { init_timer(&led_blink_timer); led_blink_timer.function = led_blink; led = proc_create("led", 0, NULL, &led_proc_fops); if (!led) return -ENOMEM; printk(KERN_INFO "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n", LED_VERSION); return 0; } static void __exit led_exit(void) { remove_proc_entry("led", NULL); del_timer_sync(&led_blink_timer); } module_init(led_init); module_exit(led_exit); MODULE_AUTHOR("Lars Kotthoff <metalhead@metalhead.ws>"); MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems."); MODULE_LICENSE("GPL"); MODULE_VERSION(LED_VERSION);
gpl-2.0
linuxandroid/kernel
drivers/scsi/mpt2sas/mpt2sas_ctl.c
160
88415
/* * Management Module Support for MPT (Message Passing Technology) based * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c * Copyright (C) 2007-2010 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/compat.h> #include <linux/poll.h> #include <linux/io.h> #include <linux/uaccess.h> #include "mpt2sas_base.h" #include "mpt2sas_ctl.h" static DEFINE_MUTEX(_ctl_mutex); static struct fasync_struct *async_queue; static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); static int _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset); /** * enum block_state - blocking state * @NON_BLOCKING: non blocking * @BLOCKING: blocking * * These states are for ioctls that need to wait for a response * from firmware, so they probably require sleep. */ enum block_state { NON_BLOCKING, BLOCKING, }; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _ctl_sas_device_find_by_handle - sas device search * @ioc: per adapter object * @handle: sas device handle (assigned by firmware) * Context: Calling function should acquire ioc->sas_device_lock * * This searches for sas_device based on sas_address, then return sas_device * object. 
 */
static struct _sas_device *
_ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device, *r;

	r = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->handle != handle)
			continue;
		r = sas_device;
		goto out;
	}
 out:
	return r;
}

/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string passed from the calling function
 * @mpi_reply: reply message frame, may be NULL
 * Context: none.
 *
 * Pretty-prints the request (and, when present, the reply) associated
 * with @smid.  Gated on the MPT_DEBUG_IOCTL logging level; a no-op
 * otherwise.  For SCSI I/O and RAID passthrough requests it also logs
 * the target device's address/enclosure info and any SCSI status.
 */
static void
_ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	/* Map the MPI function code to a short human-readable tag; a few
	 * request types format extra detail into ioc->tmp_string.
	 */
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		    MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	/* Unrecognized function codes are silently skipped. */
	if (!desc)
		return;

	printk(MPT2SAS_INFO_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		printk(MPT2SAS_INFO_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		unsigned long flags;

		/* sas_device_lock guards the list walked by
		 * _ctl_sas_device_find_by_handle().
		 */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _ctl_sas_device_find_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
			    "phy(%d)\n", ioc->name, (unsigned long long)
			    sas_device->sas_address, sas_device->phy);
			printk(MPT2SAS_WARN_FMT
			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
			    ioc->name, sas_device->enclosure_logical_id,
			    sas_device->slot);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			printk(MPT2SAS_INFO_FMT
			    "\tscsi_state(0x%02x), scsi_status"
			    "(0x%02x)\n", ioc->name,
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}
#endif

/**
 * mpt2sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.  Copies the reply
 * frame (and SCSI sense data, when valid) into ioc->ctl_cmds, then
 * completes the waiter.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	const void *sense_data;
	u32 sz;

	/* Ignore completions that are not for the currently pending
	 * ctl command.
	 */
	if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 4-byte dwords. */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt2sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
	}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
#endif
	ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}

/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should
 * be saved in the driver event_log.  This bitmask is set by application.
 *
 * Returns 1 when event should be captured, or zero means no match.
 */
static int
_ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	/* Events are tracked in a 128-bit mask (four u32 words). */
	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	return desired_event & ioc->event_type[i];
}

/**
 * mpt2sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Inserts the event into the circular driver event log (when its type
 * is enabled via ioc->event_type[]) and notifies any poll/fasync
 * listeners.
 *
 * Return nothing.
 */
void
mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT2_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		/* EventDataLength is in 4-byte dwords; clamp the copy to
		 * the fixed log-entry payload size.
		 */
		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT2_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT2_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: adapter id requested by the application
 * @iocpp: The ioc pointer is returned in this.
 *
 * Walks mpt2sas_ioc_list for a matching adapter id; on no match *iocpp
 * is set to NULL.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
{
	struct MPT2SAS_ADAPTER *ioc;

	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		*iocpp = ioc;
		return ioc_number;
	}
	*iocpp = NULL;
	return -1;
}

/**
 * mpt2sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
 * MPT2_IOC_DONE_RESET
 */
void
mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT2_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
		/* Release every registered, not-yet-released diag buffer
		 * before the controller is reset.
		 */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_RELEASED))
				continue;
			_ctl_send_release(ioc, i, &issue_reset);
		}
		break;
	case MPT2_IOC_AFTER_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* Abort any in-flight ctl command so its waiter wakes. */
		if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT2_CMD_RESET;
			mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT2_IOC_DONE_RESET:
		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
		/* Mark surviving registered buffers as invalidated by the
		 * diag reset.
		 */
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT2_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT2_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync -
 * @fd -
 * @filep -
 * @mode -
 *
 * Called when application request fasyn callback handler.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_release -
 * @inode -
 * @filep -
 *
 * Called when application releases the fasyn callback handler.
 */
static int
_ctl_release(struct inode *inode, struct file *filep)
{
	return fasync_helper(-1, filep, 0, &async_queue);
}

/**
 * _ctl_poll -
 * @filep -
 * @wait -
 *
 * Reports POLLIN when any adapter has an unread AEN event pending.
 */
static unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT2SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
		if (ioc->aen_event_read_flag)
			return POLLIN | POLLRDNORM;
	}
	return 0;
}

/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg - (struct mpt2_ioctl_command)
 * @tm_request - pointer to mf from user space
 *
 * Returns 0 when an smid is found, else fail.
 * during failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
    Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 i;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT2SAS_DEVICE *priv_data;
	unsigned long flags;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	/* Only abort_task/query_task need a TaskMID filled in. */
	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	/*
	 * Walk the outstanding-command lookup table (newest slot first)
	 * for a command on the same device handle + lun; that command's
	 * smid becomes the TaskMID of the TM request.
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = ioc->scsiio_depth; i && !found; i--) {
		scmd = ioc->scsi_lookup[i - 1].scmd;
		if (scmd == NULL || scmd->device == NULL ||
		    scmd->device->hostdata == NULL)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
		found = 1;
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!found) {
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
		    desc, le16_to_cpu(tm_request->DevHandle), lun));
		/*
		 * No matching active command: synthesize a reply frame for
		 * the application instead of sending the TM to firmware.
		 */
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
	    "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
	    desc, le16_to_cpu(tm_request->DevHandle), lun,
	    le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_do_mpt_command - main handler for MPT2COMMAND opcode
 * @ioc: per adapter object
 * @karg - (struct mpt2_ioctl_command)
 * @mf - pointer to mf in user space
 * @state - NON_BLOCKING or BLOCKING
 *
 * Builds an MPI request from the user-supplied message frame, attaches
 * optional read/write DMA buffers, sends it to firmware and copies the
 * reply (and sense data, for SCSI IO) back to user space.
 */
static long
_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
    void __user *mf, enum block_state state)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	u32 ioc_state;
	u16 ioc_status;
	u16 smid;
	unsigned long timeout, timeleft;
	u8 issue_reset;
	u32 sz;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma;
	size_t data_in_sz = 0;
	u32 sgl_flags;
	long ret;
	u16 wait_state_count;

	issue_reset = 0;

	/*
	 * NOTE(review): when state == NON_BLOCKING and mutex_trylock()
	 * succeeds, the first condition is false and control still reaches
	 * the else-if, taking the mutex a second time -- looks like a
	 * double acquisition; confirm against upstream history before
	 * relying on the NON_BLOCKING path.
	 */
	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	/* Give the IOC up to ~10 seconds to reach OPERATIONAL. */
	wait_state_count = 0;
	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			printk(MPT2SAS_ERR_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ret = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
		printk(MPT2SAS_INFO_FMT "%s: waiting for "
		    "operational state(count=%d)\n", ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
	if (!mpi_request) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a memory for "
		    "mpi_request\n", ioc->name, __func__);
		ret = -ENOMEM;
		goto out;
	}

	/* Check for overflow and wraparound */
	if (karg.data_sge_offset * 4 > ioc->request_sz ||
	    karg.data_sge_offset > (UINT_MAX / 4)) {
		ret = -EINVAL;
		goto out;
	}

	/* copy in
request message frame from user */
	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
		    __LINE__, __func__);
		ret = -EFAULT;
		goto out;
	}

	/* TM requests go on the high-priority queue; everything else
	 * uses a normal SCSI IO smid. */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		smid = mpt2sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
		if (!smid) {
			printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	} else {
		smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
		if (!smid) {
			printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = 0;
	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	request = mpt2sas_base_get_msg_frame(ioc, smid);
	memcpy(request, mpi_request, karg.data_sge_offset*4);
	ioc->ctl_cmds.smid = smid;
	data_out_sz = karg.data_out_size;
	data_in_sz = karg.data_in_size;

	/* SCSI IO carries a device handle in FunctionDependent1;
	 * reject 0 or anything beyond the firmware's reported maximum. */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
		    le16_to_cpu(mpi_request->FunctionDependent1) >
		    ioc->facts.MaxDevHandle) {
			ret = -EINVAL;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
		    &data_out_dma);
		if (!data_out) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
		if (copy_from_user(data_out, karg.data_out_buf_ptr,
		    data_out_sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -EFAULT;
			mpt2sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	if (data_in_sz) /* READ */ {
		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
		    &data_in_dma);
		if (!data_in) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt2sas_base_free_smid(ioc, smid);
			goto
	out;
		}
	}

	/* add scatter gather elements */
	psge = (void *)request + (karg.data_sge_offset*4);

	if (!data_out_sz && !data_in_sz) {
		mpt2sas_base_build_zero_len_sge(ioc, psge);
	} else if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}

	/* send command to firmware */
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
#endif

	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)request;
		/* Point the firmware at the driver's per-smid sense buffer
		 * so sense data can be copied back after completion. */
		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
		scsiio_request->SenseBufferLowAddress =
		    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
			mpt2sas_base_put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->FunctionDependent1));
		else
			mpt2sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)request;

		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
		    "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));

		/* abort/query need the TaskMID of an active command;
		 * bail out (reply already staged) when none exists. */
		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
		    tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
				mpt2sas_base_free_smid(ioc, smid);
				goto out;
			}
		}

		mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt2sas_base_put_smid_hi_priority(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		/* ioc determines which port to use */
		smp_request->PhysicalPort = 0xFF;
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else
			data = data_out;

		/* SMP phy control (0x91) link/hard reset: quiesce loginfo
		 * noise while the link bounces. */
		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		mpt2sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		mpt2sas_base_put_smid_default(ioc, smid);
		break;
	}
	default:
		mpt2sas_base_put_smid_default(ioc, smid);
		break;
	}

	/* Enforce a minimum wait; karg.timeout is in seconds. */
	if (karg.timeout < MPT2_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	init_completion(&ioc->ctl_cmds.done);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request, karg.data_sge_offset);
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
		    "TerminationCount(0x%08x)\n", ioc->name,
		    le16_to_cpu(tm_reply->IOCStatus),
		    le32_to_cpu(tm_reply->IOCLogInfo),
		    le32_to_cpu(tm_reply->TerminationCount));
	}
#endif
	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
		if
	    (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		/* For timed-out SCSI IO, try a target reset of that device
		 * first; otherwise fall back to a full host reset. */
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			printk(MPT2SAS_INFO_FMT "issue target reset: handle "
			    "= (0x%04x)\n", ioc->name,
			    le16_to_cpu(mpi_request->FunctionDependent1));
			mpt2sas_halt_firmware(ioc);
			mpt2sas_scsih_issue_tm(ioc,
			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
			    0, TM_MUTEX_ON);
			ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
		} else
			mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
			    FORCE_BIG_HAMMER);
	}

 out:

	/* free memory associated with sg buffers */
	if (data_in)
		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->ctl_cmds.mutex);
	return ret;
}

/**
 * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
 * @arg - user space buffer containing ioctl content
 *
 * Fills in adapter identification (PCI ids, firmware/bios/driver
 * versions) for the application.
 */
static long
_ctl_getiocinfo(void __user *arg)
{
	struct mpt2_ioctl_iocinfo karg;
	struct MPT2SAS_ADAPTER *ioc;
	u8 revision;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
	    __func__));

	/* karg was only needed for the ioc_number; rebuild it from scratch */
	memset(&karg, 0 , sizeof(karg));
	if (ioc->is_warpdrive)
		karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
	else
		karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
	if (ioc->pfacts)
		karg.port_number = ioc->pfacts[0].PortNumber;
	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
	karg.hw_rev = revision;
	karg.pci_id = ioc->pdev->device;
	karg.subsystem_device = ioc->pdev->subsystem_device;
	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
	karg.firmware_version = ioc->facts.FWVersion.Word;
	strcpy(karg.driver_version, MPT2SAS_DRIVER_NAME);
	strcat(karg.driver_version, "-");
	strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
 * @arg - user space buffer containing ioctl content
 *
 * Reports the size of the event log and the currently enabled
 * event-type mask.
 */
static long
_ctl_eventquery(void __user *arg)
{
	struct mpt2_ioctl_eventquery karg;
	struct MPT2SAS_ADAPTER *ioc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
	    __func__));

	karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE;
	memcpy(karg.event_types, ioc->event_type,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
 * @arg - user space buffer containing ioctl content
 *
 * Installs the application's event-type mask and allocates the
 * driver-side event log (once per adapter).
 */
static long
_ctl_eventenable(void __user *arg)
{
	struct mpt2_ioctl_eventenable karg;
	struct MPT2SAS_ADAPTER *ioc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if
	    (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
	    __func__));

	/* Already enabled: keep the existing log and mask. */
	if (ioc->event_log)
		return 0;
	memcpy(ioc->event_type, karg.event_types,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
	mpt2sas_base_validate_event_type(ioc, ioc->event_type);

	/* initialize event_log */
	ioc->event_context = 0;
	ioc->aen_event_read_flag = 0;
	ioc->event_log = kcalloc(MPT2SAS_CTL_EVENT_LOG_SIZE,
	    sizeof(struct MPT2_IOCTL_EVENTS), GFP_KERNEL);
	if (!ioc->event_log) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
 * @arg - user space buffer containing ioctl content
 *
 * Copies as many logged events to user space as the caller's buffer
 * (hdr.max_data_size) allows, then re-arms the SIGIO notification.
 */
static long
_ctl_eventreport(void __user *arg)
{
	struct mpt2_ioctl_eventreport karg;
	struct MPT2SAS_ADAPTER *ioc;
	u32 number_bytes, max_events, max;
	struct mpt2_ioctl_eventreport __user *uarg = arg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
	    __func__));

	number_bytes = karg.hdr.max_data_size -
	    sizeof(struct mpt2_ioctl_header);
	max_events = number_bytes/sizeof(struct MPT2_IOCTL_EVENTS);
	max = min_t(u32, MPT2SAS_CTL_EVENT_LOG_SIZE, max_events);

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if (!max || !ioc->event_log)
		return -ENODATA;

	number_bytes = max * sizeof(struct MPT2_IOCTL_EVENTS);
	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* reset flag so SIGIO can restart */
	ioc->aen_event_read_flag = 0;
	return 0;
}

/**
 * _ctl_do_reset - main handler for MPT2HARDRESET opcode
 * @arg - user space buffer containing ioctl content
 *
 * Triggers a host reset on behalf of the application; refused while
 * recovery or driver load is already in progress.
 */
static long
_ctl_do_reset(void __user *arg)
{
	struct mpt2_ioctl_diag_reset karg;
	struct MPT2SAS_ADAPTER *ioc;
	int retval;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading)
		return -EAGAIN;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
	    __func__));

	retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
	    FORCE_BIG_HAMMER);
	printk(MPT2SAS_INFO_FMT "host reset: %s\n",
	    ioc->name, ((!retval) ?
"SUCCESS" : "FAILED"));
	return 0;
}

/**
 * _ctl_btdh_search_sas_device - searching for sas device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 *
 * Translates between (bus, id) and firmware device handle: 0xFFFFFFFF
 * bus/id means "look up by handle"; handle 0xFFFF means "look up by
 * bus/id".  Returns 1 on a match (btdh filled in), 0 otherwise.
 */
static int
_ctl_btdh_search_sas_device(struct MPT2SAS_ADAPTER *ioc,
    struct mpt2_ioctl_btdh_mapping *btdh)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->sas_device_list))
		return rc;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == sas_device->handle) {
			btdh->bus = sas_device->channel;
			btdh->id = sas_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == sas_device->channel && btdh->id ==
		    sas_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = sas_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_search_raid_device - searching for raid device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 *
 * Same bidirectional (bus, id) <-> handle lookup as the sas variant,
 * over the raid volume list.  Returns 1 on a match, 0 otherwise.
 */
static int
_ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
    struct mpt2_ioctl_btdh_mapping *btdh)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->raid_device_list))
		return rc;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == raid_device->handle) {
			btdh->bus = raid_device->channel;
			btdh->id = raid_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == raid_device->channel && btdh->id ==
		    raid_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = raid_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
 * @arg - user space buffer containing ioctl content
 *
 * Resolves the mapping via the sas list first, falling back to the
 * raid list, and writes the result back to user space.
 */
static long
_ctl_btdh_mapping(void __user *arg)
{
	struct mpt2_ioctl_btdh_mapping karg;
	struct
	    MPT2SAS_ADAPTER *ioc;
	int rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	rc = _ctl_btdh_search_sas_device(ioc, &karg);
	if (!rc)
		_ctl_btdh_search_raid_device(ioc, &karg);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_diag_capability - return diag buffer capability
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
static u8
_ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
{
	u8 rc = 0;

	switch (buffer_type) {
	case MPI2_DIAG_BUF_TYPE_TRACE:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_EXTENDED:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			rc = 1;
	}

	return rc;
}

/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 * Allocates (or reuses) a DMA-coherent buffer of the requested size and
 * posts it to firmware with DIAG_BUFFER_POST.
 */
static long
_ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
    struct mpt2_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	unsigned long timeleft;
	u16 smid;
	u16 ioc_status;
	u8 issue_reset = 0;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name,
		    __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) {
		printk(MPT2SAS_ERR_FMT "%s: already has a registered "
		    "buffer for buffer_type(0x%02x)\n", ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	/* The firmware requires dword-aligned buffer sizes. */
	if (diag_register->requested_buffer_size % 4)  {
		printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
		    "is not 4 byte aligned\n", ioc->name, __func__);
		return -EINVAL;
	}

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	ioc->diag_buffer_status[buffer_type] = 0;
	/*
	 * NOTE(review): length here is MPT2_PRODUCT_SPECIFIC_DWORDS, not
	 * dwords * sizeof(u32) -- looks like it copies only a quarter of
	 * the product-specific area; confirm against the struct layout.
	 */
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	/* Reuse a previously allocated buffer only if the size matches;
	 * otherwise free it and allocate fresh below. */
	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			pci_free_consistent(ioc->pdev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = pci_alloc_consistent(
			ioc->pdev, request_data_sz, &request_data_dma);
		if (request_data == NULL) {
			printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"
			    " for diag buffers, requested size(%d)\n",
			    ioc->name, __func__, request_data_sz);
			mpt2sas_base_free_smid(ioc, smid);
			return -ENOMEM;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(0x%p), "
	    "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
	    (unsigned long long)request_data_dma,
	    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	mpt2sas_base_put_smid_default(ioc, smid);
	init_completion(&ioc->ctl_cmds.done);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
			MPT2_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
		    "log_info(0x%08x)\n", ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);

 out:
	if (rc && request_data)
		pci_free_consistent(ioc->pdev, request_data_sz,
		    request_data, request_data_dma);

	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
	return rc;
}

/**
 * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt2_diag_register diag_register;

	memset(&diag_register, 0, sizeof(struct mpt2_diag_register));

	if (bits_to_register & 1) {
		printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		/* register for 1MB buffers  */
		diag_register.requested_buffer_size = (1024 * 1024);
		diag_register.unique_id = 0x7075900;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 2) {
		printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		printk(MPT2SAS_INFO_FMT "registering extended buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @arg - user space buffer containing ioctl content
 * @state - NON_BLOCKING or BLOCKING
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to
 * communicate with the driver.
 */
static long
_ctl_diag_register(void __user *arg, enum block_state state)
{
	struct mpt2_diag_register karg;
	struct MPT2SAS_ADAPTER *ioc;
	long rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	/*
	 * NOTE(review): same trylock-then-lock pattern as
	 * _ctl_do_mpt_command; a successful trylock appears to fall into
	 * the second lock -- confirm.
	 */
	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;
	rc = _ctl_diag_register_2(ioc, &karg);
	mutex_unlock(&ioc->ctl_cmds.mutex);
	return rc;
}

/**
 * _ctl_diag_unregister - application unregister with driver
 * @arg - user space buffer containing ioctl content
 *
 * This will allow the driver to cleanup any memory allocated for diag
 * messages and to free up any resources.  Only legal once the buffer
 * has been released back from firmware.
 */
static long
_ctl_diag_unregister(void __user *arg)
{
	struct mpt2_diag_unregister karg;
	struct MPT2SAS_ADAPTER *ioc;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	/* By convention the low byte of unique_id encodes the buffer type. */
	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
		    "registered\n", ioc->name, __func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) has not been "
		    "released\n", ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
		    "registered\n", ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	request_data_sz = ioc->diag_buffer_sz[buffer_type];
	request_data_dma = ioc->diag_buffer_dma[buffer_type];
	pci_free_consistent(ioc->pdev, request_data_sz,
	    request_data, request_data_dma);
	ioc->diag_buffer[buffer_type] = NULL;
	ioc->diag_buffer_status[buffer_type] = 0;
	return 0;
}

/**
 * _ctl_diag_query - query relevant info associated with diag buffers
 * @arg - user space buffer containing ioctl content
 *
 * The application will send only buffer_type and unique_id.  Driver will
 * inspect unique_id first, if valid, fill in all the info.  If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
 */
static long
_ctl_diag_query(void __user *arg)
{
	struct mpt2_diag_query karg;
	struct MPT2SAS_ADAPTER *ioc;
	void *request_data;
	int i;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	karg.application_flags = 0;
	buffer_type = karg.buffer_type;

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
		    "registered\n", ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	/* Non-zero upper bytes mean the caller supplied a full unique_id;
	 * it must match the registered one. */
	if (karg.unique_id & 0xffffff00) {
		if (karg.unique_id != ioc->unique_id[buffer_type]) {
			printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
			    "registered\n", ioc->name, __func__,
			    karg.unique_id);
			return -EINVAL;
		}
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	/* Firmware access flag only while the buffer is still fw-owned. */
	if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED)
		karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
		    MPT2_APP_FLAGS_BUFFER_VALID);
	else
		karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
		    MPT2_APP_FLAGS_BUFFER_VALID |
		    MPT2_APP_FLAGS_FW_BUFFER_ACCESS);

	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
		karg.product_specific[i] =
		    ioc->product_specific[buffer_type][i];

	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
	karg.driver_added_buffer_size = 0;
	karg.unique_id = ioc->unique_id[buffer_type];
	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];

	if (copy_to_user(arg, &karg, sizeof(struct mpt2_diag_query))) {
		printk(MPT2SAS_ERR_FMT "%s: unable to write mpt2_diag_query "
		    "data @ %p\n", ioc->name, __func__, arg);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_send_release - Diag Release Message
 * @ioc: per adapter object
 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
 * @issue_reset - specifies whether host reset is required.
 *
 * Sends DIAG_RELEASE to firmware so the host regains ownership of the
 * diag buffer; sets *issue_reset when the request times out without a
 * pending reset.
 */
static int
_ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
{
	Mpi2DiagReleaseRequest_t *mpi_request;
	Mpi2DiagReleaseReply_t *mpi_reply;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	int rc;
	unsigned long timeleft;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	rc = 0;
	*issue_reset = 0;

	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "skipping due to FAULT state\n", ioc->name,
		    __func__));
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
	mpi_request->BufferType = buffer_type;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	mpt2sas_base_put_smid_default(ioc, smid);
	init_completion(&ioc->ctl_cmds.done);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagReleaseRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			*issue_reset = 1;
		rc = -EFAULT;
		goto out;
	}

	/* process the completed Reply Message Frame */
	if
((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) { printk(MPT2SAS_ERR_FMT "%s: no reply message\n", ioc->name, __func__); rc = -EFAULT; goto out; } mpi_reply = ioc->ctl_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT2_DIAG_BUFFER_IS_RELEASED; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n", ioc->name, __func__)); } else { printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) " "log_info(0x%08x)\n", ioc->name, __func__, ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); rc = -EFAULT; } out: ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; return rc; } /** * _ctl_diag_release - request to send Diag Release Message to firmware * @arg - user space buffer containing ioctl content * @state - NON_BLOCKING or BLOCKING * * This allows ownership of the specified buffer to returned to the driver, * allowing an application to read the buffer without fear that firmware is * overwritting information in the buffer. 
 */
static long
_ctl_diag_release(void __user *arg, enum block_state state)
{
	struct mpt2_diag_release karg;
	struct MPT2SAS_ADAPTER *ioc;
	void *request_data;
	int rc;
	u8 buffer_type;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	/* the buffer type is encoded in the low byte of the unique id */
	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
		    "registered\n", ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
		    "registered\n", ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_RELEASED) {
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) "
		    "is already released\n", ioc->name, __func__,
		    buffer_type);
		return 0;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	/* buffers were released due to host reset; no firmware message
	 * is needed - just move the bookkeeping to "released" */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_DIAG_RESET)) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT2_DIAG_BUFFER_IS_RELEASED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT2_DIAG_BUFFER_IS_DIAG_RESET;
		printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) "
		    "was released due to host reset\n", ioc->name, __func__,
		    buffer_type);
		return 0;
	}

	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return
		    -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;

	rc = _ctl_send_release(ioc, buffer_type, &issue_reset);

	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);

	mutex_unlock(&ioc->ctl_cmds.mutex);
	return rc;
}

/**
 * _ctl_diag_read_buffer - request for copy of the diag buffer
 * @arg - user space buffer containing ioctl content
 * @state - NON_BLOCKING or BLOCKING
 */
static long
_ctl_diag_read_buffer(void __user *arg, enum block_state state)
{
	struct mpt2_diag_read_buffer karg;
	struct mpt2_diag_read_buffer __user *uarg = arg;
	struct MPT2SAS_ADAPTER *ioc;
	void *request_data, *diag_data;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	int rc, i;
	u8 buffer_type;
	unsigned long timeleft, request_size, copy_size;
	u16 smid;
	u16 ioc_status;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		printk(KERN_ERR "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
		return -ENODEV;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
		    "registered\n", ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
		    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	request_size = ioc->diag_buffer_sz[buffer_type];

	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
		printk(MPT2SAS_ERR_FMT "%s: either the starting_offset "
		    "or bytes_to_read are not 4 byte aligned\n", ioc->name,
		    __func__);
		return -EINVAL;
	}

	if (karg.starting_offset > request_size)
		return -EINVAL;

	diag_data = (void *)(request_data + karg.starting_offset);
	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), "
	    "offset(%d), sz(%d)\n", ioc->name, __func__,
	    diag_data, karg.starting_offset, karg.bytes_to_read));

	/* Truncate data on requests that are too large */
	if ((diag_data + karg.bytes_to_read < diag_data) ||
	    (diag_data + karg.bytes_to_read > request_data + request_size))
		copy_size = request_size - karg.starting_offset;
	else
		copy_size = karg.bytes_to_read;

	if (copy_to_user((void __user *)uarg->diagnostic_data,
	    diag_data, copy_size)) {
		printk(MPT2SAS_ERR_FMT "%s: Unable to write "
		    "mpt_diag_read_buffer_t data @ %p\n", ioc->name,
		    __func__, diag_data);
		return -EFAULT;
	}

	if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0)
		return 0;

	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: Reregister "
	    "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type));
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
		    "buffer_type(0x%02x) is still registered\n", ioc->name,
		    __func__, buffer_type));
		return 0;
	}
	/* Get a free request frame and save the message context.
	*/
	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
		return -ERESTARTSYS;

	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT2_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	/* re-post the same buffer to firmware (re-register) */
	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = buffer_type;
	mpi_request->BufferLength =
	    cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
	mpi_request->BufferAddress =
	    cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
		    cpu_to_le32(ioc->product_specific[buffer_type][i]);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	mpt2sas_base_put_smid_default(ioc, smid);
	init_completion(&ioc->ctl_cmds.done);
	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
		printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
		printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT2_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
		    "log_info(0x%08x)\n", ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);

 out:
	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
	mutex_unlock(&ioc->ctl_cmds.mutex);
	return rc;
}

/**
 * _ctl_ioctl_main - main ioctl entry point
 * @file - (struct file)
 * @cmd - ioctl opcode
 * @arg -
 *
 * Dispatches on the ioctl opcode, validating _IOC_SIZE against the
 * expected argument structure before calling the handler.
 */
static long
_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
{
	enum block_state state;
	long ret = -EINVAL;

	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;

	switch (cmd) {
	case MPT2IOCINFO:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
			ret = _ctl_getiocinfo(arg);
		break;
	case MPT2COMMAND:
	{
		struct mpt2_ioctl_command karg;
		struct mpt2_ioctl_command __user *uarg;
		struct MPT2SAS_ADAPTER *ioc;

		if (copy_from_user(&karg, arg, sizeof(karg))) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			return -EFAULT;
		}

		if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
		    !ioc)
			return -ENODEV;

		if (ioc->shost_recovery || ioc->pci_error_recovery ||
		    ioc->is_driver_loading)
			return -EAGAIN;

		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
			uarg = arg;
			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
		}
		break;
	}
	case MPT2EVENTQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
			ret = _ctl_eventquery(arg);
		break;
	case MPT2EVENTENABLE:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
			ret = _ctl_eventenable(arg);
		break;
	case MPT2EVENTREPORT:
		ret = _ctl_eventreport(arg);
		break;
	case MPT2HARDRESET:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
			ret = _ctl_do_reset(arg);
		break;
	case MPT2BTDHMAPPING:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
			ret = _ctl_btdh_mapping(arg);
		break;
	case MPT2DIAGREGISTER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
			ret = _ctl_diag_register(arg, state);
		break;
	case MPT2DIAGUNREGISTER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
			ret = _ctl_diag_unregister(arg);
		break;
	case MPT2DIAGQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
			ret = _ctl_diag_query(arg);
		break;
	case MPT2DIAGRELEASE:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
			ret = _ctl_diag_release(arg, state);
		break;
	case MPT2DIAGREADBUFFER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
			ret = _ctl_diag_read_buffer(arg, state);
		break;
	default:
	{
		struct mpt2_ioctl_command karg;
		struct MPT2SAS_ADAPTER *ioc;

		if (copy_from_user(&karg, arg, sizeof(karg))) {
			printk(KERN_ERR "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			return -EFAULT;
		}

		if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
		    !ioc)
			return -ENODEV;

		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
		break;
	}
	}
	return ret;
}

/**
 * _ctl_ioctl - main ioctl entry point (unlocked)
 * @file - (struct file)
 * @cmd - ioctl opcode
 * @arg -
 */
static long
_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&_ctl_mutex);
	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
	mutex_unlock(&_ctl_mutex);
	return ret;
}

#ifdef CONFIG_COMPAT
/**
 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
 * @file - (struct file)
 * @cmd - ioctl opcode
 * @arg - (struct mpt2_ioctl_command32)
 *
 * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
*/ static long _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) { struct mpt2_ioctl_command32 karg32; struct mpt2_ioctl_command32 __user *uarg; struct mpt2_ioctl_command karg; struct MPT2SAS_ADAPTER *ioc; enum block_state state; if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32)) return -EINVAL; uarg = (struct mpt2_ioctl_command32 __user *) arg; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -EFAULT; } if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->is_driver_loading) return -EAGAIN; memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); karg.hdr.ioc_number = karg32.hdr.ioc_number; karg.hdr.port_number = karg32.hdr.port_number; karg.hdr.max_data_size = karg32.hdr.max_data_size; karg.timeout = karg32.timeout; karg.max_reply_bytes = karg32.max_reply_bytes; karg.data_in_size = karg32.data_in_size; karg.data_out_size = karg32.data_out_size; karg.max_sense_bytes = karg32.max_sense_bytes; karg.data_sge_offset = karg32.data_sge_offset; karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); } /** * _ctl_ioctl_compat - main ioctl entry point (compat) * @file - * @cmd - * @arg - * * This routine handles 32 bit applications in 64bit os. 
 */
static long
_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&_ctl_mutex);
	if (cmd == MPT2COMMAND32)
		ret = _ctl_compat_mpt_command(file, cmd, arg);
	else
		ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
	mutex_unlock(&_ctl_mutex);
	return ret;
}
#endif

/* scsi host attributes */

/**
 * _ctl_version_fw_show - firmware version
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
	    ioc->facts.FWVersion.Word & 0x000000FF);
}
static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);

/**
 * _ctl_version_bios_show - bios version
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
	    (version & 0xFF000000) >> 24,
	    (version & 0x00FF0000) >> 16,
	    (version & 0x0000FF00) >> 8,
	    version & 0x000000FF);
}
static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);

/**
 * _ctl_version_mpi_show - MPI (message passing interface) version
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
}
static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);

/**
 * _ctl_version_product_show - product name
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
}
static DEVICE_ATTR(version_product, S_IRUGO,
   _ctl_version_product_show, NULL);

/**
 * _ctl_version_nvdata_persistent_show - nvdata persistent version
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_nvdata_persistent_show(struct device *cdev,
    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n",
	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
    _ctl_version_nvdata_persistent_show, NULL);

/**
 * _ctl_version_nvdata_default_show - nvdata default version
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_version_nvdata_default_show(struct device *cdev,
    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n",
	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
    _ctl_version_nvdata_default_show, NULL);

/**
 * _ctl_board_name_show - board name
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
}
static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);

/**
 * _ctl_board_assembly_show - board assembly name
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
}
static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);

/**
 * _ctl_board_tracer_show - board tracer number
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
}
static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);

/**
 * _ctl_io_delay_show - io missing delay
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is for firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);

/**
 * _ctl_device_delay_show - device missing delay
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is for firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);

/**
 * _ctl_fw_queue_depth_show - global credits
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is firmware queue depth limit
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
}
static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);

/**
 * _ctl_host_sas_address_show - sas address
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the controller sas address
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)ioc->sas_hba.sas_address);
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
    _ctl_host_sas_address_show, NULL);

/**
 * _ctl_logging_level_show - logging level
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}
static ssize_t
_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	/* value is parsed as hex */
	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;

	ioc->logging_level = val;
	printk(MPT2SAS_INFO_FMT "logging_level=%08xh\n", ioc->name,
	    ioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
    _ctl_logging_level_show, _ctl_logging_level_store);

/* device attributes */
/*
 * _ctl_fwfault_debug_show - show/store fwfault_debug
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * mpt2sas_fwfault_debug is command line option
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}
static ssize_t
_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->fwfault_debug = val;
	printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
	    ioc->fwfault_debug);
	return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
    _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);

/**
 * _ctl_ioc_reset_count_show - ioc reset count
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the number of times the controller has been reset.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08d\n", ioc->ioc_reset_count);
}
static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
    _ctl_ioc_reset_count_show, NULL);

/**
 * _ctl_ioc_reply_queue_count_show - number of reply queues
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is number of reply queues
 *
 * A sysfs 'read-only' shost attribute.
*/ static ssize_t _ctl_ioc_reply_queue_count_show(struct device *cdev, struct device_attribute *attr, char *buf) { u8 reply_queue_count; struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); if ((ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) reply_queue_count = ioc->reply_queue_count; else reply_queue_count = 1; return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); } static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, NULL); struct DIAG_BUFFER_START { __le32 Size; __le32 DiagVersion; u8 BufferType; u8 Reserved[3]; __le32 Reserved1; __le32 Reserved2; __le32 Reserved3; }; /** * _ctl_host_trace_buffer_size_show - host buffer size (trace only) * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_host_trace_buffer_size_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); u32 size = 0; struct DIAG_BUFFER_START *request_data; if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } request_data = (struct DIAG_BUFFER_START *) ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || le32_to_cpu(request_data->DiagVersion) == 0x01000000) && le32_to_cpu(request_data->Reserved3) == 0x4742444c) size = le32_to_cpu(request_data->Size); ioc->ring_buffer_sz = size; return snprintf(buf, PAGE_SIZE, "%d\n", size); } static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, _ctl_host_trace_buffer_size_show, NULL); /** * 
_ctl_host_trace_buffer_show - firmware ring buffer (trace only) * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. * * You will only be able to read 4k bytes of ring buffer at a time. * In order to read beyond 4k bytes, you will have to write out the * offset to the same attribute, it will move the pointer. */ static ssize_t _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); void *request_data; u32 size; if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) { printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not " "registered\n", ioc->name, __func__); return 0; } if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) return 0; size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; size = (size > PAGE_SIZE) ? PAGE_SIZE : size; request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; memcpy(buf, request_data, size); return size; } static ssize_t _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); int val = 0; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; ioc->ring_buffer_offset = val; return strlen(buf); } static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); /*****************************************/ /** * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) * @cdev - pointer to embedded class device * @buf - the buffer returned * * A sysfs 'read/write' shost attribute. 
* * This is a mechnism to post/release host_trace_buffers */ static ssize_t _ctl_host_trace_buffer_enable_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0)) return snprintf(buf, PAGE_SIZE, "off\n"); else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED)) return snprintf(buf, PAGE_SIZE, "release\n"); else return snprintf(buf, PAGE_SIZE, "post\n"); } static ssize_t _ctl_host_trace_buffer_enable_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); char str[10] = ""; struct mpt2_diag_register diag_register; u8 issue_reset = 0; if (sscanf(buf, "%s", str) != 1) return -EINVAL; if (!strcmp(str, "post")) { /* exit out if host buffers are already posted */ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) && ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED) == 0)) goto out; memset(&diag_register, 0, sizeof(struct mpt2_diag_register)); printk(MPT2SAS_INFO_FMT "posting host trace buffers\n", ioc->name); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; diag_register.requested_buffer_size = (1024 * 1024); diag_register.unique_id = 0x7075900; ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; _ctl_diag_register_2(ioc, &diag_register); } else if (!strcmp(str, "release")) { /* exit out if host buffers are already released */ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) goto out; if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) goto out; if 
((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT2_DIAG_BUFFER_IS_RELEASED)) goto out; printk(MPT2SAS_INFO_FMT "releasing host trace buffer\n", ioc->name); _ctl_send_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } out: return strlen(buf); } static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, _ctl_host_trace_buffer_enable_show, _ctl_host_trace_buffer_enable_store); struct device_attribute *mpt2sas_host_attrs[] = { &dev_attr_version_fw, &dev_attr_version_bios, &dev_attr_version_mpi, &dev_attr_version_product, &dev_attr_version_nvdata_persistent, &dev_attr_version_nvdata_default, &dev_attr_board_name, &dev_attr_board_assembly, &dev_attr_board_tracer, &dev_attr_io_delay, &dev_attr_device_delay, &dev_attr_logging_level, &dev_attr_fwfault_debug, &dev_attr_fw_queue_depth, &dev_attr_host_sas_address, &dev_attr_ioc_reset_count, &dev_attr_host_trace_buffer_size, &dev_attr_host_trace_buffer, &dev_attr_host_trace_buffer_enable, &dev_attr_reply_queue_count, NULL, }; /** * _ctl_device_sas_address_show - sas address * @cdev - pointer to embedded class device * @buf - the buffer returned * * This is the sas address for the target * * A sysfs 'read-only' shost attribute. */ static ssize_t _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata; return snprintf(buf, PAGE_SIZE, "0x%016llx\n", (unsigned long long)sas_device_priv_data->sas_target->sas_address); } static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); /** * _ctl_device_handle_show - device handle * @cdev - pointer to embedded class device * @buf - the buffer returned * * This is the firmware assigned device handle * * A sysfs 'read-only' shost attribute. 
*/ static ssize_t _ctl_device_handle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata; return snprintf(buf, PAGE_SIZE, "0x%04x\n", sas_device_priv_data->sas_target->handle); } static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); struct device_attribute *mpt2sas_dev_attrs[] = { &dev_attr_sas_address, &dev_attr_sas_device_handle, NULL, }; static const struct file_operations ctl_fops = { .owner = THIS_MODULE, .unlocked_ioctl = _ctl_ioctl, .release = _ctl_release, .poll = _ctl_poll, .fasync = _ctl_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = _ctl_ioctl_compat, #endif .llseek = noop_llseek, }; static struct miscdevice ctl_dev = { .minor = MPT2SAS_MINOR, .name = MPT2SAS_DEV_NAME, .fops = &ctl_fops, }; /** * mpt2sas_ctl_init - main entry point for ctl. * */ void mpt2sas_ctl_init(void) { async_queue = NULL; if (misc_register(&ctl_dev) < 0) printk(KERN_ERR "%s can't register misc device [minor=%d]\n", MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); init_waitqueue_head(&ctl_poll_wait); } /** * mpt2sas_ctl_exit - exit point for ctl * */ void mpt2sas_ctl_exit(void) { struct MPT2SAS_ADAPTER *ioc; int i; list_for_each_entry(ioc, &mpt2sas_ioc_list, list) { /* free memory associated to diag buffers */ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { if (!ioc->diag_buffer[i]) continue; pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); ioc->diag_buffer[i] = NULL; ioc->diag_buffer_status[i] = 0; } kfree(ioc->event_log); } misc_deregister(&ctl_dev); }
gpl-2.0
DrGrip/tiamat-2.6.38-LEO-Dr_Grip
arch/powerpc/sysdev/fsl_pci.c
160
22787
/* * MPC83xx/85xx/86xx PCI/PCIE support routing. * * Copyright 2007-2010 Freescale Semiconductor, Inc. * Copyright 2008-2009 MontaVista Software, Inc. * * Initial author: Xianghua Xiao <x.xiao@freescale.com> * Recode: ZHANG WEI <wei.zhang@freescale.com> * Rewrite the routing for Frescale PCI and PCI Express * Roy Zang <tie-fei.zang@freescale.com> * MPC83xx PCI-Express support: * Tony Li <tony.li@freescale.com> * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/log2.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> static int fsl_pcie_bus_fixup, is_mpc83xx_pci; static void __init quirk_fsl_pcie_header(struct pci_dev *dev) { /* if we aren't a PCIe don't bother */ if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) return; dev->class = PCI_CLASS_BRIDGE_PCI << 8; fsl_pcie_bus_fixup = 1; return; } static int __init fsl_pcie_check_link(struct pci_controller *hose) { u32 val; early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val); if (val < PCIE_LTSSM_L0) return 1; return 0; } #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, unsigned int index, const struct resource *res, resource_size_t offset) { resource_size_t pci_addr = res->start - offset; resource_size_t phys_addr = res->start; resource_size_t size = res->end - res->start + 1; u32 flags = 0x80044000; /* enable & mem R/W */ unsigned int i; pr_debug("PCI MEM resource start 0x%016llx, size 
0x%016llx.\n", (u64)res->start, (u64)size); if (res->flags & IORESOURCE_PREFETCH) flags |= 0x10000000; /* enable relaxed ordering */ for (i = 0; size > 0; i++) { unsigned int bits = min(__ilog2(size), __ffs(pci_addr | phys_addr)); if (index + i >= 5) return -1; out_be32(&pci->pow[index + i].potar, pci_addr >> 12); out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44); out_be32(&pci->pow[index + i].powbar, phys_addr >> 12); out_be32(&pci->pow[index + i].powar, flags | (bits - 1)); pci_addr += (resource_size_t)1U << bits; phys_addr += (resource_size_t)1U << bits; size -= (resource_size_t)1U << bits; } return i; } /* atmu setup for fsl pci/pcie controller */ static void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc) { struct ccsr_pci __iomem *pci; int i, j, n, mem_log, win_idx = 2; u64 mem, sz, paddr_hi = 0; u64 paddr_lo = ULLONG_MAX; u32 pcicsrbar = 0, pcicsrbar_sz; u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; char *name = hose->dn->full_name; pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); if (!pci) { dev_err(hose->parent, "Unable to map ATMU registers\n"); return; } /* Disable all windows (except powar0 since it's ignored) */ for(i = 1; i < 5; i++) out_be32(&pci->pow[i].powar, 0); for(i = 0; i < 3; i++) out_be32(&pci->piw[i].piwar, 0); /* Setup outbound MEM window */ for(i = 0, j = 1; i < 3; i++) { if (!(hose->mem_resources[i].flags & IORESOURCE_MEM)) continue; paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start); paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end); n = setup_one_atmu(pci, j, &hose->mem_resources[i], hose->pci_mem_offset); if (n < 0 || j >= 5) { pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i); hose->mem_resources[i].flags |= IORESOURCE_DISABLED; } else j += n; } /* Setup outbound IO window */ if (hose->io_resource.flags 
& IORESOURCE_IO) { if (j >= 5) { pr_err("Ran out of outbound PCI ATMUs for IO resource\n"); } else { pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, " "phy base 0x%016llx.\n", (u64)hose->io_resource.start, (u64)hose->io_resource.end - (u64)hose->io_resource.start + 1, (u64)hose->io_base_phys); out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12)); out_be32(&pci->pow[j].potear, 0); out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12)); /* Enable, IO R/W */ out_be32(&pci->pow[j].powar, 0x80088000 | (__ilog2(hose->io_resource.end - hose->io_resource.start + 1) - 1)); } } /* convert to pci address space */ paddr_hi -= hose->pci_mem_offset; paddr_lo -= hose->pci_mem_offset; if (paddr_hi == paddr_lo) { pr_err("%s: No outbound window space\n", name); return ; } if (paddr_lo == 0) { pr_err("%s: No space for inbound window\n", name); return ; } /* setup PCSRBAR/PEXCSRBAR */ early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff); early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz); pcicsrbar_sz = ~pcicsrbar_sz + 1; if (paddr_hi < (0x100000000ull - pcicsrbar_sz) || (paddr_lo > 0x100000000ull)) pcicsrbar = 0x100000000ull - pcicsrbar_sz; else pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz; early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar); paddr_lo = min(paddr_lo, (u64)pcicsrbar); pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); /* Setup inbound mem window */ mem = memblock_end_of_DRAM(); sz = min(mem, paddr_lo); mem_log = __ilog2_u64(sz); /* PCIe can overmap inbound & outbound since RX & TX are separated */ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { /* Size window to exact size if power-of-two or one size up */ if ((1ull << mem_log) != mem) { if ((1ull << mem_log) > mem) pr_info("%s: Setting PCI inbound window " "greater than memory size\n", name); mem_log++; } piwar |= (mem_log - 1); /* Setup inbound memory window */ out_be32(&pci->piw[win_idx].pitar, 0x00000000); 
out_be32(&pci->piw[win_idx].piwbar, 0x00000000); out_be32(&pci->piw[win_idx].piwar, piwar); win_idx--; hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)sz; } else { u64 paddr = 0; /* Setup inbound memory window */ out_be32(&pci->piw[win_idx].pitar, paddr >> 12); out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); out_be32(&pci->piw[win_idx].piwar, (piwar | (mem_log - 1))); win_idx--; paddr += 1ull << mem_log; sz -= 1ull << mem_log; if (sz) { mem_log = __ilog2_u64(sz); piwar |= (mem_log - 1); out_be32(&pci->piw[win_idx].pitar, paddr >> 12); out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); out_be32(&pci->piw[win_idx].piwar, piwar); win_idx--; paddr += 1ull << mem_log; } hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)paddr; } if (hose->dma_window_size < mem) { #ifndef CONFIG_SWIOTLB pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", name); #endif /* adjusting outbound windows could reclaim space in mem map */ if (paddr_hi < 0xffffffffull) pr_warning("%s: WARNING: Outbound window cfg leaves " "gaps in memory map. 
Adjusting the memory map " "could reduce unnecessary bounce buffering.\n", name); pr_info("%s: DMA window size is 0x%llx\n", name, (u64)hose->dma_window_size); } iounmap(pci); } static void __init setup_pci_cmd(struct pci_controller *hose) { u16 cmd; int cap_x; early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd); cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX); if (cap_x) { int pci_x_cmd = cap_x + PCI_X_CMD; cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E; early_write_config_word(hose, 0, 0, pci_x_cmd, cmd); } else { early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); } } void fsl_pcibios_fixup_bus(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); int i; if ((bus->parent == hose->bus) && ((fsl_pcie_bus_fixup && early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) || (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK))) { for (i = 0; i < 4; ++i) { struct resource *res = bus->resource[i]; struct resource *par = bus->parent->resource[i]; if (res) { res->start = 0; res->end = 0; res->flags = 0; } if (res && par) { res->start = par->start; res->end = par->end; res->flags = par->flags; } } } } int __init fsl_add_bridge(struct device_node *dev, int is_primary) { int len; struct pci_controller *hose; struct resource rsrc; const int *bus_range; pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc)) { printk(KERN_WARNING "Can't get pci register base!"); return -ENOMEM; } /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if 
(!hose) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0x0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, PPC_INDIRECT_TYPE_BIG_ENDIAN); setup_pci_cmd(hose); /* check PCI express link status */ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG | PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. " "Firmware bus number: %d->%d\n", (unsigned long long)rsrc.start, hose->first_busno, hose->last_busno); pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", hose, hose->cfg_addr, hose->cfg_data); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, is_primary); /* Setup PEX window registers */ setup_pci_atmu(hose, &rsrc); return 0; } DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, 
PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, 
PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header); #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8308, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378, quirk_fsl_pcie_header); struct mpc83xx_pcie_priv { void __iomem *cfg_type0; void __iomem *cfg_type1; u32 dev_base; }; struct pex_inbound_window { u32 ar; u32 tar; u32 barl; u32 barh; }; /* * With the 
convention of u-boot, the PCIE outbound window 0 serves * as configuration transactions outbound. */ #define PEX_OUTWIN0_BAR 0xCA4 #define PEX_OUTWIN0_TAL 0xCA8 #define PEX_OUTWIN0_TAH 0xCAC #define PEX_RC_INWIN_BASE 0xE60 #define PEX_RCIWARn_EN 0x1 static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) { struct pci_controller *hose = pci_bus_to_host(bus); if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) return PCIBIOS_DEVICE_NOT_FOUND; /* * Workaround for the HW bug: for Type 0 configure transactions the * PCI-E controller does not check the device number bits and just * assumes that the device number bits are 0. */ if (bus->number == hose->first_busno || bus->primary == hose->first_busno) { if (devfn & 0xf8) return PCIBIOS_DEVICE_NOT_FOUND; } if (ppc_md.pci_exclude_device) { if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; } return PCIBIOS_SUCCESSFUL; } static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus, unsigned int devfn, int offset) { struct pci_controller *hose = pci_bus_to_host(bus); struct mpc83xx_pcie_priv *pcie = hose->dn->data; u32 dev_base = bus->number << 24 | devfn << 16; int ret; ret = mpc83xx_pcie_exclude_device(bus, devfn); if (ret) return NULL; offset &= 0xfff; /* Type 0 */ if (bus->number == hose->first_busno) return pcie->cfg_type0 + offset; if (pcie->dev_base == dev_base) goto mapped; out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base); pcie->dev_base = dev_base; mapped: return pcie->cfg_type1 + offset; } static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { void __iomem *cfg_addr; cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); if (!cfg_addr) return PCIBIOS_DEVICE_NOT_FOUND; switch (len) { case 1: *val = in_8(cfg_addr); break; case 2: *val = in_le16(cfg_addr); break; default: *val = in_le32(cfg_addr); break; } return PCIBIOS_SUCCESSFUL; } static int mpc83xx_pcie_write_config(struct pci_bus 
*bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); void __iomem *cfg_addr; cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); if (!cfg_addr) return PCIBIOS_DEVICE_NOT_FOUND; /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) val &= 0xffffff00; switch (len) { case 1: out_8(cfg_addr, val); break; case 2: out_le16(cfg_addr, val); break; default: out_le32(cfg_addr, val); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops mpc83xx_pcie_ops = { .read = mpc83xx_pcie_read_config, .write = mpc83xx_pcie_write_config, }; static int __init mpc83xx_pcie_setup(struct pci_controller *hose, struct resource *reg) { struct mpc83xx_pcie_priv *pcie; u32 cfg_bar; int ret = -ENOMEM; pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL); if (!pcie) return ret; pcie->cfg_type0 = ioremap(reg->start, resource_size(reg)); if (!pcie->cfg_type0) goto err0; cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR); if (!cfg_bar) { /* PCI-E isn't configured. 
*/ ret = -ENODEV; goto err1; } pcie->cfg_type1 = ioremap(cfg_bar, 0x1000); if (!pcie->cfg_type1) goto err1; WARN_ON(hose->dn->data); hose->dn->data = pcie; hose->ops = &mpc83xx_pcie_ops; out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0); out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0); if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; return 0; err1: iounmap(pcie->cfg_type0); err0: kfree(pcie); return ret; } int __init mpc83xx_add_bridge(struct device_node *dev) { int ret; int len; struct pci_controller *hose; struct resource rsrc_reg; struct resource rsrc_cfg; const int *bus_range; int primary; is_mpc83xx_pci = 1; if (!of_device_is_available(dev)) { pr_warning("%s: disabled by the firmware.\n", dev->full_name); return -ENODEV; } pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ if (of_address_to_resource(dev, 0, &rsrc_reg)) { printk(KERN_WARNING "Can't get pci register base!\n"); return -ENOMEM; } memset(&rsrc_cfg, 0, sizeof(rsrc_cfg)); if (of_address_to_resource(dev, 1, &rsrc_cfg)) { printk(KERN_WARNING "No pci config register base in dev tree, " "using default\n"); /* * MPC83xx supports up to two host controllers * one at 0x8500 has config space registers at 0x8300 * one at 0x8600 has config space registers at 0x8380 */ if ((rsrc_reg.start & 0xfffff) == 0x8500) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300; else if ((rsrc_reg.start & 0xfffff) == 0x8600) rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380; } /* * Controller at offset 0x8500 is primary */ if ((rsrc_reg.start & 0xfffff) == 0x8500) primary = 1; else primary = 0; /* Get bus range if any */ bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); } ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); hose = pcibios_alloc_controller(dev); if (!hose) return -ENOMEM; hose->first_busno = 
bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) { ret = mpc83xx_pcie_setup(hose, &rsrc_reg); if (ret) goto err0; } else { setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 4, 0); } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. " "Firmware bus number: %d->%d\n", (unsigned long long)rsrc_reg.start, hose->first_busno, hose->last_busno); pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", hose, hose->cfg_addr, hose->cfg_data); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, primary); return 0; err0: pcibios_free_controller(hose); return ret; } #endif /* CONFIG_PPC_83xx */ u64 fsl_pci_immrbar_base(struct pci_controller *hose) { #ifdef CONFIG_PPC_83xx if (is_mpc83xx_pci) { struct mpc83xx_pcie_priv *pcie = hose->dn->data; struct pex_inbound_window *in; int i; /* Walk the Root Complex Inbound windows to match IMMR base */ in = pcie->cfg_type0 + PEX_RC_INWIN_BASE; for (i = 0; i < 4; i++) { /* not enabled, skip */ if (!in_le32(&in[i].ar) & PEX_RCIWARn_EN) continue; if (get_immrbase() == in_le32(&in[i].tar)) return (u64)in_le32(&in[i].barh) << 32 | in_le32(&in[i].barl); } printk(KERN_WARNING "could not find PCI BAR matching IMMR\n"); } #endif #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) if (!is_mpc83xx_pci) { u32 base; pci_bus_read_config_dword(hose->bus, PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); return base; } #endif return 0; }
gpl-2.0
cneira/ebpf-backports
linux-3.10.0-514.21.1.el7.x86_64/drivers/acpi/acpica/evregion.c
160
22574
/****************************************************************************** * * Module Name: evregion - Operation Region support * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evregion") extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ static void acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node); static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value); /******************************************************************************* * * FUNCTION: acpi_ev_initialize_op_regions * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Execute _REG methods for all Operation Regions that have * an installed default region handler. * ******************************************************************************/ acpi_status acpi_ev_initialize_op_regions(void) { acpi_status status; u32 i; ACPI_FUNCTION_TRACE(ev_initialize_op_regions); status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Run the _REG methods for op_regions in each default address space */ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { /* * Make sure the installed handler is the DEFAULT handler. 
If not the * default, the _REG methods will have already been run (when the * handler was installed) */ if (acpi_ev_has_default_handler(acpi_gbl_root_node, acpi_gbl_default_address_spaces [i])) { status = acpi_ev_execute_reg_methods(acpi_gbl_root_node, acpi_gbl_default_address_spaces [i]); } } acpi_gbl_reg_methods_executed = TRUE; (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_address_space_dispatch * * PARAMETERS: region_obj - Internal region object * field_obj - Corresponding field. Can be NULL. * function - Read or Write operation * region_offset - Where in the region to read or write * bit_width - Field width in bits (8, 16, 32, or 64) * value - Pointer to in or out value, must be * a full 64-bit integer * * RETURN: Status * * DESCRIPTION: Dispatch an address space or operation region access to * a previously installed handler. * ******************************************************************************/ acpi_status acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *field_obj, u32 function, u32 region_offset, u32 bit_width, u64 *value) { acpi_status status; acpi_adr_space_handler handler; acpi_adr_space_setup region_setup; union acpi_operand_object *handler_desc; union acpi_operand_object *region_obj2; void *region_context = NULL; struct acpi_connection_info *context; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Ensure that there is a handler associated with this region */ handler_desc = region_obj->region.handler; if (!handler_desc) { ACPI_ERROR((AE_INFO, "No handler for Region [%4.4s] (%p) [%s]", acpi_ut_get_node_name(region_obj->region.node), region_obj, acpi_ut_get_region_name(region_obj->region. 
space_id))); return_ACPI_STATUS(AE_NOT_EXIST); } context = handler_desc->address_space.context; /* * It may be the case that the region has never been initialized. * Some types of regions require special init code */ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { /* This region has not been initialized yet, do it */ region_setup = handler_desc->address_space.setup; if (!region_setup) { /* No initialization routine, exit with error */ ACPI_ERROR((AE_INFO, "No init routine for region(%p) [%s]", region_obj, acpi_ut_get_region_name(region_obj->region. space_id))); return_ACPI_STATUS(AE_NOT_EXIST); } /* * We must exit the interpreter because the region setup will * potentially execute control methods (for example, the _REG method * for this region) */ acpi_ex_exit_interpreter(); status = region_setup(region_obj, ACPI_REGION_ACTIVATE, context, &region_context); /* Re-enter the interpreter */ acpi_ex_enter_interpreter(); /* Check for failure of the Region Setup */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During region initialization: [%s]", acpi_ut_get_region_name(region_obj-> region. space_id))); return_ACPI_STATUS(status); } /* Region initialization may have been completed by region_setup */ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; if (region_obj2->extra.region_context) { /* The handler for this region was already installed */ ACPI_FREE(region_context); } else { /* * Save the returned context for use in all accesses to * this particular region */ region_obj2->extra.region_context = region_context; } } } /* We have everything we need, we can invoke the address space handler */ handler = handler_desc->address_space.handler; ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", &region_obj->region.handler->address_space, handler, ACPI_FORMAT_NATIVE_UINT(region_obj->region.address + region_offset), acpi_ut_get_region_name(region_obj->region. 
space_id))); /* * Special handling for generic_serial_bus and general_purpose_io: * There are three extra parameters that must be passed to the * handler via the context: * 1) Connection buffer, a resource template from Connection() op. * 2) Length of the above buffer. * 3) Actual access length from the access_as() op. */ if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && context && field_obj) { /* Get the Connection (resource_template) buffer */ context->connection = field_obj->field.resource_buffer; context->length = field_obj->field.resource_length; context->access_length = field_obj->field.access_length; } if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* * For handlers other than the default (supplied) handlers, we must * exit the interpreter because the handler *might* block -- we don't * know what it will do, so we can't hold the lock on the intepreter. */ acpi_ex_exit_interpreter(); } /* Call the handler */ status = handler(function, (region_obj->region.address + region_offset), bit_width, value, context, region_obj2->extra.region_context); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", acpi_ut_get_region_name(region_obj->region. space_id))); } if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* * We just returned from a non-default handler, we must re-enter the * interpreter */ acpi_ex_enter_interpreter(); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_detach_region * * PARAMETERS: region_obj - Region Object * acpi_ns_is_locked - Namespace Region Already Locked? * * RETURN: None * * DESCRIPTION: Break the association between the handler and the region * this is a two way association. 
* ******************************************************************************/ void acpi_ev_detach_region(union acpi_operand_object *region_obj, u8 acpi_ns_is_locked) { union acpi_operand_object *handler_obj; union acpi_operand_object *obj_desc; union acpi_operand_object **last_obj_ptr; acpi_adr_space_setup region_setup; void **region_context; union acpi_operand_object *region_obj2; acpi_status status; ACPI_FUNCTION_TRACE(ev_detach_region); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_VOID; } region_context = &region_obj2->extra.region_context; /* Get the address handler from the region object */ handler_obj = region_obj->region.handler; if (!handler_obj) { /* This region has no handler, all done */ return_VOID; } /* Find this region in the handler's list */ obj_desc = handler_obj->address_space.region_list; last_obj_ptr = &handler_obj->address_space.region_list; while (obj_desc) { /* Is this the correct Region? */ if (obj_desc == region_obj) { ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Removing Region %p from address handler %p\n", region_obj, handler_obj)); /* This is it, remove it from the handler's list */ *last_obj_ptr = obj_desc->region.next; obj_desc->region.next = NULL; /* Must clear field */ if (acpi_ns_is_locked) { status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_VOID; } } /* Now stop region accesses by executing the _REG method */ status = acpi_ev_execute_reg_method(region_obj, ACPI_REG_DISCONNECT); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "from region _REG, [%s]", acpi_ut_get_region_name (region_obj->region.space_id))); } if (acpi_ns_is_locked) { status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_VOID; } } /* * If the region has been activated, call the setup handler with * the deactivate notification */ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { region_setup = handler_obj->address_space.setup; status = 
region_setup(region_obj, ACPI_REGION_DEACTIVATE, handler_obj->address_space. context, region_context); /* Init routine may fail, Just ignore errors */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "from region handler - deactivate, [%s]", acpi_ut_get_region_name (region_obj->region. space_id))); } region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); } /* * Remove handler reference in the region * * NOTE: this doesn't mean that the region goes away, the region * is just inaccessible as indicated to the _REG method * * If the region is on the handler's list, this must be the * region's handler */ region_obj->region.handler = NULL; acpi_ut_remove_reference(handler_obj); return_VOID; } /* Walk the linked list of handlers */ last_obj_ptr = &obj_desc->region.next; obj_desc = obj_desc->region.next; } /* If we get here, the region was not in the handler's region list */ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Cannot remove region %p from address handler %p\n", region_obj, handler_obj)); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ev_attach_region * * PARAMETERS: handler_obj - Handler Object * region_obj - Region Object * acpi_ns_is_locked - Namespace Region Already Locked? * * RETURN: None * * DESCRIPTION: Create the association between the handler and the region * this is a two way association. * ******************************************************************************/ acpi_status acpi_ev_attach_region(union acpi_operand_object *handler_obj, union acpi_operand_object *region_obj, u8 acpi_ns_is_locked) { ACPI_FUNCTION_TRACE(ev_attach_region); ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Adding Region [%4.4s] %p to address handler %p [%s]\n", acpi_ut_get_node_name(region_obj->region.node), region_obj, handler_obj, acpi_ut_get_region_name(region_obj->region. 
space_id))); /* Link this region to the front of the handler's list */ region_obj->region.next = handler_obj->address_space.region_list; handler_obj->address_space.region_list = region_obj; /* Install the region's handler */ if (region_obj->region.handler) { return_ACPI_STATUS(AE_ALREADY_EXISTS); } region_obj->region.handler = handler_obj; acpi_ut_add_reference(handler_obj); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_execute_reg_method * * PARAMETERS: region_obj - Region object * function - Passed to _REG: On (1) or Off (0) * * RETURN: Status * * DESCRIPTION: Execute _REG method for a region * ******************************************************************************/ acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) { struct acpi_evaluate_info *info; union acpi_operand_object *args[3]; union acpi_operand_object *region_obj2; acpi_status status; ACPI_FUNCTION_TRACE(ev_execute_reg_method); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_ACPI_STATUS(AE_NOT_EXIST); } if (region_obj2->extra.method_REG == NULL) { return_ACPI_STATUS(AE_OK); } /* Allocate and initialize the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->prefix_node = region_obj2->extra.method_REG; info->relative_pathname = NULL; info->parameters = args; info->flags = ACPI_IGNORE_RETURN_VALUE; /* * The _REG method has two arguments: * * arg0 - Integer: * Operation region space ID Same value as region_obj->Region.space_id * * arg1 - Integer: * connection status 1 for connecting the handler, 0 for disconnecting * the handler (Passed as a parameter) */ args[0] = acpi_ut_create_integer_object((u64)region_obj->region.space_id); if (!args[0]) { status = AE_NO_MEMORY; goto cleanup1; } args[1] = acpi_ut_create_integer_object((u64)function); if 
(!args[1]) { status = AE_NO_MEMORY; goto cleanup2; } args[2] = NULL; /* Terminate list */ /* Execute the method, no return value */ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, info->prefix_node, NULL)); status = acpi_ns_evaluate(info); acpi_ut_remove_reference(args[1]); cleanup2: acpi_ut_remove_reference(args[0]); cleanup1: ACPI_FREE(info); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_execute_reg_methods * * PARAMETERS: node - Namespace node for the device * space_id - The address space ID * * RETURN: Status * * DESCRIPTION: Run all _REG methods for the input Space ID; * Note: assumes namespace is locked, or system init time. * ******************************************************************************/ acpi_status acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id) { acpi_status status; ACPI_FUNCTION_TRACE(ev_execute_reg_methods); /* * Run all _REG methods for all Operation Regions for this space ID. This * is a separate walk in order to handle any interdependencies between * regions and _REG methods. (i.e. 
handlers must be installed for all * regions of this Space ID before we can run any _REG methods) */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL, &space_id, NULL); /* Special case for EC: handle "orphan" _REG methods with no region */ if (space_id == ACPI_ADR_SPACE_EC) { acpi_ev_orphan_ec_reg_method(node); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_reg_run * * PARAMETERS: walk_namespace callback * * DESCRIPTION: Run _REG method for region objects of the requested spaceID * ******************************************************************************/ static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value) { union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; acpi_adr_space_type space_id; acpi_status status; space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context); /* Convert and validate the device handle */ node = acpi_ns_validate_handle(obj_handle); if (!node) { return (AE_BAD_PARAMETER); } /* * We only care about regions.and objects that are allowed to have address * space handlers */ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { return (AE_OK); } /* Check for an existing internal object */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { /* No object, just exit */ return (AE_OK); } /* Object is a Region */ if (obj_desc->region.space_id != space_id) { /* This region is for a different address space, just ignore it */ return (AE_OK); } status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT); return (status); } /******************************************************************************* * * FUNCTION: acpi_ev_orphan_ec_reg_method * * PARAMETERS: ec_device_node - Namespace node for an EC device * * RETURN: None * * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC * 
device. This is a _REG method that has no corresponding region * within the EC device scope. The orphan _REG method appears to * have been enabled by the description of the ECDT in the ACPI * specification: "The availability of the region space can be * detected by providing a _REG method object underneath the * Embedded Controller device." * * To quickly access the EC device, we use the ec_device_node used * during EC handler installation. Otherwise, we would need to * perform a time consuming namespace walk, executing _HID * methods to find the EC device. * * MUTEX: Assumes the namespace is locked * ******************************************************************************/ static void acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) { acpi_handle reg_method; struct acpi_namespace_node *next_node; acpi_status status; struct acpi_object_list args; union acpi_object objects[2]; ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method); if (!ec_device_node) { return_VOID; } /* Namespace is currently locked, must release */ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); /* Get a handle to a _REG method immediately under the EC device */ status = acpi_get_handle(ec_device_node, METHOD_NAME__REG, &reg_method); if (ACPI_FAILURE(status)) { goto exit; /* There is no _REG method present */ } /* * Execute the _REG method only if there is no Operation Region in * this scope with the Embedded Controller space ID. Otherwise, it * will already have been executed. Note, this allows for Regions * with other space IDs to be present; but the code below will then * execute the _REG method with the embedded_control space_ID argument. 
*/ next_node = acpi_ns_get_next_node(ec_device_node, NULL); while (next_node) { if ((next_node->type == ACPI_TYPE_REGION) && (next_node->object) && (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) { goto exit; /* Do not execute the _REG */ } next_node = acpi_ns_get_next_node(ec_device_node, next_node); } /* Evaluate the _REG(embedded_control,Connect) method */ args.count = 2; args.pointer = objects; objects[0].type = ACPI_TYPE_INTEGER; objects[0].integer.value = ACPI_ADR_SPACE_EC; objects[1].type = ACPI_TYPE_INTEGER; objects[1].integer.value = ACPI_REG_CONNECT; status = acpi_evaluate_object(reg_method, NULL, &args, NULL); exit: /* We ignore all errors from above, don't care */ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); return_VOID; }
gpl-2.0
gabriel-fernandez/kernel
arch/x86/kernel/i387.c
160
14237
/* * Copyright (C) 1994 Linus Torvalds * * Pentium III FXSR, SSE support * General FPU state handling cleanups * Gareth Hughes <gareth@valinux.com>, May 2000 */ #include <linux/module.h> #include <linux/regset.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/sigcontext.h> #include <asm/processor.h> #include <asm/math_emu.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/user.h> /* * Were we in an interrupt that interrupted kernel mode? * * For now, with eagerfpu we will return interrupted kernel FPU * state as not-idle. TBD: Ideally we can change the return value * to something like __thread_has_fpu(current). But we need to * be careful of doing __thread_clear_has_fpu() before saving * the FPU etc for supporting nested uses etc. For now, take * the simple route! * * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that * pair does nothing at all: the thread must not have fpu (so * that we don't try to save the FPU state), and TS must * be set (so that the clts/stts pair does nothing that is * visible in the interrupted kernel thread). */ static inline bool interrupted_kernel_fpu_idle(void) { if (use_eager_fpu()) return 0; return !__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS); } /* * Were we in user mode (or vm86 mode) when we were * interrupted? * * Doing kernel_fpu_begin/end() is ok if we are running * in an interrupt context from user mode - we'll just * save the FPU state as required. */ static inline bool interrupted_user_mode(void) { struct pt_regs *regs = get_irq_regs(); return regs && user_mode_vm(regs); } /* * Can we use the FPU in kernel mode with the * whole "kernel_fpu_begin/end()" sequence? * * It's always ok in process context (ie "not interrupt") * but it is sometimes ok even from an irq. 
*/ bool irq_fpu_usable(void) { return !in_interrupt() || interrupted_user_mode() || interrupted_kernel_fpu_idle(); } EXPORT_SYMBOL(irq_fpu_usable); void __kernel_fpu_begin(void) { struct task_struct *me = current; if (__thread_has_fpu(me)) { __save_init_fpu(me); __thread_clear_has_fpu(me); /* We do 'stts()' in __kernel_fpu_end() */ } else if (!use_eager_fpu()) { this_cpu_write(fpu_owner_task, NULL); clts(); } } EXPORT_SYMBOL(__kernel_fpu_begin); void __kernel_fpu_end(void) { if (use_eager_fpu()) math_state_restore(); else stts(); } EXPORT_SYMBOL(__kernel_fpu_end); void unlazy_fpu(struct task_struct *tsk) { preempt_disable(); if (__thread_has_fpu(tsk)) { __save_init_fpu(tsk); __thread_fpu_end(tsk); } else tsk->fpu_counter = 0; preempt_enable(); } EXPORT_SYMBOL(unlazy_fpu); unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; unsigned int xstate_size; EXPORT_SYMBOL_GPL(xstate_size); static struct i387_fxsave_struct fx_scratch __cpuinitdata; static void __cpuinit mxcsr_feature_mask_init(void) { unsigned long mask = 0; if (cpu_has_fxsr) { memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); asm volatile("fxsave %0" : : "m" (fx_scratch)); mask = fx_scratch.mxcsr_mask; if (mask == 0) mask = 0x0000ffbf; } mxcsr_feature_mask &= mask; } static void __cpuinit init_thread_xstate(void) { /* * Note that xstate_size might be overwriten later during * xsave_init(). */ if (!HAVE_HWFP) { /* * Disable xsave as we do not support it if i387 * emulation is enabled. */ setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); xstate_size = sizeof(struct i387_soft_struct); return; } if (cpu_has_fxsr) xstate_size = sizeof(struct i387_fxsave_struct); else xstate_size = sizeof(struct i387_fsave_struct); } /* * Called at bootup to set up the initial FPU state that is later cloned * into all processes. 
*/ void __cpuinit fpu_init(void) { unsigned long cr0; unsigned long cr4_mask = 0; if (cpu_has_fxsr) cr4_mask |= X86_CR4_OSFXSR; if (cpu_has_xmm) cr4_mask |= X86_CR4_OSXMMEXCPT; if (cr4_mask) set_in_cr4(cr4_mask); cr0 = read_cr0(); cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ if (!HAVE_HWFP) cr0 |= X86_CR0_EM; write_cr0(cr0); /* * init_thread_xstate is only called once to avoid overriding * xstate_size during boot time or during CPU hotplug. */ if (xstate_size == 0) init_thread_xstate(); mxcsr_feature_mask_init(); xsave_init(); eager_fpu_init(); } void fpu_finit(struct fpu *fpu) { if (!HAVE_HWFP) { finit_soft_fpu(&fpu->state->soft); return; } if (cpu_has_fxsr) { fx_finit(&fpu->state->fxsave); } else { struct i387_fsave_struct *fp = &fpu->state->fsave; memset(fp, 0, xstate_size); fp->cwd = 0xffff037fu; fp->swd = 0xffff0000u; fp->twd = 0xffffffffu; fp->fos = 0xffff0000u; } } EXPORT_SYMBOL_GPL(fpu_finit); /* * The _current_ task is using the FPU for the first time * so initialize it and set the mxcsr to its default * value at reset if we support XMM instructions and then * remember the current task has used the FPU. */ int init_fpu(struct task_struct *tsk) { int ret; if (tsk_used_math(tsk)) { if (HAVE_HWFP && tsk == current) unlazy_fpu(tsk); tsk->thread.fpu.last_cpu = ~0; return 0; } /* * Memory allocation at the first usage of the FPU and other state. */ ret = fpu_alloc(&tsk->thread.fpu); if (ret) return ret; fpu_finit(&tsk->thread.fpu); set_stopped_child_used_math(tsk); return 0; } EXPORT_SYMBOL_GPL(init_fpu); /* * The xstateregs_active() routine is the same as the fpregs_active() routine, * as the "regset->n" for the xstate regset will be updated based on the feature * capabilites supported by the xsave. */ int fpregs_active(struct task_struct *target, const struct user_regset *regset) { return tsk_used_math(target) ? 
regset->n : 0; } int xfpregs_active(struct task_struct *target, const struct user_regset *regset) { return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0; } int xfpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; if (!cpu_has_fxsr) return -ENODEV; ret = init_fpu(target); if (ret) return ret; sanitize_i387_state(target); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->fxsave, 0, -1); } int xfpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; if (!cpu_has_fxsr) return -ENODEV; ret = init_fpu(target); if (ret) return ret; sanitize_i387_state(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->fxsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. */ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; /* * update the header bits in the xsave header, indicating the * presence of FP and SSE state. */ if (cpu_has_xsave) target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; return ret; } int xstateregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; if (!cpu_has_xsave) return -ENODEV; ret = init_fpu(target); if (ret) return ret; /* * Copy the 48bytes defined by the software first into the xstate * memory layout in the thread struct, so that we can copy the entire * xstateregs to the user using one user_regset_copyout(). */ memcpy(&target->thread.fpu.state->fxsave.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); /* * Copy the xstate memory layout. 
*/ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->xsave, 0, -1); return ret; } int xstateregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct xsave_hdr_struct *xsave_hdr; if (!cpu_has_xsave) return -ENODEV; ret = init_fpu(target); if (ret) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->xsave, 0, -1); /* * mxcsr reserved bits must be masked to zero for security reasons. */ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr; xsave_hdr->xstate_bv &= pcntxt_mask; /* * These bits must be zero. */ xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; return ret; } #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION /* * FPU tag word conversions. */ static inline unsigned short twd_i387_to_fxsr(unsigned short twd) { unsigned int tmp; /* to avoid 16 bit prefixes in the code */ /* Transform each pair of bits into 01 (valid) or 00 (empty) */ tmp = ~twd; tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ /* and move the valid bits to the lower byte. 
*/ tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ return tmp; } #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16) #define FP_EXP_TAG_VALID 0 #define FP_EXP_TAG_ZERO 1 #define FP_EXP_TAG_SPECIAL 2 #define FP_EXP_TAG_EMPTY 3 static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) { struct _fpxreg *st; u32 tos = (fxsave->swd >> 11) & 7; u32 twd = (unsigned long) fxsave->twd; u32 tag; u32 ret = 0xffff0000u; int i; for (i = 0; i < 8; i++, twd >>= 1) { if (twd & 0x1) { st = FPREG_ADDR(fxsave, (i - tos) & 7); switch (st->exponent & 0x7fff) { case 0x7fff: tag = FP_EXP_TAG_SPECIAL; break; case 0x0000: if (!st->significand[0] && !st->significand[1] && !st->significand[2] && !st->significand[3]) tag = FP_EXP_TAG_ZERO; else tag = FP_EXP_TAG_SPECIAL; break; default: if (st->significand[3] & 0x8000) tag = FP_EXP_TAG_VALID; else tag = FP_EXP_TAG_SPECIAL; break; } } else { tag = FP_EXP_TAG_EMPTY; } ret |= tag << (2 * i); } return ret; } /* * FXSR floating point environment conversions. */ void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; int i; env->cwd = fxsave->cwd | 0xffff0000u; env->swd = fxsave->swd | 0xffff0000u; env->twd = twd_fxsr_to_i387(fxsave); #ifdef CONFIG_X86_64 env->fip = fxsave->rip; env->foo = fxsave->rdp; /* * should be actually ds/cs at fpu exception time, but * that information is not available in 64bit mode. 
*/ env->fcs = task_pt_regs(tsk)->cs; if (tsk == current) { savesegment(ds, env->fos); } else { env->fos = tsk->thread.ds; } env->fos |= 0xffff0000; #else env->fip = fxsave->fip; env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16); env->foo = fxsave->foo; env->fos = fxsave->fos; #endif for (i = 0; i < 8; ++i) memcpy(&to[i], &from[i], sizeof(to[0])); } void convert_to_fxsr(struct task_struct *tsk, const struct user_i387_ia32_struct *env) { struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; int i; fxsave->cwd = env->cwd; fxsave->swd = env->swd; fxsave->twd = twd_i387_to_fxsr(env->twd); fxsave->fop = (u16) ((u32) env->fcs >> 16); #ifdef CONFIG_X86_64 fxsave->rip = env->fip; fxsave->rdp = env->foo; /* cs and ds ignored */ #else fxsave->fip = env->fip; fxsave->fcs = (env->fcs & 0xffff); fxsave->foo = env->foo; fxsave->fos = env->fos; #endif for (i = 0; i < 8; ++i) memcpy(&to[i], &from[i], sizeof(from[0])); } int fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { struct user_i387_ia32_struct env; int ret; ret = init_fpu(target); if (ret) return ret; if (!HAVE_HWFP) return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); if (!cpu_has_fxsr) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->fsave, 0, -1); } sanitize_i387_state(target); if (kbuf && pos == 0 && count == sizeof(env)) { convert_from_fxsr(kbuf, target); return 0; } convert_from_fxsr(&env, target); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1); } int fpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct user_i387_ia32_struct env; int ret; ret = init_fpu(target); if (ret) return ret; sanitize_i387_state(target); 
if (!HAVE_HWFP) return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); if (!cpu_has_fxsr) { return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.state->fsave, 0, -1); } if (pos > 0 || count < sizeof(env)) convert_from_fxsr(&env, target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1); if (!ret) convert_to_fxsr(target, &env); /* * update the header bit in the xsave header, indicating the * presence of FP. */ if (cpu_has_xsave) target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; return ret; } /* * FPU state for core dumps. * This is only used for a.out dumps now. * It is declared generically using elf_fpregset_t (which is * struct user_i387_struct) but is in fact only used for 32-bit * dumps, so on 64-bit it is really struct user_i387_ia32_struct. */ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu) { struct task_struct *tsk = current; int fpvalid; fpvalid = !!used_math(); if (fpvalid) fpvalid = !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct), fpu, NULL); return fpvalid; } EXPORT_SYMBOL(dump_fpu); #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
gpl-2.0
goodhanrry/G9250_goodhanrry_kernel
drivers/amba/bus.c
160
18531
/* * linux/arch/arm/common/amba.c * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/amba/bus.h> #include <linux/sizes.h> #include <plat/cpu.h> #include <asm/irq.h> #define to_amba_driver(d) container_of(d, struct amba_driver, drv) void adma_init_clock(void) { unsigned int reg; void __iomem *lpass_dma_reset; /** * Audio DMA in some Exynos-based SoCs are placed within LPASS block. * The LPASS block needs to be reset before accessing the registers of * ADMA controller. LPASS base address is different for Exynos7580 as * compared to other SoCs. Adding an explicit check for this. */ if (soc_is_exynos7580()) lpass_dma_reset = ioremap(0x11000000, SZ_32); else lpass_dma_reset = ioremap(0x11400000, SZ_32); reg = __raw_readl(lpass_dma_reset + 0x8); reg &= ~0x1; __raw_writel(reg, lpass_dma_reset + 0x8); reg |= 0x1; __raw_writel(reg, lpass_dma_reset + 0x8); iounmap(lpass_dma_reset); } static const struct amba_id * amba_lookup(const struct amba_id *table, struct amba_device *dev) { int ret = 0; while (table->mask) { ret = (dev->periphid & table->mask) == table->id; if (ret) break; table++; } return ret ? 
table : NULL; } static int amba_match(struct device *dev, struct device_driver *drv) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *pcdrv = to_amba_driver(drv); return amba_lookup(pcdrv->id_table, pcdev) != NULL; } static int amba_uevent(struct device *dev, struct kobj_uevent_env *env) { struct amba_device *pcdev = to_amba_device(dev); int retval = 0; retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid); if (retval) return retval; retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid); return retval; } #define amba_attr_func(name,fmt,arg...) \ static ssize_t name##_show(struct device *_dev, \ struct device_attribute *attr, char *buf) \ { \ struct amba_device *dev = to_amba_device(_dev); \ return sprintf(buf, fmt, arg); \ } #define amba_attr(name,fmt,arg...) \ amba_attr_func(name,fmt,arg) \ static DEVICE_ATTR(name, S_IRUGO, name##_show, NULL) amba_attr_func(id, "%08x\n", dev->periphid); amba_attr(irq0, "%u\n", dev->irq[0]); amba_attr(irq1, "%u\n", dev->irq[1]); amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n", (unsigned long long)dev->res.start, (unsigned long long)dev->res.end, dev->res.flags); static struct device_attribute amba_dev_attrs[] = { __ATTR_RO(id), __ATTR_RO(resource), __ATTR_NULL, }; #ifdef CONFIG_PM_SLEEP static int amba_legacy_suspend(struct device *dev, pm_message_t mesg) { struct amba_driver *adrv = to_amba_driver(dev->driver); struct amba_device *adev = to_amba_device(dev); int ret = 0; if (dev->driver && adrv->suspend) ret = adrv->suspend(adev, mesg); return ret; } static int amba_legacy_resume(struct device *dev) { struct amba_driver *adrv = to_amba_driver(dev->driver); struct amba_device *adev = to_amba_device(dev); int ret = 0; if (dev->driver && adrv->resume) ret = adrv->resume(adev); return ret; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND static int amba_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if 
(drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = amba_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } static int amba_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = amba_legacy_resume(dev); } return ret; } #else /* !CONFIG_SUSPEND */ #define amba_pm_suspend NULL #define amba_pm_resume NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS static int amba_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = amba_legacy_suspend(dev, PMSG_FREEZE); } return ret; } static int amba_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = amba_legacy_resume(dev); } return ret; } static int amba_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = amba_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } static int amba_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = amba_legacy_resume(dev); } return ret; } #else /* !CONFIG_HIBERNATE_CALLBACKS */ #define amba_pm_freeze NULL #define amba_pm_thaw NULL #define amba_pm_poweroff NULL #define amba_pm_restore NULL #endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM_RUNTIME /* * Hooks to provide runtime PM of the pclk (bus clock). It is safe to * enable/disable the bus clock at runtime PM suspend/resume as this * does not result in loss of context. 
*/ static int amba_pm_runtime_suspend(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); int ret = pm_generic_runtime_suspend(dev); if (ret == 0 && dev->driver) clk_disable(pcdev->pclk); return ret; } static int amba_pm_runtime_resume(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); int ret; if (dev->driver) { ret = clk_enable(pcdev->pclk); /* Failure is probably fatal to the system, but... */ if (ret) return ret; } return pm_generic_runtime_resume(dev); } #endif #ifdef CONFIG_PM static const struct dev_pm_ops amba_pm = { .suspend = amba_pm_suspend, .resume = amba_pm_resume, .freeze = amba_pm_freeze, .thaw = amba_pm_thaw, .poweroff = amba_pm_poweroff, .restore = amba_pm_restore, SET_RUNTIME_PM_OPS( amba_pm_runtime_suspend, amba_pm_runtime_resume, pm_generic_runtime_idle ) }; #define AMBA_PM (&amba_pm) #else /* !CONFIG_PM */ #define AMBA_PM NULL #endif /* !CONFIG_PM */ /* * Primecells are part of the Advanced Microcontroller Bus Architecture, * so we call the bus "amba". */ struct bus_type amba_bustype = { .name = "amba", .dev_attrs = amba_dev_attrs, .match = amba_match, .uevent = amba_uevent, .pm = AMBA_PM, }; static int __init amba_init(void) { return bus_register(&amba_bustype); } postcore_initcall(amba_init); static int amba_get_enable_pclk(struct amba_device *pcdev) { struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk"); int ret; pcdev->pclk = pclk; if (IS_ERR(pclk)) return PTR_ERR(pclk); ret = clk_prepare(pclk); if (ret) { clk_put(pclk); return ret; } ret = clk_enable(pclk); if (ret) { clk_unprepare(pclk); clk_put(pclk); } return ret; } static void amba_put_disable_pclk(struct amba_device *pcdev) { struct clk *pclk = pcdev->pclk; clk_disable(pclk); clk_unprepare(pclk); clk_put(pclk); } /* * These are the device model conversion veneers; they convert the * device model structures to our more specific structures. 
*/ static int amba_probe(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *pcdrv = to_amba_driver(dev->driver); const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev); int ret; do { ret = amba_get_enable_pclk(pcdev); if (ret) break; pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); ret = pcdrv->probe(pcdev, id); if (ret == 0) break; pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); amba_put_disable_pclk(pcdev); } while (0); return ret; } static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); int ret; pm_runtime_get_sync(dev); ret = drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); amba_put_disable_pclk(pcdev); return ret; } static void amba_shutdown(struct device *dev) { struct amba_driver *drv = to_amba_driver(dev->driver); drv->shutdown(to_amba_device(dev)); } /** * amba_driver_register - register an AMBA device driver * @drv: amba device driver structure * * Register an AMBA device driver with the Linux device model * core. If devices pre-exist, the drivers probe function will * be called. */ int amba_driver_register(struct amba_driver *drv) { drv->drv.bus = &amba_bustype; #define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn SETFN(probe); SETFN(remove); SETFN(shutdown); return driver_register(&drv->drv); } /** * amba_driver_unregister - remove an AMBA device driver * @drv: AMBA device driver structure to remove * * Unregister an AMBA device driver from the Linux device * model. The device model will call the drivers remove function * for each device the device driver is currently handling. 
*/ void amba_driver_unregister(struct amba_driver *drv) { driver_unregister(&drv->drv); } static void amba_device_release(struct device *dev) { struct amba_device *d = to_amba_device(dev); if (d->res.parent) release_resource(&d->res); kfree(d); } /** * amba_device_add - add a previously allocated AMBA device structure * @dev: AMBA device allocated by amba_device_alloc * @parent: resource parent for this devices resources * * Claim the resource, and read the device cell ID if not already * initialized. Register the AMBA device with the Linux device * manager. */ int amba_device_add(struct amba_device *dev, struct resource *parent) { u32 size; void __iomem *tmp; int i, ret; WARN_ON(dev->irq[0] == (unsigned int)-1); WARN_ON(dev->irq[1] == (unsigned int)-1); if (strstr(dev_name(&dev->dev),"adma")) adma_init_clock(); ret = request_resource(parent, &dev->res); if (ret) goto err_out; /* Hard-coded primecell ID instead of plug-n-play */ if (dev->periphid != 0) goto skip_probe; /* * Dynamically calculate the size of the resource * and use this for iomap */ size = resource_size(&dev->res); tmp = ioremap(dev->res.start, size); if (!tmp) { ret = -ENOMEM; goto err_release; } ret = amba_get_enable_pclk(dev); if (ret == 0) { u32 pid, cid; /* * Read pid and cid based on size of resource * they are located at end of region */ for (pid = 0, i = 0; i < 4; i++) pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8); for (cid = 0, i = 0; i < 4; i++) cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8); amba_put_disable_pclk(dev); if (cid == AMBA_CID) dev->periphid = pid; if (!dev->periphid) { if (strstr(dev_name(&dev->dev), "adma")) dev_err(&dev->dev, "Please reset LPASS\n"); ret = -ENODEV; } } iounmap(tmp); if (ret) goto err_release; skip_probe: ret = device_add(&dev->dev); if (ret) goto err_release; if (dev->irq[0]) ret = device_create_file(&dev->dev, &dev_attr_irq0); if (ret == 0 && dev->irq[1]) ret = device_create_file(&dev->dev, &dev_attr_irq1); if (ret == 0) return ret; 
device_unregister(&dev->dev); err_release: release_resource(&dev->res); err_out: return ret; } EXPORT_SYMBOL_GPL(amba_device_add); static struct amba_device * amba_aphb_device_add(struct device *parent, const char *name, resource_size_t base, size_t size, int irq1, int irq2, void *pdata, unsigned int periphid, u64 dma_mask, struct resource *resbase) { struct amba_device *dev; int ret; dev = amba_device_alloc(name, base, size); if (!dev) return ERR_PTR(-ENOMEM); dev->dma_mask = dma_mask; dev->dev.coherent_dma_mask = dma_mask; dev->irq[0] = irq1; dev->irq[1] = irq2; dev->periphid = periphid; dev->dev.platform_data = pdata; dev->dev.parent = parent; ret = amba_device_add(dev, resbase); if (ret) { amba_device_put(dev); return ERR_PTR(ret); } return dev; } struct amba_device * amba_apb_device_add(struct device *parent, const char *name, resource_size_t base, size_t size, int irq1, int irq2, void *pdata, unsigned int periphid) { return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata, periphid, 0, &iomem_resource); } EXPORT_SYMBOL_GPL(amba_apb_device_add); struct amba_device * amba_ahb_device_add(struct device *parent, const char *name, resource_size_t base, size_t size, int irq1, int irq2, void *pdata, unsigned int periphid) { return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata, periphid, ~0ULL, &iomem_resource); } EXPORT_SYMBOL_GPL(amba_ahb_device_add); struct amba_device * amba_apb_device_add_res(struct device *parent, const char *name, resource_size_t base, size_t size, int irq1, int irq2, void *pdata, unsigned int periphid, struct resource *resbase) { return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata, periphid, 0, resbase); } EXPORT_SYMBOL_GPL(amba_apb_device_add_res); struct amba_device * amba_ahb_device_add_res(struct device *parent, const char *name, resource_size_t base, size_t size, int irq1, int irq2, void *pdata, unsigned int periphid, struct resource *resbase) { return amba_aphb_device_add(parent, 
name, base, size, irq1, irq2, pdata, periphid, ~0ULL, resbase); } EXPORT_SYMBOL_GPL(amba_ahb_device_add_res); static void amba_device_initialize(struct amba_device *dev, const char *name) { device_initialize(&dev->dev); if (name) dev_set_name(&dev->dev, "%s", name); dev->dev.release = amba_device_release; dev->dev.bus = &amba_bustype; dev->dev.dma_mask = &dev->dma_mask; dev->res.name = dev_name(&dev->dev); } /** * amba_device_alloc - allocate an AMBA device * @name: sysfs name of the AMBA device * @base: base of AMBA device * @size: size of AMBA device * * Allocate and initialize an AMBA device structure. Returns %NULL * on failure. */ struct amba_device *amba_device_alloc(const char *name, resource_size_t base, size_t size) { struct amba_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev) { amba_device_initialize(dev, name); dev->res.start = base; dev->res.end = base + size - 1; dev->res.flags = IORESOURCE_MEM; } return dev; } EXPORT_SYMBOL_GPL(amba_device_alloc); /** * amba_device_register - register an AMBA device * @dev: AMBA device to register * @parent: parent memory resource * * Setup the AMBA device, reading the cell ID if present. * Claim the resource, and register the AMBA device with * the Linux device manager. */ int amba_device_register(struct amba_device *dev, struct resource *parent) { amba_device_initialize(dev, dev->dev.init_name); dev->dev.init_name = NULL; if (!dev->dev.coherent_dma_mask && dev->dma_mask) dev_warn(&dev->dev, "coherent dma mask is unset\n"); return amba_device_add(dev, parent); } /** * amba_device_put - put an AMBA device * @dev: AMBA device to put */ void amba_device_put(struct amba_device *dev) { put_device(&dev->dev); } EXPORT_SYMBOL_GPL(amba_device_put); /** * amba_device_unregister - unregister an AMBA device * @dev: AMBA device to remove * * Remove the specified AMBA device from the Linux device * manager. 
All files associated with this object will be * destroyed, and device drivers notified that the device has * been removed. The AMBA device's resources including * the amba_device structure will be freed once all * references to it have been dropped. */ void amba_device_unregister(struct amba_device *dev) { device_unregister(&dev->dev); } struct find_data { struct amba_device *dev; struct device *parent; const char *busid; unsigned int id; unsigned int mask; }; static int amba_find_match(struct device *dev, void *data) { struct find_data *d = data; struct amba_device *pcdev = to_amba_device(dev); int r; r = (pcdev->periphid & d->mask) == d->id; if (d->parent) r &= d->parent == dev->parent; if (d->busid) r &= strcmp(dev_name(dev), d->busid) == 0; if (r) { get_device(dev); d->dev = pcdev; } return r; } /** * amba_find_device - locate an AMBA device given a bus id * @busid: bus id for device (or NULL) * @parent: parent device (or NULL) * @id: peripheral ID (or 0) * @mask: peripheral ID mask (or 0) * * Return the AMBA device corresponding to the supplied parameters. * If no device matches, returns NULL. * * NOTE: When a valid device is found, its refcount is * incremented, and must be decremented before the returned * reference. 
*/ struct amba_device * amba_find_device(const char *busid, struct device *parent, unsigned int id, unsigned int mask) { struct find_data data; data.dev = NULL; data.parent = parent; data.busid = busid; data.id = id; data.mask = mask; bus_for_each_dev(&amba_bustype, NULL, &data, amba_find_match); return data.dev; } /** * amba_request_regions - request all mem regions associated with device * @dev: amba_device structure for device * @name: name, or NULL to use driver name */ int amba_request_regions(struct amba_device *dev, const char *name) { int ret = 0; u32 size; if (!name) name = dev->dev.driver->name; size = resource_size(&dev->res); if (!request_mem_region(dev->res.start, size, name)) ret = -EBUSY; return ret; } /** * amba_release_regions - release mem regions associated with device * @dev: amba_device structure for device * * Release regions claimed by a successful call to amba_request_regions. */ void amba_release_regions(struct amba_device *dev) { u32 size; size = resource_size(&dev->res); release_mem_region(dev->res.start, size); } EXPORT_SYMBOL(amba_driver_register); EXPORT_SYMBOL(amba_driver_unregister); EXPORT_SYMBOL(amba_device_register); EXPORT_SYMBOL(amba_device_unregister); EXPORT_SYMBOL(amba_find_device); EXPORT_SYMBOL(amba_request_regions); EXPORT_SYMBOL(amba_release_regions);
gpl-2.0
Jazz-823/kernel_ayame
drivers/mfd/mfd-core.c
160
2761
/* * drivers/mfd/mfd-core.c * * core MFD support * Copyright (c) 2006 Ian Molton * Copyright (c) 2007,2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mfd/core.h> static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, int irq_base) { struct resource *res; struct platform_device *pdev; int ret = -ENOMEM; int r; pdev = platform_device_alloc(cell->name, id + cell->id); if (!pdev) goto fail_alloc; res = kzalloc(sizeof(*res) * cell->num_resources, GFP_KERNEL); if (!res) goto fail_device; pdev->dev.parent = parent; platform_set_drvdata(pdev, cell->driver_data); if (cell->data_size) { ret = platform_device_add_data(pdev, cell->platform_data, cell->data_size); if (ret) goto fail_res; } for (r = 0; r < cell->num_resources; r++) { res[r].name = cell->resources[r].name; res[r].flags = cell->resources[r].flags; /* Find out base to use */ if (cell->resources[r].flags & IORESOURCE_MEM) { res[r].parent = mem_base; res[r].start = mem_base->start + cell->resources[r].start; res[r].end = mem_base->start + cell->resources[r].end; } else if (cell->resources[r].flags & IORESOURCE_IRQ) { res[r].start = irq_base + cell->resources[r].start; res[r].end = irq_base + cell->resources[r].end; } else { res[r].parent = cell->resources[r].parent; res[r].start = cell->resources[r].start; res[r].end = cell->resources[r].end; } } platform_device_add_resources(pdev, res, cell->num_resources); ret = platform_device_add(pdev); if (ret) goto fail_res; kfree(res); return 0; /* platform_device_del(pdev); */ fail_res: kfree(res); fail_device: platform_device_put(pdev); fail_alloc: return ret; } int mfd_add_devices(struct device *parent, int id, const struct mfd_cell *cells, int n_devs, struct resource *mem_base, 
int irq_base) { int i; int ret = 0; for (i = 0; i < n_devs; i++) { ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base); if (ret) break; } if (ret) mfd_remove_devices(parent); return ret; } EXPORT_SYMBOL(mfd_add_devices); static int mfd_remove_devices_fn(struct device *dev, void *unused) { platform_device_unregister(to_platform_device(dev)); return 0; } void mfd_remove_devices(struct device *parent) { device_for_each_child(parent, NULL, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
gpl-2.0
geneyeung/linux-3.10.17
arch/arm/mach-omap2/gpmc.c
416
46659
/* * GPMC support functions * * Copyright (C) 2005-2006 Nokia Corporation * * Author: Juha Yrjola * * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mtd.h> #include <linux/of_device.h> #include <linux/mtd/nand.h> #include <linux/platform_data/mtd-nand-omap2.h> #include <asm/mach-types.h> #include "soc.h" #include "common.h" #include "omap_device.h" #include "gpmc.h" #include "gpmc-nand.h" #include "gpmc-onenand.h" #define DEVICE_NAME "omap-gpmc" /* GPMC register offsets */ #define GPMC_REVISION 0x00 #define GPMC_SYSCONFIG 0x10 #define GPMC_SYSSTATUS 0x14 #define GPMC_IRQSTATUS 0x18 #define GPMC_IRQENABLE 0x1c #define GPMC_TIMEOUT_CONTROL 0x40 #define GPMC_ERR_ADDRESS 0x44 #define GPMC_ERR_TYPE 0x48 #define GPMC_CONFIG 0x50 #define GPMC_STATUS 0x54 #define GPMC_PREFETCH_CONFIG1 0x1e0 #define GPMC_PREFETCH_CONFIG2 0x1e4 #define GPMC_PREFETCH_CONTROL 0x1ec #define GPMC_PREFETCH_STATUS 0x1f0 #define GPMC_ECC_CONFIG 0x1f4 #define GPMC_ECC_CONTROL 0x1f8 #define GPMC_ECC_SIZE_CONFIG 0x1fc #define GPMC_ECC1_RESULT 0x200 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */ #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */ #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */ #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */ /* GPMC ECC control settings */ #define GPMC_ECC_CTRL_ECCCLEAR 0x100 #define 
GPMC_ECC_CTRL_ECCDISABLE 0x000 #define GPMC_ECC_CTRL_ECCREG1 0x001 #define GPMC_ECC_CTRL_ECCREG2 0x002 #define GPMC_ECC_CTRL_ECCREG3 0x003 #define GPMC_ECC_CTRL_ECCREG4 0x004 #define GPMC_ECC_CTRL_ECCREG5 0x005 #define GPMC_ECC_CTRL_ECCREG6 0x006 #define GPMC_ECC_CTRL_ECCREG7 0x007 #define GPMC_ECC_CTRL_ECCREG8 0x008 #define GPMC_ECC_CTRL_ECCREG9 0x009 #define GPMC_CONFIG2_CSEXTRADELAY BIT(7) #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) #define GPMC_CONFIG4_OEEXTRADELAY BIT(7) #define GPMC_CONFIG4_WEEXTRADELAY BIT(23) #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6) #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7) #define GPMC_CS0_OFFSET 0x60 #define GPMC_CS_SIZE 0x30 #define GPMC_BCH_SIZE 0x10 #define GPMC_MEM_END 0x3FFFFFFF #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ #define GPMC_SECTION_SHIFT 28 /* 128 MB */ #define CS_NUM_SHIFT 24 #define ENABLE_PREFETCH (0x1 << 7) #define DMA_MPU_MODE 2 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf) #define GPMC_REVISION_MINOR(l) (l & 0xf) #define GPMC_HAS_WR_ACCESS 0x1 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2 #define GPMC_HAS_MUX_AAD 0x4 #define GPMC_NR_WAITPINS 4 /* XXX: Only NAND irq has been considered,currently these are the only ones used */ #define GPMC_NR_IRQ 2 struct gpmc_client_irq { unsigned irq; u32 bitmask; }; /* Structure to save gpmc cs context */ struct gpmc_cs_config { u32 config1; u32 config2; u32 config3; u32 config4; u32 config5; u32 config6; u32 config7; int is_valid; }; /* * Structure to save/restore gpmc context * to support core off on OMAP3 */ struct omap3_gpmc_regs { u32 sysconfig; u32 irqenable; u32 timeout_ctrl; u32 config; u32 prefetch_config1; u32 prefetch_config2; u32 prefetch_control; struct gpmc_cs_config cs_context[GPMC_CS_NUM]; }; static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; static struct irq_chip gpmc_irq_chip; static unsigned gpmc_irq_start; static struct resource gpmc_mem_root; static struct resource gpmc_cs_mem[GPMC_CS_NUM]; static DEFINE_SPINLOCK(gpmc_mem_lock); /* Define chip-selects 
as reserved by default until probe completes */ static unsigned int gpmc_cs_map = ((1 << GPMC_CS_NUM) - 1); static unsigned int gpmc_nr_waitpins; static struct device *gpmc_dev; static int gpmc_irq; static resource_size_t phys_base, mem_size; static unsigned gpmc_capability; static void __iomem *gpmc_base; static struct clk *gpmc_l3_clk; static irqreturn_t gpmc_handle_irq(int irq, void *dev); static void gpmc_write_reg(int idx, u32 val) { __raw_writel(val, gpmc_base + idx); } static u32 gpmc_read_reg(int idx) { return __raw_readl(gpmc_base + idx); } void gpmc_cs_write_reg(int cs, int idx, u32 val) { void __iomem *reg_addr; reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; __raw_writel(val, reg_addr); } static u32 gpmc_cs_read_reg(int cs, int idx) { void __iomem *reg_addr; reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; return __raw_readl(reg_addr); } /* TODO: Add support for gpmc_fck to clock framework and use it */ static unsigned long gpmc_get_fclk_period(void) { unsigned long rate = clk_get_rate(gpmc_l3_clk); if (rate == 0) { printk(KERN_WARNING "gpmc_l3_clk not enabled\n"); return 0; } rate /= 1000; rate = 1000000000 / rate; /* In picoseconds */ return rate; } static unsigned int gpmc_ns_to_ticks(unsigned int time_ns) { unsigned long tick_ps; /* Calculate in picosecs to yield more exact results */ tick_ps = gpmc_get_fclk_period(); return (time_ns * 1000 + tick_ps - 1) / tick_ps; } static unsigned int gpmc_ps_to_ticks(unsigned int time_ps) { unsigned long tick_ps; /* Calculate in picosecs to yield more exact results */ tick_ps = gpmc_get_fclk_period(); return (time_ps + tick_ps - 1) / tick_ps; } unsigned int gpmc_ticks_to_ns(unsigned int ticks) { return ticks * gpmc_get_fclk_period() / 1000; } static unsigned int gpmc_ticks_to_ps(unsigned int ticks) { return ticks * gpmc_get_fclk_period(); } static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps) { unsigned long ticks = gpmc_ps_to_ticks(time_ps); return ticks * 
gpmc_get_fclk_period(); } static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value) { u32 l; l = gpmc_cs_read_reg(cs, reg); if (value) l |= mask; else l &= ~mask; gpmc_cs_write_reg(cs, reg, l); } static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) { gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1, GPMC_CONFIG1_TIME_PARA_GRAN, p->time_para_granularity); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2, GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3, GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, p->cycle2cyclesamecsen); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN, p->cycle2cyclediffcsen); } #ifdef DEBUG static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int time, const char *name) #else static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int time) #endif { u32 l; int ticks, mask, nr_bits; if (time == 0) ticks = 0; else ticks = gpmc_ns_to_ticks(time); nr_bits = end_bit - st_bit + 1; if (ticks >= 1 << nr_bits) { #ifdef DEBUG printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n", cs, name, time, ticks, 1 << nr_bits); #endif return -1; } mask = (1 << nr_bits) - 1; l = gpmc_cs_read_reg(cs, reg); #ifdef DEBUG printk(KERN_INFO "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n", cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000, (l >> st_bit) & mask, time); #endif l &= ~(mask << st_bit); l |= ticks << st_bit; gpmc_cs_write_reg(cs, reg, l); return 0; } #ifdef DEBUG #define GPMC_SET_ONE(reg, st, end, field) \ if (set_gpmc_timing_reg(cs, (reg), (st), (end), \ t->field, #field) < 0) \ return -1 #else #define GPMC_SET_ONE(reg, st, end, field) \ if (set_gpmc_timing_reg(cs, 
(reg), (st), (end), t->field) < 0) \ return -1 #endif int gpmc_calc_divider(unsigned int sync_clk) { int div; u32 l; l = sync_clk + (gpmc_get_fclk_period() - 1); div = l / gpmc_get_fclk_period(); if (div > 4) return -1; if (div <= 0) div = 1; return div; } int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t) { int div; u32 l; div = gpmc_calc_divider(t->sync_clk); if (div < 0) return div; GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on); GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off); GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off); GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on); GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off); GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off); GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on); GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off); GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on); GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off); GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle); GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle); GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access); GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access); GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround); GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay); GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring); GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation); if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus); if (gpmc_capability & GPMC_HAS_WR_ACCESS) GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access); /* caller is expected to have initialized CONFIG1 to cover * at least sync vs async */ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) { #ifdef DEBUG printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n", cs, (div * gpmc_get_fclk_period()) / 1000, div); #endif l &= ~0x03; l |= (div - 1); gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l); } gpmc_cs_bool_timings(cs, &t->bool_timings); return 0; } static int gpmc_cs_enable_mem(int cs, u32 base, 
u32 size) { u32 l; u32 mask; /* * Ensure that base address is aligned on a * boundary equal to or greater than size. */ if (base & (size - 1)) return -EINVAL; mask = (1 << GPMC_SECTION_SHIFT) - size; l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l &= ~0x3f; l = (base >> GPMC_CHUNK_SHIFT) & 0x3f; l &= ~(0x0f << 8); l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8; l |= GPMC_CONFIG7_CSVALID; gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); return 0; } static void gpmc_cs_disable_mem(int cs) { u32 l; l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l &= ~GPMC_CONFIG7_CSVALID; gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); } static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size) { u32 l; u32 mask; l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); *base = (l & 0x3f) << GPMC_CHUNK_SHIFT; mask = (l >> 8) & 0x0f; *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT); } static int gpmc_cs_mem_enabled(int cs) { u32 l; l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); return l & GPMC_CONFIG7_CSVALID; } static void gpmc_cs_set_reserved(int cs, int reserved) { gpmc_cs_map &= ~(1 << cs); gpmc_cs_map |= (reserved ? 
1 : 0) << cs; } static bool gpmc_cs_reserved(int cs) { return gpmc_cs_map & (1 << cs); } static unsigned long gpmc_mem_align(unsigned long size) { int order; size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1); order = GPMC_CHUNK_SHIFT - 1; do { size >>= 1; order++; } while (size); size = 1 << order; return size; } static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size) { struct resource *res = &gpmc_cs_mem[cs]; int r; size = gpmc_mem_align(size); spin_lock(&gpmc_mem_lock); res->start = base; res->end = base + size - 1; r = request_resource(&gpmc_mem_root, res); spin_unlock(&gpmc_mem_lock); return r; } static int gpmc_cs_delete_mem(int cs) { struct resource *res = &gpmc_cs_mem[cs]; int r; spin_lock(&gpmc_mem_lock); r = release_resource(&gpmc_cs_mem[cs]); res->start = 0; res->end = 0; spin_unlock(&gpmc_mem_lock); return r; } /** * gpmc_cs_remap - remaps a chip-select physical base address * @cs: chip-select to remap * @base: physical base address to re-map chip-select to * * Re-maps a chip-select to a new physical base address specified by * "base". Returns 0 on success and appropriate negative error code * on failure. 
*/ static int gpmc_cs_remap(int cs, u32 base) { int ret; u32 old_base, size; if (cs > GPMC_CS_NUM) return -ENODEV; gpmc_cs_get_memconf(cs, &old_base, &size); if (base == old_base) return 0; gpmc_cs_disable_mem(cs); ret = gpmc_cs_delete_mem(cs); if (ret < 0) return ret; ret = gpmc_cs_insert_mem(cs, base, size); if (ret < 0) return ret; ret = gpmc_cs_enable_mem(cs, base, size); if (ret < 0) return ret; return 0; } int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) { struct resource *res = &gpmc_cs_mem[cs]; int r = -1; if (cs > GPMC_CS_NUM) return -ENODEV; size = gpmc_mem_align(size); if (size > (1 << GPMC_SECTION_SHIFT)) return -ENOMEM; spin_lock(&gpmc_mem_lock); if (gpmc_cs_reserved(cs)) { r = -EBUSY; goto out; } if (gpmc_cs_mem_enabled(cs)) r = adjust_resource(res, res->start & ~(size - 1), size); if (r < 0) r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0, size, NULL, NULL); if (r < 0) goto out; r = gpmc_cs_enable_mem(cs, res->start, resource_size(res)); if (r < 0) { release_resource(res); goto out; } *base = res->start; gpmc_cs_set_reserved(cs, 1); out: spin_unlock(&gpmc_mem_lock); return r; } EXPORT_SYMBOL(gpmc_cs_request); void gpmc_cs_free(int cs) { spin_lock(&gpmc_mem_lock); if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) { printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs); BUG(); spin_unlock(&gpmc_mem_lock); return; } gpmc_cs_disable_mem(cs); release_resource(&gpmc_cs_mem[cs]); gpmc_cs_set_reserved(cs, 0); spin_unlock(&gpmc_mem_lock); } EXPORT_SYMBOL(gpmc_cs_free); /** * gpmc_configure - write request to configure gpmc * @cmd: command type * @wval: value to write * @return status of the operation */ int gpmc_configure(int cmd, int wval) { u32 regval; switch (cmd) { case GPMC_ENABLE_IRQ: gpmc_write_reg(GPMC_IRQENABLE, wval); break; case GPMC_SET_IRQ_STATUS: gpmc_write_reg(GPMC_IRQSTATUS, wval); break; case GPMC_CONFIG_WP: regval = gpmc_read_reg(GPMC_CONFIG); if (wval) regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is 
ON */ else regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */ gpmc_write_reg(GPMC_CONFIG, regval); break; default: pr_err("%s: command not supported\n", __func__); return -EINVAL; } return 0; } EXPORT_SYMBOL(gpmc_configure); void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) { int i; reg->gpmc_status = gpmc_base + GPMC_STATUS; reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs; reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET + GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs; reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1; reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2; reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL; reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS; reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG; reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL; reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG; reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT; for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) { reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 + GPMC_BCH_SIZE * i; reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 + GPMC_BCH_SIZE * i; reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 + GPMC_BCH_SIZE * i; reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 + GPMC_BCH_SIZE * i; } } int gpmc_get_client_irq(unsigned irq_config) { int i; if (hweight32(irq_config) > 1) return 0; for (i = 0; i < GPMC_NR_IRQ; i++) if (gpmc_client_irq[i].bitmask & irq_config) return gpmc_client_irq[i].irq; return 0; } static int gpmc_irq_endis(unsigned irq, bool endis) { int i; u32 regval; for (i = 0; i < GPMC_NR_IRQ; i++) if (irq == gpmc_client_irq[i].irq) { regval = gpmc_read_reg(GPMC_IRQENABLE); if (endis) regval |= gpmc_client_irq[i].bitmask; else regval &= ~gpmc_client_irq[i].bitmask; 
gpmc_write_reg(GPMC_IRQENABLE, regval); break; } return 0; } static void gpmc_irq_disable(struct irq_data *p) { gpmc_irq_endis(p->irq, false); } static void gpmc_irq_enable(struct irq_data *p) { gpmc_irq_endis(p->irq, true); } static void gpmc_irq_noop(struct irq_data *data) { } static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } static int gpmc_setup_irq(void) { int i; u32 regval; if (!gpmc_irq) return -EINVAL; gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); if (gpmc_irq_start < 0) { pr_err("irq_alloc_descs failed\n"); return gpmc_irq_start; } gpmc_irq_chip.name = "gpmc"; gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; gpmc_irq_chip.irq_enable = gpmc_irq_enable; gpmc_irq_chip.irq_disable = gpmc_irq_disable; gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; gpmc_irq_chip.irq_ack = gpmc_irq_noop; gpmc_irq_chip.irq_mask = gpmc_irq_noop; gpmc_irq_chip.irq_unmask = gpmc_irq_noop; gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; for (i = 0; i < GPMC_NR_IRQ; i++) { gpmc_client_irq[i].irq = gpmc_irq_start + i; irq_set_chip_and_handler(gpmc_client_irq[i].irq, &gpmc_irq_chip, handle_simple_irq); set_irq_flags(gpmc_client_irq[i].irq, IRQF_VALID | IRQF_NOAUTOEN); } /* Disable interrupts */ gpmc_write_reg(GPMC_IRQENABLE, 0); /* clear interrupts */ regval = gpmc_read_reg(GPMC_IRQSTATUS); gpmc_write_reg(GPMC_IRQSTATUS, regval); return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); } static int gpmc_free_irq(void) { int i; if (gpmc_irq) free_irq(gpmc_irq, NULL); for (i = 0; i < GPMC_NR_IRQ; i++) { irq_set_handler(gpmc_client_irq[i].irq, NULL); irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip); irq_modify_status(gpmc_client_irq[i].irq, 0, 0); } irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); return 0; } static void gpmc_mem_exit(void) { int cs; for (cs = 0; cs < GPMC_CS_NUM; cs++) { if (!gpmc_cs_mem_enabled(cs)) continue; gpmc_cs_delete_mem(cs); } } static void gpmc_mem_init(void) { int cs; 
/* * The first 1MB of GPMC address space is typically mapped to * the internal ROM. Never allocate the first page, to * facilitate bug detection; even if we didn't boot from ROM. */ gpmc_mem_root.start = SZ_1M; gpmc_mem_root.end = GPMC_MEM_END; /* Reserve all regions that has been set up by bootloader */ for (cs = 0; cs < GPMC_CS_NUM; cs++) { u32 base, size; if (!gpmc_cs_mem_enabled(cs)) continue; gpmc_cs_get_memconf(cs, &base, &size); if (gpmc_cs_insert_mem(cs, base, size)) { pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n", __func__, cs, base, base + size); gpmc_cs_disable_mem(cs); } } } static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk) { u32 temp; int div; div = gpmc_calc_divider(sync_clk); temp = gpmc_ps_to_ticks(time_ps); temp = (temp + div - 1) / div; return gpmc_ticks_to_ps(temp * div); } /* XXX: can the cycles be avoided ? */ static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t, struct gpmc_device_timings *dev_t, bool mux) { u32 temp; /* adv_rd_off */ temp = dev_t->t_avdp_r; /* XXX: mux check required ? */ if (mux) { /* XXX: t_avdp not to be required for sync, only added for tusb * this indirectly necessitates requirement of t_avdp_r and * t_avdp_w instead of having a single t_avdp */ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh); temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); } gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp); /* oe_on */ temp = dev_t->t_oeasu; /* XXX: remove this ? */ if (mux) { temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach); temp = max_t(u32, temp, gpmc_t->adv_rd_off + gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe)); } gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp); /* access */ /* XXX: any scope for improvement ?, by combining oe_on * and clk_activation, need to check whether * access = clk_activation + round to sync clk ? 
 */
	/* access: latest of initial-access time, CE setup and OE setup,
	 * all measured from the relevant signal assertion (values in ps) */
	temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
	temp += gpmc_t->clk_activation;
	if (dev_t->cyc_oe)
		temp = max_t(u32, temp, gpmc_t->oe_on +
				gpmc_ticks_to_ps(dev_t->cyc_oe));
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE is deasserted one GPMC tick after data is sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
	temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
							gpmc_t->access;
	/* XXX: barter t_ce_rdyz with t_cez_r ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

/*
 * Derive the GPMC synchronous-write timing fields of @gpmc_t from the
 * device datasheet timings in @dev_t.  @mux selects address/data-muxed
 * operation.  All intermediate values are in picoseconds; each result is
 * rounded up to whole GPMC ticks before being stored.  Always returns 0.
 */
static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux) {
		/* muxed bus: ADV must also respect AVD hold past clock
		 * activation and stay asserted at least one tick */
		temp = max_t(u32, temp,
			gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32,
			gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = max_t(u32, dev_t->t_weasu,
			gpmc_t->clk_activation + dev_t->t_rdyo);
	/* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
	 * and in that case remember to handle we_on properly
	 */
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* wr_access */
	/* XXX: gpmc_capability check reqd ?, even if not, will not harm */
	gpmc_t->wr_access = gpmc_t->access;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	temp = max_t(u32, temp,
			gpmc_t->wr_access + gpmc_ticks_to_ps(1));
	temp = max_t(u32, temp,
		gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
	temp += gpmc_t->wr_access;
	/* XXX: barter t_ce_rdyz with t_cez_w ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp, gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

/*
 * Derive the GPMC asynchronous-read timing fields of @gpmc_t from the
 * device timings in @dev_t (picoseconds throughout).  Always returns 0.
 */
static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu;
	if (mux)
		temp = max_t(u32, temp,
			gpmc_t->adv_rd_off + dev_t->t_aavdh);
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ?
			 */
			gpmc_t->oe_on + dev_t->t_oe);
	temp = max_t(u32, temp, gpmc_t->cs_on + dev_t->t_ce);
	temp = max_t(u32, temp, gpmc_t->adv_on + dev_t->t_aa);
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE is released one tick after the data has been sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_rd_cycle,
			gpmc_t->cs_rd_off + dev_t->t_cez_r);
	temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

/*
 * Derive the GPMC asynchronous-write timing fields of @gpmc_t from the
 * device timings in @dev_t (picoseconds throughout).  @mux selects
 * address/data-muxed operation.  Always returns 0.
 */
static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t,
					 bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = dev_t->t_weasu;
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = max_t(u32, dev_t->t_wr_cycle,
				gpmc_t->cs_wr_off + dev_t->t_cez_w);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

/*
 * Compute timing fields common to all synchronous accesses: sync_clk,
 * page_burst_access, clk_activation, and the per-signal extra-delay
 * flags (only when the GPMC divider is 1).  Always returns 0.
 */
static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t)
{
	u32 temp;

	gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
						gpmc_get_fclk_period();

	gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
					dev_t->t_bacc,
					gpmc_t->sync_clk);

	temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
	gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);

	/* extra delays are only expressible at divider 1 */
	if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
		return 0;

	if (dev_t->ce_xdelay)
		gpmc_t->bool_timings.cs_extra_delay = true;
	if (dev_t->avd_xdelay)
		gpmc_t->bool_timings.adv_extra_delay = true;
	if (dev_t->oe_xdelay)
		gpmc_t->bool_timings.oe_extra_delay = true;
	if (dev_t->we_xdelay)
		gpmc_t->bool_timings.we_extra_delay = true;

	return 0;
}

/*
 * Compute cs_on and adv_on (common to sync and async accesses); when
 * @sync is set also fill in the sync-common fields.  Always returns 0.
 */
static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
				    struct gpmc_device_timings *dev_t,
				    bool sync)
{
	u32 temp;

	/* cs_on */
	gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);

	/* adv_on */
	temp = dev_t->t_avdasu;
	if (dev_t->t_ce_avd)
		temp = max_t(u32, temp,
				gpmc_t->cs_on + dev_t->t_ce_avd);
	gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);

	if (sync)
		gpmc_calc_sync_common_timings(gpmc_t, dev_t);

	return 0;
}

/* TODO: remove this function once all peripherals are confirmed to
 * work with generic timing. Simultaneously gpmc_cs_set_timings()
 * has to be modified to handle timings in ps instead of ns
 */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
	t->cs_on /= 1000;
	t->cs_rd_off /= 1000;
	t->cs_wr_off /= 1000;
	t->adv_on /= 1000;
	t->adv_rd_off /= 1000;
	t->adv_wr_off /= 1000;
	t->we_on /= 1000;
	t->we_off /= 1000;
	t->oe_on /= 1000;
	t->oe_off /= 1000;
	t->page_burst_access /= 1000;
	t->access /= 1000;
	t->rd_cycle /= 1000;
	t->wr_cycle /= 1000;
	t->bus_turnaround /= 1000;
	t->cycle2cycle_delay /= 1000;
	t->wait_monitoring /= 1000;
	t->clk_activation /= 1000;
	t->wr_access /= 1000;
	t->wr_data_mux_bus /= 1000;
}

/*
 * Fill @gpmc_t from device timings @dev_t, selecting sync/async and
 * muxed variants based on @gpmc_s (may be NULL => async, non-muxed).
 * Result is converted to ns before returning (see TODO above).
 */
int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
		      struct gpmc_settings *gpmc_s,
		      struct gpmc_device_timings *dev_t)
{
	bool mux = false, sync = false;

	if (gpmc_s) {
		mux = gpmc_s->mux_add_data ?
			true : false;
		sync = (gpmc_s->sync_read || gpmc_s->sync_write);
	}

	/* start from a clean slate; every field below is recomputed */
	memset(gpmc_t, 0, sizeof(*gpmc_t));

	gpmc_calc_common_timings(gpmc_t, dev_t, sync);

	if (gpmc_s && gpmc_s->sync_read)
		gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
	else
		gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);

	if (gpmc_s && gpmc_s->sync_write)
		gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
	else
		gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);

	/* TODO: remove, see function definition */
	gpmc_convert_ps_to_ns(gpmc_t);

	return 0;
}

/**
 * gpmc_cs_program_settings - programs non-timing related settings
 * @cs: GPMC chip-select to program
 * @p: pointer to GPMC settings structure
 *
 * Programs non-timing related settings for a GPMC chip-select, such as
 * bus-width, burst configuration, etc. Function should be called once
 * for each chip-select that is being used and must be called before
 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
 * register will be initialised to zero by this function. Returns 0 on
 * success and appropriate negative error code on failure.
*/ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) { u32 config1; if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) { pr_err("%s: invalid width %d!", __func__, p->device_width); return -EINVAL; } /* Address-data multiplexing not supported for NAND devices */ if (p->device_nand && p->mux_add_data) { pr_err("%s: invalid configuration!\n", __func__); return -EINVAL; } if ((p->mux_add_data > GPMC_MUX_AD) || ((p->mux_add_data == GPMC_MUX_AAD) && !(gpmc_capability & GPMC_HAS_MUX_AAD))) { pr_err("%s: invalid multiplex configuration!\n", __func__); return -EINVAL; } /* Page/burst mode supports lengths of 4, 8 and 16 bytes */ if (p->burst_read || p->burst_write) { switch (p->burst_len) { case GPMC_BURST_4: case GPMC_BURST_8: case GPMC_BURST_16: break; default: pr_err("%s: invalid page/burst-length (%d)\n", __func__, p->burst_len); return -EINVAL; } } if ((p->wait_on_read || p->wait_on_write) && (p->wait_pin > gpmc_nr_waitpins)) { pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); return -EINVAL; } config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1)); if (p->sync_read) config1 |= GPMC_CONFIG1_READTYPE_SYNC; if (p->sync_write) config1 |= GPMC_CONFIG1_WRITETYPE_SYNC; if (p->wait_on_read) config1 |= GPMC_CONFIG1_WAIT_READ_MON; if (p->wait_on_write) config1 |= GPMC_CONFIG1_WAIT_WRITE_MON; if (p->wait_on_read || p->wait_on_write) config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin); if (p->device_nand) config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND); if (p->mux_add_data) config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data); if (p->burst_read) config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP; if (p->burst_write) config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP; if (p->burst_read || p->burst_write) { config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3); config1 |= p->burst_wrap ? 
GPMC_CONFIG1_WRAPBURST_SUPP : 0; } gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1); return 0; } #ifdef CONFIG_OF static struct of_device_id gpmc_dt_ids[] = { { .compatible = "ti,omap2420-gpmc" }, { .compatible = "ti,omap2430-gpmc" }, { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */ { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */ { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ { } }; MODULE_DEVICE_TABLE(of, gpmc_dt_ids); /** * gpmc_read_settings_dt - read gpmc settings from device-tree * @np: pointer to device-tree node for a gpmc child device * @p: pointer to gpmc settings structure * * Reads the GPMC settings for a GPMC child device from device-tree and * stores them in the GPMC settings structure passed. The GPMC settings * structure is initialised to zero by this function and so any * previously stored settings will be cleared. */ void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) { memset(p, 0, sizeof(struct gpmc_settings)); p->sync_read = of_property_read_bool(np, "gpmc,sync-read"); p->sync_write = of_property_read_bool(np, "gpmc,sync-write"); p->device_nand = of_property_read_bool(np, "gpmc,device-nand"); of_property_read_u32(np, "gpmc,device-width", &p->device_width); of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data); if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) { p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap"); p->burst_read = of_property_read_bool(np, "gpmc,burst-read"); p->burst_write = of_property_read_bool(np, "gpmc,burst-write"); if (!p->burst_read && !p->burst_write) pr_warn("%s: page/burst-length set but not used!\n", __func__); } if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) { p->wait_on_read = of_property_read_bool(np, "gpmc,wait-on-read"); p->wait_on_write = of_property_read_bool(np, "gpmc,wait-on-write"); if (!p->wait_on_read && !p->wait_on_write) pr_warn("%s: read/write wait monitoring not enabled!\n", 
__func__); } } static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, struct gpmc_timings *gpmc_t) { struct gpmc_bool_timings *p; if (!np || !gpmc_t) return; memset(gpmc_t, 0, sizeof(*gpmc_t)); /* minimum clock period for syncronous mode */ of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk); /* chip select timtings */ of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on); of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off); of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off); /* ADV signal timings */ of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on); of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off); of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off); /* WE signal timings */ of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on); of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off); /* OE signal timings */ of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on); of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off); /* access and cycle timings */ of_property_read_u32(np, "gpmc,page-burst-access-ns", &gpmc_t->page_burst_access); of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access); of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle); of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle); of_property_read_u32(np, "gpmc,bus-turnaround-ns", &gpmc_t->bus_turnaround); of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns", &gpmc_t->cycle2cycle_delay); of_property_read_u32(np, "gpmc,wait-monitoring-ns", &gpmc_t->wait_monitoring); of_property_read_u32(np, "gpmc,clk-activation-ns", &gpmc_t->clk_activation); /* only applicable to OMAP3+ */ of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access); of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns", &gpmc_t->wr_data_mux_bus); /* bool timing parameters */ p = &gpmc_t->bool_timings; p->cycle2cyclediffcsen = of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen"); 
p->cycle2cyclesamecsen = of_property_read_bool(np, "gpmc,cycle2cycle-samecsen"); p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay"); p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay"); p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay"); p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay"); p->time_para_granularity = of_property_read_bool(np, "gpmc,time-para-granularity"); } #ifdef CONFIG_MTD_NAND static const char * const nand_ecc_opts[] = { [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw", [OMAP_ECC_HAMMING_CODE_HW] = "hw", [OMAP_ECC_HAMMING_CODE_HW_ROMCODE] = "hw-romcode", [OMAP_ECC_BCH4_CODE_HW] = "bch4", [OMAP_ECC_BCH8_CODE_HW] = "bch8", }; static int gpmc_probe_nand_child(struct platform_device *pdev, struct device_node *child) { u32 val; const char *s; struct gpmc_timings gpmc_t; struct omap_nand_platform_data *gpmc_nand_data; if (of_property_read_u32(child, "reg", &val) < 0) { dev_err(&pdev->dev, "%s has no 'reg' property\n", child->full_name); return -ENODEV; } gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data), GFP_KERNEL); if (!gpmc_nand_data) return -ENOMEM; gpmc_nand_data->cs = val; gpmc_nand_data->of_node = child; if (!of_property_read_string(child, "ti,nand-ecc-opt", &s)) for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++) if (!strcasecmp(s, nand_ecc_opts[val])) { gpmc_nand_data->ecc_opt = val; break; } val = of_get_nand_bus_width(child); if (val == 16) gpmc_nand_data->devsize = NAND_BUSWIDTH_16; gpmc_read_timings_dt(child, &gpmc_t); gpmc_nand_init(gpmc_nand_data, &gpmc_t); return 0; } #else static int gpmc_probe_nand_child(struct platform_device *pdev, struct device_node *child) { return 0; } #endif #ifdef CONFIG_MTD_ONENAND static int gpmc_probe_onenand_child(struct platform_device *pdev, struct device_node *child) { u32 val; struct omap_onenand_platform_data *gpmc_onenand_data; if (of_property_read_u32(child, "reg", &val) < 0) { dev_err(&pdev->dev, "%s has no 'reg' 
property\n", child->full_name); return -ENODEV; } gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data), GFP_KERNEL); if (!gpmc_onenand_data) return -ENOMEM; gpmc_onenand_data->cs = val; gpmc_onenand_data->of_node = child; gpmc_onenand_data->dma_channel = -1; if (!of_property_read_u32(child, "dma-channel", &val)) gpmc_onenand_data->dma_channel = val; gpmc_onenand_init(gpmc_onenand_data); return 0; } #else static int gpmc_probe_onenand_child(struct platform_device *pdev, struct device_node *child) { return 0; } #endif /** * gpmc_probe_generic_child - configures the gpmc for a child device * @pdev: pointer to gpmc platform device * @child: pointer to device-tree node for child device * * Allocates and configures a GPMC chip-select for a child device. * Returns 0 on success and appropriate negative error code on failure. */ static int gpmc_probe_generic_child(struct platform_device *pdev, struct device_node *child) { struct gpmc_settings gpmc_s; struct gpmc_timings gpmc_t; struct resource res; unsigned long base; int ret, cs; if (of_property_read_u32(child, "reg", &cs) < 0) { dev_err(&pdev->dev, "%s has no 'reg' property\n", child->full_name); return -ENODEV; } if (of_address_to_resource(child, 0, &res) < 0) { dev_err(&pdev->dev, "%s has malformed 'reg' property\n", child->full_name); return -ENODEV; } ret = gpmc_cs_request(cs, resource_size(&res), &base); if (ret < 0) { dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); return ret; } /* * FIXME: gpmc_cs_request() will map the CS to an arbitary * location in the gpmc address space. When booting with * device-tree we want the NOR flash to be mapped to the * location specified in the device-tree blob. So remap the * CS to this location. Once DT migration is complete should * just make gpmc_cs_request() map a specific address. 
*/ ret = gpmc_cs_remap(cs, res.start); if (ret < 0) { dev_err(&pdev->dev, "cannot remap GPMC CS %d to 0x%x\n", cs, res.start); goto err; } gpmc_read_settings_dt(child, &gpmc_s); ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); if (ret < 0) goto err; ret = gpmc_cs_program_settings(cs, &gpmc_s); if (ret < 0) goto err; gpmc_read_timings_dt(child, &gpmc_t); gpmc_cs_set_timings(cs, &gpmc_t); if (of_platform_device_create(child, NULL, &pdev->dev)) return 0; dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); ret = -ENODEV; err: gpmc_cs_free(cs); return ret; } static int gpmc_probe_dt(struct platform_device *pdev) { int ret; struct device_node *child; const struct of_device_id *of_id = of_match_device(gpmc_dt_ids, &pdev->dev); if (!of_id) return 0; ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins", &gpmc_nr_waitpins); if (ret < 0) { pr_err("%s: number of wait pins not found!\n", __func__); return ret; } for_each_child_of_node(pdev->dev.of_node, child) { if (!child->name) continue; if (of_node_cmp(child->name, "nand") == 0) ret = gpmc_probe_nand_child(pdev, child); else if (of_node_cmp(child->name, "onenand") == 0) ret = gpmc_probe_onenand_child(pdev, child); else if (of_node_cmp(child->name, "ethernet") == 0 || of_node_cmp(child->name, "nor") == 0) ret = gpmc_probe_generic_child(pdev, child); if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", __func__, child->full_name)) of_node_put(child); } return 0; } #else static int gpmc_probe_dt(struct platform_device *pdev) { return 0; } #endif static int gpmc_probe(struct platform_device *pdev) { int rc; u32 l; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -ENOENT; phys_base = res->start; mem_size = resource_size(res); gpmc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(gpmc_base)) return PTR_ERR(gpmc_base); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) dev_warn(&pdev->dev, "Failed 
to get resource: irq\n"); else gpmc_irq = res->start; gpmc_l3_clk = clk_get(&pdev->dev, "fck"); if (IS_ERR(gpmc_l3_clk)) { dev_err(&pdev->dev, "error: clk_get\n"); gpmc_irq = 0; return PTR_ERR(gpmc_l3_clk); } clk_prepare_enable(gpmc_l3_clk); gpmc_dev = &pdev->dev; l = gpmc_read_reg(GPMC_REVISION); /* * FIXME: Once device-tree migration is complete the below flags * should be populated based upon the device-tree compatible * string. For now just use the IP revision. OMAP3+ devices have * the wr_access and wr_data_mux_bus register fields. OMAP4+ * devices support the addr-addr-data multiplex protocol. * * GPMC IP revisions: * - OMAP24xx = 2.0 * - OMAP3xxx = 5.0 * - OMAP44xx/54xx/AM335x = 6.0 */ if (GPMC_REVISION_MAJOR(l) > 0x4) gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; if (GPMC_REVISION_MAJOR(l) > 0x5) gpmc_capability |= GPMC_HAS_MUX_AAD; dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), GPMC_REVISION_MINOR(l)); gpmc_mem_init(); if (gpmc_setup_irq() < 0) dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); /* Now the GPMC is initialised, unreserve the chip-selects */ gpmc_cs_map = 0; if (!pdev->dev.of_node) gpmc_nr_waitpins = GPMC_NR_WAITPINS; rc = gpmc_probe_dt(pdev); if (rc < 0) { clk_disable_unprepare(gpmc_l3_clk); clk_put(gpmc_l3_clk); dev_err(gpmc_dev, "failed to probe DT parameters\n"); return rc; } return 0; } static int gpmc_remove(struct platform_device *pdev) { gpmc_free_irq(); gpmc_mem_exit(); gpmc_dev = NULL; return 0; } static struct platform_driver gpmc_driver = { .probe = gpmc_probe, .remove = gpmc_remove, .driver = { .name = DEVICE_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(gpmc_dt_ids), }, }; static __init int gpmc_init(void) { return platform_driver_register(&gpmc_driver); } static __exit void gpmc_exit(void) { platform_driver_unregister(&gpmc_driver); } omap_postcore_initcall(gpmc_init); module_exit(gpmc_exit); static int __init omap_gpmc_init(void) { struct omap_hwmod *oh; struct platform_device 
*pdev; char *oh_name = "gpmc"; /* * if the board boots up with a populated DT, do not * manually add the device from this initcall */ if (of_have_populated_dt()) return -ENODEV; oh = omap_hwmod_lookup(oh_name); if (!oh) { pr_err("Could not look up %s\n", oh_name); return -ENODEV; } pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0); WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; } omap_postcore_initcall(omap_gpmc_init); static irqreturn_t gpmc_handle_irq(int irq, void *dev) { int i; u32 regval; regval = gpmc_read_reg(GPMC_IRQSTATUS); if (!regval) return IRQ_NONE; for (i = 0; i < GPMC_NR_IRQ; i++) if (regval & gpmc_client_irq[i].bitmask) generic_handle_irq(gpmc_client_irq[i].irq); gpmc_write_reg(GPMC_IRQSTATUS, regval); return IRQ_HANDLED; } #ifdef CONFIG_ARCH_OMAP3 static struct omap3_gpmc_regs gpmc_context; void omap3_gpmc_save_context(void) { int i; gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); gpmc_context.config = gpmc_read_reg(GPMC_CONFIG); gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); for (i = 0; i < GPMC_CS_NUM; i++) { gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i); if (gpmc_context.cs_context[i].is_valid) { gpmc_context.cs_context[i].config1 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); gpmc_context.cs_context[i].config2 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); gpmc_context.cs_context[i].config3 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); gpmc_context.cs_context[i].config4 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); gpmc_context.cs_context[i].config5 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); gpmc_context.cs_context[i].config6 = gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); gpmc_context.cs_context[i].config7 = 
gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); } } } void omap3_gpmc_restore_context(void) { int i; gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); gpmc_write_reg(GPMC_CONFIG, gpmc_context.config); gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1); gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2); gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control); for (i = 0; i < GPMC_CS_NUM; i++) { if (gpmc_context.cs_context[i].is_valid) { gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, gpmc_context.cs_context[i].config1); gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, gpmc_context.cs_context[i].config2); gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, gpmc_context.cs_context[i].config3); gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, gpmc_context.cs_context[i].config4); gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, gpmc_context.cs_context[i].config5); gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, gpmc_context.cs_context[i].config6); gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, gpmc_context.cs_context[i].config7); } } } #endif /* CONFIG_ARCH_OMAP3 */
gpl-2.0
krash86/android_kernel_google_pixel
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
416
55371
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include <linux/export.h> #include "dm_common.h" #include "phy_common.h" #include "../pci.h" #include "../base.h" #define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) #define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) #define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) #define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) #define RTLPRIV (struct rtl_priv *) #define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ ((RTLPRIV(_priv))->mac80211.opmode == \ NL80211_IFTYPE_ADHOC) ? 
\ ((RTLPRIV(_priv))->dm.entry_min_undec_sm_pwdb) : \ ((RTLPRIV(_priv))->dm.undec_sm_pwdb) static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 0x7f8001fe, 0x788001e2, 0x71c001c7, 0x6b8001ae, 0x65400195, 0x5fc0017f, 0x5a400169, 0x55400155, 0x50800142, 0x4c000130, 0x47c0011f, 0x43c0010f, 0x40000100, 0x3c8000f2, 0x390000e4, 0x35c000d7, 0x32c000cb, 0x300000c0, 0x2d4000b5, 0x2ac000ab, 0x288000a2, 0x26000098, 0x24000090, 0x22000088, 0x20000080, 0x1e400079, 0x1c800072, 0x1b00006c, 0x19800066, 0x18000060, 0x16c0005b, 0x15800056, 0x14400051, 0x1300004c, 0x12000048, 0x11000044, 0x10000040, }; static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 
0x01}, {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} }; static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 0x08, 0x07, 0x04, 0x00, 
	 0x00, 0x00, 0x00}
};

/* BB power-index registers saved/restored around BT coexistence events */
static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};

/* Restore the six BB power-index registers from the saved backup. */
void dm_restorepowerindex(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 index;

	for (index = 0; index < 6; index++)
		rtl_write_byte(rtlpriv, power_index_reg[index],
			       rtlpriv->dm.powerindex_backup[index]);
}
EXPORT_SYMBOL_GPL(dm_restorepowerindex);

/* Write @value into all six BB power-index registers. */
void dm_writepowerindex(struct ieee80211_hw *hw, u8 value)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 index;

	for (index = 0; index < 6; index++)
		rtl_write_byte(rtlpriv, power_index_reg[index], value);
}
EXPORT_SYMBOL_GPL(dm_writepowerindex);

/* Snapshot the six BB power-index registers into the driver backup. */
void dm_savepowerindex(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 index;
	u8 tmp;

	for (index = 0; index < 6; index++) {
		tmp = rtl_read_byte(rtlpriv, power_index_reg[index]);
		rtlpriv->dm.powerindex_backup[index] = tmp;
	}
}
EXPORT_SYMBOL_GPL(dm_savepowerindex);

/* Initialise the dynamic-initial-gain (DIG) state to its defaults. */
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	dm_digtable->dig_enable_flag = true;
	dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
	dm_digtable->cur_igvalue = 0x20;
	dm_digtable->pre_igvalue = 0x0;
	dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
	dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
	dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
	dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
	dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
	dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	dm_digtable->rx_gain_max = DM_DIG_MAX;
	dm_digtable->rx_gain_min = DM_DIG_MIN;
	dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
	dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
	dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
	dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
	dm_digtable->forbidden_igi = DM_DIG_MIN;
	dm_digtable->large_fa_hit = 0;
	dm_digtable->recover_cnt = 0;
	dm_digtable->dig_dynamic_min = 0x25;
}

/*
 * Return the minimum undecorated smoothed PWDB (signal strength) over the
 * current connection state, clamped to 100.
 */
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	long rssi_val_min = 0;

	if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
	    (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
		/* both connected: take the smaller of entry/own pwdb */
		if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
			rssi_val_min =
			    (rtlpriv->dm.entry_min_undec_sm_pwdb >
			     rtlpriv->dm.undec_sm_pwdb) ?
			    rtlpriv->dm.undec_sm_pwdb :
			    rtlpriv->dm.entry_min_undec_sm_pwdb;
		else
			rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
	} else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
		   dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
		rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
	} else if (dm_digtable->curmultista_cstate ==
		   DIG_MULTISTA_CONNECT) {
		rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
	}

	if (rssi_val_min > 100)
		rssi_val_min = 100;
	return (u8)rssi_val_min;
}

/*
 * Read the OFDM and CCK false-alarm counters from the baseband, total
 * them into rtlpriv->falsealm_cnt, then reset the hardware counters.
 */
static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
	u32 ret_value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
	falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
	falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
	falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
	ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
	falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
	falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);

	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
	    falsealm_cnt->cnt_rate_illegal +
	    falsealm_cnt->cnt_crc8_fail +
	    falsealm_cnt->cnt_mcs_fail +
	    falsealm_cnt->cnt_fast_fsync_fail +
	    falsealm_cnt->cnt_sb_search_fail;

	/* hold the CCK counter before reading its two halves */
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
	falsealm_cnt->cnt_cck_fail = ret_value;

	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
	falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;

	falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
				 falsealm_cnt->cnt_rate_illegal +
				 falsealm_cnt->cnt_crc8_fail +
				 falsealm_cnt->cnt_mcs_fail +
				 falsealm_cnt->cnt_cck_fail);

	/* pulse the reset bits to clear the hardware counters */
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 falsealm_cnt->cnt_parity_fail,
		 falsealm_cnt->cnt_rate_illegal,
		 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
		 falsealm_cnt->cnt_ofdm_fail,
		 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}

/*
 * Step the initial-gain (IGI) value up/down based on the total
 * false-alarm count, clamped to [DM_DIG_FA_LOWER, DM_DIG_FA_UPPER],
 * then write it to hardware.
 */
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	u8 value_igi = dm_digtable->cur_igvalue;

	if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
		value_igi--;
	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
		value_igi += 0;
	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
		value_igi++;
	else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
		value_igi += 2;

	if (value_igi > DM_DIG_FA_UPPER)
		value_igi = DM_DIG_FA_UPPER;
	else if (value_igi < DM_DIG_FA_LOWER)
		value_igi = DM_DIG_FA_LOWER;

	/* abnormally many false alarms: force maximum gain backoff */
	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
		value_igi = DM_DIG_FA_UPPER;

	dm_digtable->cur_igvalue = value_igi;
	rtl92c_dm_write_dig(hw);
}

static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t
*digtable = &rtlpriv->dm_digtable;
	u32 isbt;

	/* modify DIG lower bound, deal with abnormally large false alarm */
	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
		digtable->large_fa_hit++;
		if (digtable->forbidden_igi < digtable->cur_igvalue) {
			digtable->forbidden_igi = digtable->cur_igvalue;
			digtable->large_fa_hit = 1;
		}
		if (digtable->large_fa_hit >= 3) {
			/* raise the IGI floor above the forbidden value */
			if ((digtable->forbidden_igi + 1) > digtable->rx_gain_max)
				digtable->rx_gain_min = digtable->rx_gain_max;
			else
				digtable->rx_gain_min =
				    (digtable->forbidden_igi + 1);
			digtable->recover_cnt = 3600;	/* 3600=2hr */
		}
	} else {
		/* Recovery mechanism for IGI lower bound */
		if (digtable->recover_cnt != 0) {
			digtable->recover_cnt--;
		} else {
			if (digtable->large_fa_hit == 0) {
				/* slowly walk the floor back down */
				if ((digtable->forbidden_igi-1) < DM_DIG_MIN) {
					digtable->forbidden_igi = DM_DIG_MIN;
					digtable->rx_gain_min = DM_DIG_MIN;
				} else {
					digtable->forbidden_igi--;
					digtable->rx_gain_min =
					    digtable->forbidden_igi + 1;
				}
			} else if (digtable->large_fa_hit == 3) {
				digtable->large_fa_hit = 0;
			}
		}
	}

	if (rtlpriv->falsealm_cnt.cnt_all < 250) {
		/* reg 0x4fd bit0 presumably reflects BT activity —
		 * TODO confirm against the register map */
		isbt = rtl_read_byte(rtlpriv, 0x4fd) & 0x01;

		if (!isbt) {
			if (rtlpriv->falsealm_cnt.cnt_all >
			    digtable->fa_lowthresh) {
				if ((digtable->back_val - 2) <
				    digtable->back_range_min)
					digtable->back_val =
					    digtable->back_range_min;
				else
					digtable->back_val -= 2;
			} else if (rtlpriv->falsealm_cnt.cnt_all <
				   digtable->fa_lowthresh) {
				if ((digtable->back_val + 2) >
				    digtable->back_range_max)
					digtable->back_val =
					    digtable->back_range_max;
				else
					digtable->back_val += 2;
			}
		} else {
			digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
		}
	} else {
		/* Adjust initial gain by false alarm */
		if (rtlpriv->falsealm_cnt.cnt_all > 1000)
			digtable->cur_igvalue = digtable->pre_igvalue + 2;
		else if (rtlpriv->falsealm_cnt.cnt_all > 750)
			digtable->cur_igvalue = digtable->pre_igvalue + 1;
		else if (rtlpriv->falsealm_cnt.cnt_all < 500)
			digtable->cur_igvalue = digtable->pre_igvalue - 1;
	}

	/* Check initial gain by upper/lower bound */
	if (digtable->cur_igvalue > digtable->rx_gain_max)
		digtable->cur_igvalue = digtable->rx_gain_max;
	if (digtable->cur_igvalue < digtable->rx_gain_min)
		digtable->cur_igvalue = digtable->rx_gain_min;

	rtl92c_dm_write_dig(hw);
}

/* DIG handling for the multi-STA (ad-hoc) case: drive the ext-port stage
 * from the minimum entry RSSI and re-run the FA-based gain control when
 * signal is strong.  'initialized' is static so the 0x20 reset is done
 * once per connect cycle. */
static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
{
	static u8 initialized; /* initialized to false */
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
	bool multi_sta = false;

	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		multi_sta = true;

	if (!multi_sta ||
	    dm_digtable->cursta_cstate == DIG_STA_DISCONNECT) {
		initialized = false;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		return;
	} else if (initialized == false) {
		initialized = true;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl92c_dm_write_dig(hw);
	}

	if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
			if (dm_digtable->dig_ext_port_stage ==
			    DIG_EXT_PORT_STAGE_2) {
				dm_digtable->cur_igvalue = 0x20;
				rtl92c_dm_write_dig(hw);
			}
			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
		} else if (rssi_strength > dm_digtable->rssi_highthresh) {
			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
			rtl92c_dm_ctrl_initgain_by_fa(hw);
		}
	} else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl92c_dm_write_dig(hw);
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "curmultista_cstate = %x dig_ext_port_stage %x\n",
		 dm_digtable->curmultista_cstate,
		 dm_digtable->dig_ext_port_stage);
}

/* DIG handling for the single-STA case: while (still) connected, refresh
 * the minimum PWDB and run the RSSI-driven gain control; on a state flip
 * to disconnect, reset the DIG bookkeeping and rewrite IGI 0x20. */
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "presta_cstate = %x, cursta_cstate = %x\n",
		 dm_digtable->presta_cstate, dm_digtable->cursta_cstate);

	if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
	    dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
	    dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
		if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
			dm_digtable->rssi_val_min =
			    rtl92c_dm_initial_gain_min_pwdb(hw);
			if (dm_digtable->rssi_val_min > 100)
				dm_digtable->rssi_val_min = 100;
			rtl92c_dm_ctrl_initgain_by_rssi(hw);
		}
	} else {
		dm_digtable->rssi_val_min = 0;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
		dm_digtable->cur_igvalue = 0x20;
		dm_digtable->pre_igvalue = 0;
		rtl92c_dm_write_dig(hw);
	}
}

/* Choose the CCK packet-detection stage from the minimum RSSI (with a
 * 20/25 hysteresis band) and program RCCK0_CCA byte2 (0x83 for low-RSSI
 * or MAX, 0xcd otherwise) only when the stage changes. */
static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
		dm_digtable->rssi_val_min =
		    rtl92c_dm_initial_gain_min_pwdb(hw);
		if (dm_digtable->rssi_val_min > 100)
			dm_digtable->rssi_val_min = 100;
		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			if (dm_digtable->rssi_val_min <= 25)
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		} else {
			if (dm_digtable->rssi_val_min <= 20)
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		}
	} else {
		dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
	}

	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
		if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) ||
		    (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
		else
			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
	}
}

/* Top-level DIG driver: derive the connect state from the MAC link state
 * (skipped while scanning), then run the single-STA, multi-STA and CCK
 * packet-detection sub-controllers. */
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	if (mac->act_scanning)
		return;

	if (mac->link_state >= MAC80211_LINKED)
		dm_digtable->cursta_cstate = DIG_STA_CONNECT;
	else
		dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
	dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;

	rtl92c_dm_initial_gain_sta(hw);
	rtl92c_dm_initial_gain_multi_sta(hw);
	rtl92c_dm_cck_packet_detection_thresh(hw);

	dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
}

/* Periodic DIG entry point; no-op unless initial gain control is enabled
 * and the DIG dynamic function is switched on in dm_flag. */
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (rtlpriv->dm.dm_initialgain_enable == false)
		return;
	if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
		return;

	rtl92c_dm_ctrl_initgain_by_twoport(hw);
}

/* Enable dynamic TX power only for USB high-PA boards (board_type bit0),
 * saving the current power indexes first so they can be restored. */
static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (rtlpriv->rtlhal.interface == INTF_USB &&
	    rtlpriv->rtlhal.board_type & 0x1) {
		dm_savepowerindex(hw);
		rtlpriv->dm.dynamic_txpower_enable = true;
	} else {
		rtlpriv->dm.dynamic_txpower_enable = false;
	}
	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}

/* Commit cur_igvalue to the path-A/B AGC core registers.  On USB with
 * DIG disabled the value is parked at 0x17; otherwise the value is
 * decremented once and clamped at DM_DIG_MIN before being written. */
void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
		 dm_digtable->back_val);

	if (rtlpriv->rtlhal.interface == INTF_USB &&
	    !dm_digtable->dig_enable_flag) {
		dm_digtable->pre_igvalue = 0x17;
		return;
	}
	dm_digtable->cur_igvalue -= 1;
	if (dm_digtable->cur_igvalue < DM_DIG_MIN)
		dm_digtable->cur_igvalue = DM_DIG_MIN;

	if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
			      dm_digtable->cur_igvalue);
		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
			      dm_digtable->cur_igvalue);
		dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
	}
	RT_TRACE(rtlpriv, COMP_DIG,
DBG_WARNING, "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, dm_digtable->rssi_val_min, dm_digtable->back_val, dm_digtable->rx_gain_max, dm_digtable->rx_gain_min, dm_digtable->large_fa_hit, dm_digtable->forbidden_igi); } EXPORT_SYMBOL(rtl92c_dm_write_dig); static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff; if (mac->link_state != MAC80211_LINKED) return; if (mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_AP) { /* TODO: Handle ADHOC and AP Mode */ } if (tmpentry_max_pwdb != 0) rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb; else rtlpriv->dm.entry_max_undec_sm_pwdb = 0; if (tmpentry_min_pwdb != 0xff) rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb; else rtlpriv->dm.entry_min_undec_sm_pwdb = 0; /* TODO: * if (mac->opmode == NL80211_IFTYPE_STATION) { * if (rtlpriv->rtlhal.fw_ready) { * u32 param = (u32)(rtlpriv->dm.undec_sm_pwdb << 16); * rtl8192c_set_rssi_cmd(hw, param); * } * } */ } void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo); static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; static u32 last_bt_edca_ul; static u32 last_bt_edca_dl; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; bool bt_change_edca = false; if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; 
last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; } if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; bt_change_edca = true; } if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl; bt_change_edca = true; } if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; return; } if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { if (!(edca_be_ul & 0xffff0000)) edca_be_ul |= 0x005e0000; if (!(edca_be_dl & 0xffff0000)) edca_be_dl |= 0x005e0000; } if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &tmp); rtlpriv->dm.current_turbo_edca = false; } } rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 thermalvalue, delta, delta_lck, delta_iqk; long ele_a, ele_d, temp_cck, val_x, value32; long val_y, ele_c = 0; u8 ofdm_index[2], ofdm_index_old[2] = {0, 
0}, cck_index_old = 0;
	s8 cck_index = 0;
	int i;
	bool is2t = IS_92C_SERIAL(rtlhal->version);
	s8 txpwr_level[3] = {0, 0, 0};
	u8 ofdm_min_index = 6, rf;

	rtlpriv->dm.txpower_trackinginit = true;
	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "rtl92c_dm_txpower_tracking_callback_thermalmeter\n");

	/* current die temperature from the RF thermal meter */
	thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
		 thermalvalue, rtlpriv->dm.thermalvalue,
		 rtlefuse->eeprom_thermalmeter);

	rtl92c_phy_ap_calibrate(hw, (thermalvalue -
				     rtlefuse->eeprom_thermalmeter));

	if (is2t)
		rf = 2;
	else
		rf = 1;

	if (thermalvalue) {
		/* locate the current path-A OFDM swing entry */
		ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
				      MASKDWORD) & MASKOFDM_D;

		for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
			if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
				ofdm_index_old[0] = (u8) i;
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
					 ROFDM0_XATXIQIMBALANCE, ele_d,
					 ofdm_index_old[0]);
				break;
			}
		}

		if (is2t) {
			/* same lookup for path B */
			ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
					      MASKDWORD) & MASKOFDM_D;
			for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
				if (ele_d ==
				    (ofdmswing_table[i] & MASKOFDM_D)) {
					ofdm_index_old[1] = (u8) i;
					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
						 ROFDM0_XBTXIQIMBALANCE, ele_d,
						 ofdm_index_old[1]);
					break;
				}
			}
		}

		/* locate the current CCK swing entry (channel-14 uses a
		 * separate table) */
		temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
					 MASKDWORD) & MASKCCK;
		for (i = 0; i < CCK_TABLE_LENGTH; i++) {
			if (rtlpriv->dm.cck_inch14) {
				if (memcmp((void *)&temp_cck,
					   (void *)&cckswing_table_ch14[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;
					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
						 RCCK0_TXFILTER2, temp_cck,
						 cck_index_old,
						 rtlpriv->dm.cck_inch14);
					break;
				}
			} else {
				if (memcmp((void *)&temp_cck,
					   (void *)
					   &cckswing_table_ch1ch13[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;
					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
						 RCCK0_TXFILTER2, temp_cck,
						 cck_index_old,
						 rtlpriv->dm.cck_inch14);
					break;
				}
			}
		}

		if (!rtlpriv->dm.thermalvalue) {
			/* first pass: seed the cached values */
			rtlpriv->dm.thermalvalue =
			    rtlefuse->eeprom_thermalmeter;
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;
			for (i = 0; i < rf; i++)
				rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
			rtlpriv->dm.cck_index = cck_index_old;
		}
		/* Handle USB High PA boards */

		delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue) :
		    (rtlpriv->dm.thermalvalue - thermalvalue);
		delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
		    (rtlpriv->dm.thermalvalue_lck - thermalvalue);
		delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
		    (rtlpriv->dm.thermalvalue_iqk - thermalvalue);

		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
			 thermalvalue, rtlpriv->dm.thermalvalue,
			 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
			 delta_iqk);

		if (delta_lck > 1) {
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtl92c_phy_lc_calibrate(hw);
		}

		if (delta > 0 && rtlpriv->dm.txpower_track_control) {
			/* shift cached indexes by the temperature delta */
			if (thermalvalue > rtlpriv->dm.thermalvalue) {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] -= delta;
				rtlpriv->dm.cck_index -= delta;
			} else {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] += delta;
				rtlpriv->dm.cck_index += delta;
			}

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
					 rtlpriv->dm.ofdm_index[0],
					 rtlpriv->dm.ofdm_index[1],
					 rtlpriv->dm.cck_index);
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
					 rtlpriv->dm.ofdm_index[0],
					 rtlpriv->dm.cck_index);
			}

			if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i] + 1;
				cck_index = rtlpriv->dm.cck_index + 1;
			} else {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i];
				cck_index = rtlpriv->dm.cck_index;
			}

			for (i = 0; i < rf; i++) {
				if (txpwr_level[i] >= 0 &&
				    txpwr_level[i] <= 26) {
					if (thermalvalue >
					    rtlefuse->eeprom_thermalmeter) {
						if (delta < 5)
							ofdm_index[i] -= 1;
						else
							ofdm_index[i] -= 2;
					} else if (delta > 5 && thermalvalue <
						   rtlefuse->
						   eeprom_thermalmeter) {
						ofdm_index[i] += 1;
					}
				} else if (txpwr_level[i] >= 27 &&
					   txpwr_level[i] <= 32 &&
					   thermalvalue >
					   rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						ofdm_index[i] -= 1;
					else
						ofdm_index[i] -= 2;
				} else if (txpwr_level[i] >= 32 &&
					   txpwr_level[i] <= 38 &&
					   thermalvalue >
					   rtlefuse->eeprom_thermalmeter &&
					   delta > 5) {
					ofdm_index[i] -= 1;
				}
			}
			/* NOTE(review): the following block reads
			 * txpwr_level[i] with i == rf (loop exit value),
			 * i.e. the element after the per-path ones — looks
			 * unintended, but txpwr_level is all-zero here so
			 * only the first branch can trigger.  TODO confirm. */
			if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
				if (thermalvalue >
				    rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						cck_index -= 1;
					else
						cck_index -= 2;
				} else if (delta > 5 && thermalvalue <
					   rtlefuse->eeprom_thermalmeter) {
					cck_index += 1;
				}
			} else if (txpwr_level[i] >= 27 &&
				   txpwr_level[i] <= 32 &&
				   thermalvalue >
				   rtlefuse->eeprom_thermalmeter) {
				if (delta < 5)
					cck_index -= 1;
				else
					cck_index -= 2;
			} else if (txpwr_level[i] >= 32 &&
				   txpwr_level[i] <= 38 &&
				   thermalvalue > rtlefuse->eeprom_thermalmeter
				   && delta > 5) {
				cck_index -= 1;
			}

			/* clamp the new indexes to their table ranges */
			for (i = 0; i < rf; i++) {
				if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
					ofdm_index[i] = OFDM_TABLE_SIZE - 1;
				else if (ofdm_index[i] < ofdm_min_index)
					ofdm_index[i] = ofdm_min_index;
			}

			if (cck_index > CCK_TABLE_SIZE - 1)
				cck_index = CCK_TABLE_SIZE - 1;
			else if (cck_index < 0)
				cck_index = 0;

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
					 ofdm_index[0], ofdm_index[1],
					 cck_index);
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "new OFDM_A_index=0x%x, cck_index=0x%x\n",
					 ofdm_index[0], cck_index);
			}
		}

		if (rtlpriv->dm.txpower_track_control && delta != 0) {
			/* apply the path-A swing, folding in the stored IQK
			 * results (reg_e94/e9c) when they are valid */
			ele_d = (ofdmswing_table[ofdm_index[0]] &
				 0xFFC00000) >> 22;
			val_x = rtlphy->reg_e94;
			val_y = rtlphy->reg_e9c;

			if (val_x != 0) {
				/* sign-extend the 10-bit IQK values */
				if ((val_x & 0x00000200) != 0)
					val_x = val_x | 0xFFFFFC00;
				ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;

				if ((val_y & 0x00000200) != 0)
					val_y = val_y | 0xFFFFFC00;
				ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;

				value32 = (ele_d << 22) |
				    ((ele_c & 0x3F) << 16) | ele_a;
				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD, value32);

				value32 = (ele_c & 0x000003C0) >> 6;
				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      value32);

				value32 = ((val_x * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31), value32);

				value32 = ((val_y * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(29), value32);
			} else {
				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD,
					      ofdmswing_table[ofdm_index[0]]);
				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      0x00);
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31) | BIT(29), 0x00);
			}

			/* program the CCK swing filter taps (regs 0xa22-0xa29) */
			if (!rtlpriv->dm.cck_inch14) {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch1ch13[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch1ch13[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch1ch13[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch1ch13[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch1ch13[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch1ch13[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch1ch13[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch1ch13[cck_index]
					       [7]);
			} else {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch14[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch14[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch14[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch14[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch14[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch14[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch14[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch14[cck_index]
					       [7]);
			}

			if (is2t) {
				/* repeat for path B with reg_eb4/ebc */
				ele_d = (ofdmswing_table[ofdm_index[1]] &
					 0xFFC00000) >> 22;
				val_x = rtlphy->reg_eb4;
				val_y = rtlphy->reg_ebc;

				if (val_x != 0) {
					if ((val_x & 0x00000200) != 0)
						val_x = val_x | 0xFFFFFC00;
					ele_a = ((val_x * ele_d) >> 8) &
					    0x000003FF;

					if ((val_y & 0x00000200) != 0)
						val_y = val_y | 0xFFFFFC00;
					ele_c = ((val_y * ele_d) >> 8) &
					    0x00003FF;

					value32 = (ele_d << 22) |
					    ((ele_c & 0x3F) << 16) | ele_a;
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD, value32);

					value32 = (ele_c & 0x000003C0) >> 6;
					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, value32);

					value32 = ((val_x * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw,
						      ROFDM0_ECCATHRESHOLD,
						      BIT(27), value32);

					value32 = ((val_y * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw,
						      ROFDM0_ECCATHRESHOLD,
						      BIT(25), value32);
				} else {
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD,
						      ofdmswing_table[ofdm_index
						      [1]]);
					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, 0x00);
					rtl_set_bbreg(hw,
						      ROFDM0_ECCATHRESHOLD,
						      BIT(27) | BIT(25), 0x00);
				}
			}
		}

		if (delta_iqk > 3) {
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;
			rtl92c_phy_iq_calibrate(hw, false);
		}

		if (rtlpriv->dm.txpower_track_control)
			rtlpriv->dm.thermalvalue = thermalvalue;
	}

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}

/* Arm TX power tracking; the first thermal-meter readout will run it. */
static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
						struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.txpower_tracking = true;
	rtlpriv->dm.txpower_trackinginit = false;

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "pMgntInfo->txpower_tracking = %d\n",
		 rtlpriv->dm.txpower_tracking);
}

/* Thin wrapper — only the thermal-meter flavour exists. */
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
}

static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
{
rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
}

/* Two-phase TX power tracking: first call triggers the RF thermal meter,
 * the next call consumes the reading via the direct-call path. */
static void rtl92c_dm_check_txpower_tracking_thermal_meter(
						struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	static u8 tm_trigger;	/* toggles between trigger and readout */

	if (!rtlpriv->dm.txpower_tracking)
		return;

	if (!tm_trigger) {
		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
			      0x60);
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Trigger 92S Thermal Meter!!\n");
		tm_trigger = 1;
		return;
	} else {
		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Schedule TxPowerTracking direct call!!\n");
		rtl92c_dm_txpower_tracking_directcall(hw);
		tm_trigger = 0;
	}
}

void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
}
EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);

/* Reset the rate-adaptive state machine; rate mask handling is done by
 * the driver only when dm_type is DM_TYPE_BYDRIVER. */
void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rate_adaptive *p_ra = &(rtlpriv->ra);

	p_ra->ratr_state = DM_RATR_STA_INIT;
	p_ra->pre_ratr_state = DM_RATR_STA_INIT;

	if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
		rtlpriv->dm.useramask = true;
	else
		rtlpriv->dm.useramask = false;
}
EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);

/* Reset the BB power-saving (1R CCA / RF save) state table. */
static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	dm_pstable->pre_ccastate = CCA_MAX;
	dm_pstable->cur_ccasate = CCA_MAX;
	dm_pstable->pre_rfstate = RF_MAX;
	dm_pstable->cur_rfstate = RF_MAX;
	dm_pstable->rssi_val_min = 0;
}

/* Switch the RF front-end between power-save and normal settings based on
 * the minimum RSSI (enter save at >= 30, leave at <= 25 — hysteresis), or
 * force normal when bforce_in_normal is set.  The original register values
 * are captured once (reg_init) so they can be restored. */
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	if (!rtlpriv->reg_init) {
		/* one-time snapshot of the fields we will overwrite */
		rtlpriv->reg_874 = (rtl_get_bbreg(hw,
						  RFPGA0_XCD_RFINTERFACESW,
						  MASKDWORD) & 0x1CC000) >> 14;
		rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
						  MASKDWORD) & BIT(3)) >> 3;
		rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
						  MASKDWORD) & 0xFF000000) >> 24;
		rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
				    0xF000) >> 12;
		rtlpriv->reg_init = true;
	}

	if (!bforce_in_normal) {
		if (dm_pstable->rssi_val_min != 0) {
			if (dm_pstable->pre_rfstate == RF_NORMAL) {
				if (dm_pstable->rssi_val_min >= 30)
					dm_pstable->cur_rfstate = RF_SAVE;
				else
					dm_pstable->cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable->rssi_val_min <= 25)
					dm_pstable->cur_rfstate = RF_NORMAL;
				else
					dm_pstable->cur_rfstate = RF_SAVE;
			}
		} else {
			dm_pstable->cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable->cur_rfstate = RF_NORMAL;
	}

	if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
		if (dm_pstable->cur_rfstate == RF_SAVE) {
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			/* pulse bit28 of 0x818 */
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* restore the snapshotted values */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, 0x1CC000,
				      rtlpriv->reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      rtlpriv->reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
				      rtlpriv->reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
		}
		dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
	}
}
EXPORT_SYMBOL(rtl92c_dm_rf_saving);

/* Refresh the minimum RSSI used by BB power saving, then run RF saving
 * on 1T parts (the 92C 1R-CCA path is stubbed out). */
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	/* Determine the minimum RSSI */
	/* NOTE(review): these RT_TRACEs pass DBG_LOUD where the other call
	 * sites pass a COMP_* component mask — looks like a wrong argument;
	 * confirm against the RT_TRACE definition. */
	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		dm_pstable->rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
	}
	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			dm_pstable->rssi_val_min =
			    rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		} else {
			dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		}
	} else {
		dm_pstable->rssi_val_min =
		    rtlpriv->dm.entry_min_undec_sm_pwdb;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 dm_pstable->rssi_val_min);
	}

	/* Power Saving for 92C */
	if (IS_92C_SERIAL(rtlhal->version))
		;/* rtl92c_dm_1r_cca(hw); */
	else
		rtl92c_dm_rf_saving(hw, false);
}

/* Initialise every dynamic-management subsystem; called once at start. */
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtlpriv->dm.dm_flag = DYNAMIC_FUNC_DISABLE | DYNAMIC_FUNC_DIG;
	rtlpriv->dm.undec_sm_pwdb = -1;
	rtlpriv->dm.undec_sm_cck = -1;
	rtlpriv->dm.dm_initialgain_enable = true;
	rtl92c_dm_diginit(hw);

	rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
	rtl92c_dm_init_dynamic_txpower(hw);

	rtl92c_dm_init_edca_turbo(hw);
	rtl92c_dm_init_rate_adaptive_mask(hw);
	rtlpriv->dm.dm_flag |= DYNAMIC_FUNC_SS;
	rtl92c_dm_initialize_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);

	rtlpriv->dm.ofdm_pkt_cnt = 0;
	rtlpriv->dm.dm_rssi_sel = RSSI_DEFAULT;
}
EXPORT_SYMBOL(rtl92c_dm_init);

/* Pick a dynamic TX high-power level from the smoothed PWDB (near-field
 * thresholds with built-in hysteresis) and, when the level changes,
 * reprogram TX power and the per-rate power-index registers. */
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undec_sm_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected to any\n");

		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		} else {
			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		}
	} else {
		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 undec_sm_pwdb);
	}

	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undec_sm_pwdb <
		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undec_sm_pwdb <
		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	if ((rtlpriv->dm.dynamic_txhighpower_lvl !=
	     rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
		if (rtlpriv->dm.dynamic_txhighpower_lvl ==
		    TXHIGHPWRLEVEL_NORMAL)
			dm_restorepowerindex(hw);
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_LEVEL1)
			dm_writepowerindex(hw, 0x14);
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_LEVEL2)
			dm_writepowerindex(hw, 0x10);
	}
	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}

/* Periodic watchdog: run all dynamic-management routines, but only while
 * RF is on, firmware power save is not active (or P2P PS forces the
 * awake flag false) and no RF change is in progress. */
void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *) (&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *) (&fw_ps_awake));
	if (ppsc->p2p_ps_info.p2p_ps_mode)
		fw_ps_awake = false;

	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl92c_dm_pwdb_monitor(hw);
		rtl92c_dm_dig(hw);
		rtl92c_dm_false_alarm_counter_statistics(hw);
		rtl92c_dm_dynamic_bb_powersaving(hw);
		rtl92c_dm_dynamic_txpower(hw);
		rtl92c_dm_check_txpower_tracking(hw);
		/* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
		rtl92c_dm_bt_coexist(hw);
		rtl92c_dm_check_edca_turbo(hw);
	}
}
EXPORT_SYMBOL(rtl92c_dm_watchdog);

/* Re-derive the BT-coexist RSSI state flags from the current PWDB and
 * report (true/false) whether the combined state changed since last call. */
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	long undec_sm_pwdb;
	u8 curr_bt_rssi_state = 0x00;

	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
		undec_sm_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
	} else {
		if (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)
			undec_sm_pwdb = 100;
		else
			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
	}

	/* Check RSSI to determine HighPower/NormalPower state for
	 * BT coexistence. */
	if (undec_sm_pwdb >= 67)
		curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
	else if (undec_sm_pwdb < 62)
		curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;

	/* Check RSSI to determine AMPDU setting for BT coexistence. */
	if (undec_sm_pwdb >= 40)
		curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
	else if (undec_sm_pwdb <= 32)
		curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;

	/* Marked RSSI state. It will be used to determine BT coexistence
	 * setting later. */
	if (undec_sm_pwdb < 35)
		curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
	else
		curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);

	/* Check BT state related to BT_Idle in B/G mode. */
	if (undec_sm_pwdb < 15)
		curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
	else
		curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);

	if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
		rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
		return true;
	} else {
		return false;
	}
}
EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);

/* Classify BT activity from the tx/priority/polling counters (0x488/0x48c/
 * 0x490), track state changes and update register 0x4fd accordingly.
 * (Continues past the end of this view.) */
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u32 polling, ratio_tx, ratio_pri;
	u32 bt_tx, bt_pri;
	u8 bt_state;
	u8 cur_service_type;

	if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
		return false;

	bt_state = rtl_read_byte(rtlpriv, 0x4fd);
	bt_tx = rtl_read_dword(rtlpriv, 0x488);
	bt_tx = bt_tx & 0x00ffffff;
	bt_pri = rtl_read_dword(rtlpriv, 0x48c);
	bt_pri = bt_pri & 0x00ffffff;
	polling = rtl_read_dword(rtlpriv, 0x490);

	/* all-ones readback means the counters are not valid */
	if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
	    polling == 0xffffffff && bt_state == 0xff)
		return false;

	bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
	if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
		rtlpcipriv->bt_coexist.bt_cur_state = bt_state;

		if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
			rtlpcipriv->bt_coexist.bt_service = BT_IDLE;

			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    BIT_OFFSET_LEN_MASK_32(2, 1);
			rtl_write_byte(rtlpriv, 0x4fd, bt_state);
		}
		return true;
	}

	ratio_tx = bt_tx * 1000 / polling;
	ratio_pri = bt_pri * 1000 / polling;
	rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
	rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;

	if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
		/* map tx/priority duty ratios onto a BT service type */
		if ((ratio_tx < 30) && (ratio_pri < 30))
			cur_service_type = BT_IDLE;
		else if ((ratio_pri > 110) && (ratio_pri < 250))
			cur_service_type = BT_SCO;
		else if ((ratio_tx >= 200) && (ratio_pri >= 200))
			cur_service_type = BT_BUSY;
		else if ((ratio_tx >= 350) && (ratio_tx < 500))
			cur_service_type = BT_OTHERBUSY;
		else if (ratio_tx >= 500)
			cur_service_type = BT_PAN;
		else
			cur_service_type = BT_OTHER_ACTION;

		if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
			rtlpcipriv->bt_coexist.bt_service = cur_service_type;
			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
			     0 : BIT_OFFSET_LEN_MASK_32(2, 1));

			/* Add interrupt migration when bt is not ini
			 * idle state (no traffic).
*/ if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { rtl_write_word(rtlpriv, 0x504, 0x0ccc); rtl_write_byte(rtlpriv, 0x506, 0x54); rtl_write_byte(rtlpriv, 0x507, 0x54); } else { rtl_write_byte(rtlpriv, 0x506, 0x00); rtl_write_byte(rtlpriv, 0x507, 0x00); } rtl_write_byte(rtlpriv, 0x4fd, bt_state); return true; } } return false; } static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); static bool media_connect; if (rtlpriv->mac80211.link_state < MAC80211_LINKED) { media_connect = false; } else { if (!media_connect) { media_connect = true; return true; } media_connect = true; } return false; } static void rtl92c_bt_set_normal(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) { rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b; rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b; } else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) { rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f; rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f; } else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) { if (rtlpcipriv->bt_coexist.ratio_tx > 160) { rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f; rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f; } else { rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b; rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b; } } else { rtlpcipriv->bt_coexist.bt_edca_ul = 0; rtlpcipriv->bt_coexist.bt_edca_dl = 0; } if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) && (rtlpriv->mac80211.mode == WIRELESS_MODE_G || (rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) && (rtlpcipriv->bt_coexist.bt_rssi_state & BT_RSSI_STATE_BG_EDCA_LOW)) { rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b; rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b; } } static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); /* 
Only enable HW BT coexist when BT in "Busy" state. */ if (rtlpriv->mac80211.vendor == PEER_CISCO && rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); } else { if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) && (rtlpcipriv->bt_coexist.bt_rssi_state & BT_RSSI_STATE_NORMAL_POWER)) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); } else if ((rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) && (rtlpriv->mac80211.mode < WIRELESS_MODE_N_24G) && (rtlpcipriv->bt_coexist.bt_rssi_state & BT_RSSI_STATE_SPECIAL_LOW)) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); } else { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); } } if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100); else rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0); if (rtlpcipriv->bt_coexist.bt_rssi_state & BT_RSSI_STATE_NORMAL_POWER) { rtl92c_bt_set_normal(hw); } else { rtlpcipriv->bt_coexist.bt_edca_ul = 0; rtlpcipriv->bt_coexist.bt_edca_dl = 0; } if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, 0xf); } else { rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); } if (!rtlpriv->dm.dynamic_txpower_enable) { if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) { if (rtlpcipriv->bt_coexist.bt_rssi_state & BT_RSSI_STATE_TXPOWER_LOW) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_BT2; } else { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_BT1; } } else { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; } rtl92c_phy_set_txpower_level(hw, rtlpriv->phy.current_channel); } } static void rtl92c_check_bt_change(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = 
rtl_hal(rtl_priv(hw)); u8 tmp1byte = 0; if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) && rtlpcipriv->bt_coexist.bt_coexistence) tmp1byte |= BIT(5); if (rtlpcipriv->bt_coexist.bt_cur_state) { if (rtlpcipriv->bt_coexist.bt_ant_isolation) rtl92c_bt_ant_isolation(hw, tmp1byte); } else { rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte); rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); rtlpcipriv->bt_coexist.bt_edca_ul = 0; rtlpcipriv->bt_coexist.bt_edca_dl = 0; } } void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw) { struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); bool wifi_connect_change; bool bt_state_change; bool rssi_state_change; if ((rtlpcipriv->bt_coexist.bt_coexistence) && (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) { wifi_connect_change = rtl92c_bt_wifi_connect_change(hw); bt_state_change = rtl92c_bt_state_change(hw); rssi_state_change = rtl92c_bt_rssi_state_change(hw); if (wifi_connect_change || bt_state_change || rssi_state_change) rtl92c_check_bt_change(hw); } } EXPORT_SYMBOL(rtl92c_dm_bt_coexist);
gpl-2.0
Jairus980/kernel_hltexx
kernel/user.c
672
5018
/* * The "user cache". * * (C) Copyright 1991-2000 Linus Torvalds * * We have a per-user structure to keep track of how many * processes, files etc the user has claimed, in order to be * able to have per-user limits for system resources. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/key.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/user_namespace.h> #include <linux/proc_fs.h> /* * userns count is 1 for root user, 1 for init_uts_ns, * and 1 for... ? */ struct user_namespace init_user_ns = { .kref = { .refcount = ATOMIC_INIT(3), }, .creator = &root_user, .proc_inum = PROC_USER_INIT_INO, }; EXPORT_SYMBOL_GPL(init_user_ns); /* * UID task count cache, to get fast user lookup in "alloc_uid" * when changing user ID's (ie setuid() and friends). */ #define UIDHASH_MASK (UIDHASH_SZ - 1) #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) #define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid))) static struct kmem_cache *uid_cachep; /* * The uidhash_lock is mostly taken from process context, but it is * occasionally also taken from softirq/tasklet context, when * task-structs get RCU-freed. Hence all locking must be softirq-safe. * But free_uid() is also called with local interrupts disabled, and running * local_bh_enable() with local interrupts disabled is an error - we'll run * softirq callbacks, and they can unconditionally enable interrupts, and * the caller of free_uid() didn't expect that.. */ static DEFINE_SPINLOCK(uidhash_lock); /* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->user_ns */ struct user_struct root_user = { .__count = ATOMIC_INIT(2), .processes = ATOMIC_INIT(1), .files = ATOMIC_INIT(0), .sigpending = ATOMIC_INIT(0), .locked_shm = 0, .user_ns = &init_user_ns, }; /* * These routines must be called with the uidhash spinlock held! 
*/ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent) { hlist_add_head(&up->uidhash_node, hashent); } static void uid_hash_remove(struct user_struct *up) { hlist_del_init(&up->uidhash_node); put_user_ns(up->user_ns); } static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) { struct user_struct *user; struct hlist_node *h; hlist_for_each_entry(user, h, hashent, uidhash_node) { if (user->uid == uid) { atomic_inc(&user->__count); return user; } } return NULL; } /* IRQs are disabled and uidhash_lock is held upon function entry. * IRQ state (as stored in flags) is restored and uidhash_lock released * upon function exit. */ static void free_user(struct user_struct *up, unsigned long flags) __releases(&uidhash_lock) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); key_put(up->uid_keyring); key_put(up->session_keyring); kmem_cache_free(uid_cachep, up); } /* * Locate the user_struct for the passed UID. If found, take a ref on it. The * caller must undo that ref with free_uid(). * * If the user_struct could not be found, return NULL. 
*/ struct user_struct *find_user(uid_t uid) { struct user_struct *ret; unsigned long flags; struct user_namespace *ns = current_user_ns(); spin_lock_irqsave(&uidhash_lock, flags); ret = uid_hash_find(uid, uidhashentry(ns, uid)); spin_unlock_irqrestore(&uidhash_lock, flags); return ret; } void free_uid(struct user_struct *up) { unsigned long flags; if (!up) return; local_irq_save(flags); if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) free_user(up, flags); else local_irq_restore(flags); } struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) { struct hlist_head *hashent = uidhashentry(ns, uid); struct user_struct *up, *new; spin_lock_irq(&uidhash_lock); up = uid_hash_find(uid, hashent); spin_unlock_irq(&uidhash_lock); if (!up) { new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL); if (!new) goto out_unlock; new->uid = uid; atomic_set(&new->__count, 1); new->user_ns = get_user_ns(ns); /* * Before adding this, check whether we raced * on adding the same user already.. */ spin_lock_irq(&uidhash_lock); up = uid_hash_find(uid, hashent); if (up) { put_user_ns(ns); key_put(new->uid_keyring); key_put(new->session_keyring); kmem_cache_free(uid_cachep, new); } else { uid_hash_insert(new, hashent); up = new; } spin_unlock_irq(&uidhash_lock); } return up; out_unlock: return NULL; } static int __init uid_cache_init(void) { int n; uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); for(n = 0; n < UIDHASH_SZ; ++n) INIT_HLIST_HEAD(init_user_ns.uidhash_table + n); /* Insert the root user immediately (init already runs as root) */ spin_lock_irq(&uidhash_lock); uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0)); spin_unlock_irq(&uidhash_lock); return 0; } module_init(uid_cache_init);
gpl-2.0
jetonbacaj/SomeKernel_G920P_PB6
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
2208
8759
/* * MPC85xx RDB Board Setup * * Copyright 2009,2012 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <asm/fsl_guts.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define DBG(fmt, args...) 
#endif void __init mpc85xx_rdb_pic_init(void) { struct mpic *mpic; unsigned long root = of_get_flat_dt_root(); #ifdef CONFIG_QUICC_ENGINE struct device_node *np; #endif if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) { mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET | MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } else { mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } BUG_ON(mpic == NULL); mpic_init(mpic); #ifdef CONFIG_QUICC_ENGINE np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (np) { qe_ic_init(np, 0, qe_ic_cascade_low_mpic, qe_ic_cascade_high_mpic); of_node_put(np); } else pr_err("%s: Could not find qe-ic node\n", __func__); #endif } /* * Setup the architecture */ static void __init mpc85xx_rdb_setup_arch(void) { #ifdef CONFIG_QUICC_ENGINE struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc85xx_rdb_setup_arch()", 0); mpc85xx_smp_init(); fsl_pci_assign_primary(); #ifdef CONFIG_QUICC_ENGINE np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!np) { pr_err("%s: Could not find Quicc Engine node\n", __func__); goto qe_fail; } qe_reset(); of_node_put(np); np = of_find_node_by_name(NULL, "par_io"); if (np) { struct device_node *ucc; par_io_init(np); of_node_put(np); for_each_node_by_name(ucc, "ucc") par_io_of_config(ucc); } #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) if (machine_is(p1025_rdb)) { struct ccsr_guts __iomem *guts; np = of_find_node_by_name(NULL, "global-utilities"); if (np) { guts = of_iomap(np, 0); if (!guts) { pr_err("mpc85xx-rdb: could not map global utilities register\n"); } else { /* P1025 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management singals in PMUXCR * register. 
*/ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | MPC85xx_PMUXCR_QE(3) | MPC85xx_PMUXCR_QE(9) | MPC85xx_PMUXCR_QE(12)); iounmap(guts); } of_node_put(np); } } #endif qe_fail: #endif /* CONFIG_QUICC_ENGINE */ printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); } machine_arch_initcall(p2020_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init p2020_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "fsl,P2020RDB")) return 1; return 0; } static int __init p1020_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "fsl,P1020RDB")) return 1; return 0; } static int __init p1020_rdb_pc_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC"); } static int __init p1021_rdb_pc_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "fsl,P1021RDB-PC")) return 1; return 0; } static int __init p2020_rdb_pc_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "fsl,P2020RDB-PC")) return 1; return 0; } static int __init p1025_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1025RDB"); } static int __init p1020_mbg_pc_probe(void) { unsigned long root = of_get_flat_dt_root(); return 
of_flat_dt_is_compatible(root, "fsl,P1020MBG-PC"); } static int __init p1020_utm_pc_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC"); } static int __init p1024_rdb_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1024RDB"); } define_machine(p2020_rdb) { .name = "P2020 RDB", .probe = p2020_rdb_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_rdb) { .name = "P1020 RDB", .probe = p1020_rdb_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1021_rdb_pc) { .name = "P1021 RDB-PC", .probe = p1021_rdb_pc_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p2020_rdb_pc) { .name = "P2020RDB-PC", .probe = p2020_rdb_pc_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1025_rdb) { .name = "P1025 RDB", .probe = p1025_rdb_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = 
fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_mbg_pc) { .name = "P1020 MBG-PC", .probe = p1020_mbg_pc_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_utm_pc) { .name = "P1020 UTM-PC", .probe = p1020_utm_pc_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1020_rdb_pc) { .name = "P1020RDB-PC", .probe = p1020_rdb_pc_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, }; define_machine(p1024_rdb) { .name = "P1024 RDB", .probe = p1024_rdb_probe, .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
Red54/linux-shumeipai2
arch/arm/plat-mxc/devices/platform-spi_imx.c
2208
3977
/* * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_spi_imx_data_entry_single(soc, type, _devid, _id, hwid, _size) \ { \ .devid = _devid, \ .id = _id, \ .iobase = soc ## _ ## type ## hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_ ## type ## hwid, \ } #define imx_spi_imx_data_entry(soc, type, devid, id, hwid, size) \ [id] = imx_spi_imx_data_entry_single(soc, type, devid, id, hwid, size) #ifdef CONFIG_SOC_IMX1 const struct imx_spi_imx_data imx1_cspi_data[] __initconst = { #define imx1_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX1, CSPI, "imx1-cspi", _id, _hwid, SZ_4K) imx1_cspi_data_entry(0, 1), imx1_cspi_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX21 const struct imx_spi_imx_data imx21_cspi_data[] __initconst = { #define imx21_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K) imx21_cspi_data_entry(0, 1), imx21_cspi_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX25 const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { #define imx25_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K) imx25_cspi_data_entry(0, 1), imx25_cspi_data_entry(1, 2), imx25_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_spi_imx_data imx27_cspi_data[] __initconst = { #define imx27_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX27, CSPI, "imx27-cspi", _id, _hwid, SZ_4K) imx27_cspi_data_entry(0, 1), imx27_cspi_data_entry(1, 2), imx27_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_spi_imx_data imx31_cspi_data[] __initconst = { #define 
imx31_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX31, CSPI, "imx31-cspi", _id, _hwid, SZ_4K) imx31_cspi_data_entry(0, 1), imx31_cspi_data_entry(1, 2), imx31_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 const struct imx_spi_imx_data imx35_cspi_data[] __initconst = { #define imx35_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX35, CSPI, "imx35-cspi", _id, _hwid, SZ_4K) imx35_cspi_data_entry(0, 1), imx35_cspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX51 const struct imx_spi_imx_data imx51_cspi_data __initconst = imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K); const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #define imx51_ecspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX51, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K) imx51_ecspi_data_entry(0, 1), imx51_ecspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX51 */ #ifdef CONFIG_SOC_IMX53 const struct imx_spi_imx_data imx53_cspi_data __initconst = imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K); const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { #define imx53_ecspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K) imx53_ecspi_data_entry(0, 1), imx53_ecspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX53 */ struct platform_device *__init imx_add_spi_imx( const struct imx_spi_imx_data *data, const struct spi_imx_master *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device(data->devid, data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
profglavcho/ALPS.L0.MP8.V2.1_LCSH6735_65C_HZ_L_KERNEL
arch/arm/mach-omap1/devices.c
2208
11394
/* * linux/arch/arm/mach-omap1/devices.c * * OMAP1 platform device setup/initialization * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/platform_data/omap-wd-timer.h> #include <asm/mach/map.h> #include <mach/tc.h> #include <mach/mux.h> #include <mach/omap7xx.h> #include <mach/camera.h> #include <mach/hardware.h> #include "common.h" #include "clock.h" #include "dma.h" #include "mmc.h" #include "sram.h" #if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE) static struct platform_device omap_pcm = { .name = "omap-pcm-audio", .id = -1, }; static void omap_init_audio(void) { platform_device_register(&omap_pcm); } #else static inline void omap_init_audio(void) {} #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) #define OMAP_RTC_BASE 0xfffb4800 static struct resource rtc_resources[] = { { .start = OMAP_RTC_BASE, .end = OMAP_RTC_BASE + 0x5f, .flags = IORESOURCE_MEM, }, { .start = INT_RTC_TIMER, .flags = IORESOURCE_IRQ, }, { .start = INT_RTC_ALARM, .flags = IORESOURCE_IRQ, }, }; static struct platform_device omap_rtc_device = { .name = "omap_rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, }; static void omap_init_rtc(void) { (void) platform_device_register(&omap_rtc_device); } #else static inline void omap_init_rtc(void) {} #endif static inline void omap_init_mbox(void) { } /*-------------------------------------------------------------------------*/ #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) 
static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller, int controller_nr) { if (controller_nr == 0) { if (cpu_is_omap7xx()) { omap_cfg_reg(MMC_7XX_CMD); omap_cfg_reg(MMC_7XX_CLK); omap_cfg_reg(MMC_7XX_DAT0); } else { omap_cfg_reg(MMC_CMD); omap_cfg_reg(MMC_CLK); omap_cfg_reg(MMC_DAT0); } if (cpu_is_omap1710()) { omap_cfg_reg(M15_1710_MMC_CLKI); omap_cfg_reg(P19_1710_MMC_CMDDIR); omap_cfg_reg(P20_1710_MMC_DATDIR0); } if (mmc_controller->slots[0].wires == 4 && !cpu_is_omap7xx()) { omap_cfg_reg(MMC_DAT1); /* NOTE: DAT2 can be on W10 (here) or M15 */ if (!mmc_controller->slots[0].nomux) omap_cfg_reg(MMC_DAT2); omap_cfg_reg(MMC_DAT3); } } /* Block 2 is on newer chips, and has many pinout options */ if (cpu_is_omap16xx() && controller_nr == 1) { if (!mmc_controller->slots[1].nomux) { omap_cfg_reg(Y8_1610_MMC2_CMD); omap_cfg_reg(Y10_1610_MMC2_CLK); omap_cfg_reg(R18_1610_MMC2_CLKIN); omap_cfg_reg(W8_1610_MMC2_DAT0); if (mmc_controller->slots[1].wires == 4) { omap_cfg_reg(V8_1610_MMC2_DAT1); omap_cfg_reg(W15_1610_MMC2_DAT2); omap_cfg_reg(R10_1610_MMC2_DAT3); } /* These are needed for the level shifter */ omap_cfg_reg(V9_1610_MMC2_CMDDIR); omap_cfg_reg(V5_1610_MMC2_DATDIR0); omap_cfg_reg(W19_1610_MMC2_DATDIR1); } /* Feedback clock must be set on OMAP-1710 MMC2 */ if (cpu_is_omap1710()) omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24), MOD_CONF_CTRL_1); } } #define OMAP_MMC_NR_RES 4 /* * Register MMC devices. 
*/ static int __init omap_mmc_add(const char *name, int id, unsigned long base, unsigned long size, unsigned int irq, unsigned rx_req, unsigned tx_req, struct omap_mmc_platform_data *data) { struct platform_device *pdev; struct resource res[OMAP_MMC_NR_RES]; int ret; pdev = platform_device_alloc(name, id); if (!pdev) return -ENOMEM; memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource)); res[0].start = base; res[0].end = base + size - 1; res[0].flags = IORESOURCE_MEM; res[1].start = res[1].end = irq; res[1].flags = IORESOURCE_IRQ; res[2].start = rx_req; res[2].name = "rx"; res[2].flags = IORESOURCE_DMA; res[3].start = tx_req; res[3].name = "tx"; res[3].flags = IORESOURCE_DMA; if (cpu_is_omap7xx()) data->slots[0].features = MMC_OMAP7XX; if (cpu_is_omap15xx()) data->slots[0].features = MMC_OMAP15XX; if (cpu_is_omap16xx()) data->slots[0].features = MMC_OMAP16XX; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret == 0) ret = platform_device_add_data(pdev, data, sizeof(*data)); if (ret) goto fail; ret = platform_device_add(pdev); if (ret) goto fail; /* return device handle to board setup code */ data->dev = &pdev->dev; return 0; fail: platform_device_put(pdev); return ret; } void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, int nr_controllers) { int i; for (i = 0; i < nr_controllers; i++) { unsigned long base, size; unsigned rx_req, tx_req; unsigned int irq = 0; if (!mmc_data[i]) continue; omap1_mmc_mux(mmc_data[i], i); switch (i) { case 0: base = OMAP1_MMC1_BASE; irq = INT_MMC; rx_req = OMAP_DMA_MMC_RX; tx_req = OMAP_DMA_MMC_TX; break; case 1: if (!cpu_is_omap16xx()) return; base = OMAP1_MMC2_BASE; irq = INT_1610_MMC2; rx_req = OMAP_DMA_MMC2_RX; tx_req = OMAP_DMA_MMC2_TX; break; default: continue; } size = OMAP1_MMC_SIZE; omap_mmc_add("mmci-omap", i, base, size, irq, rx_req, tx_req, mmc_data[i]); } } #endif /*-------------------------------------------------------------------------*/ /* OMAP7xx SPI support */ #if 
defined(CONFIG_SPI_OMAP_100K) || defined(CONFIG_SPI_OMAP_100K_MODULE) struct platform_device omap_spi1 = { .name = "omap1_spi100k", .id = 1, }; struct platform_device omap_spi2 = { .name = "omap1_spi100k", .id = 2, }; static void omap_init_spi100k(void) { omap_spi1.dev.platform_data = ioremap(OMAP7XX_SPI1_BASE, 0x7ff); if (omap_spi1.dev.platform_data) platform_device_register(&omap_spi1); omap_spi2.dev.platform_data = ioremap(OMAP7XX_SPI2_BASE, 0x7ff); if (omap_spi2.dev.platform_data) platform_device_register(&omap_spi2); } #else static inline void omap_init_spi100k(void) { } #endif #define OMAP1_CAMERA_BASE 0xfffb6800 #define OMAP1_CAMERA_IOSIZE 0x1c static struct resource omap1_camera_resources[] = { [0] = { .start = OMAP1_CAMERA_BASE, .end = OMAP1_CAMERA_BASE + OMAP1_CAMERA_IOSIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = INT_CAMERA, .flags = IORESOURCE_IRQ, }, }; static u64 omap1_camera_dma_mask = DMA_BIT_MASK(32); static struct platform_device omap1_camera_device = { .name = "omap1-camera", .id = 0, /* This is used to put cameras on this interface */ .dev = { .dma_mask = &omap1_camera_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(omap1_camera_resources), .resource = omap1_camera_resources, }; void __init omap1_camera_init(void *info) { struct platform_device *dev = &omap1_camera_device; int ret; dev->dev.platform_data = info; ret = platform_device_register(dev); if (ret) dev_err(&dev->dev, "unable to register device: %d\n", ret); } /*-------------------------------------------------------------------------*/ static inline void omap_init_sti(void) {} /* Numbering for the SPI-capable controllers when used for SPI: * spi = 1 * uwire = 2 * mmc1..2 = 3..4 * mcbsp1..3 = 5..7 */ #if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE) #define OMAP_UWIRE_BASE 0xfffb3000 static struct resource uwire_resources[] = { { .start = OMAP_UWIRE_BASE, .end = OMAP_UWIRE_BASE + 0x20, .flags = IORESOURCE_MEM, }, }; 
static struct platform_device omap_uwire_device = { .name = "omap_uwire", .id = -1, .num_resources = ARRAY_SIZE(uwire_resources), .resource = uwire_resources, }; static void omap_init_uwire(void) { /* FIXME define and use a boot tag; not all boards will be hooking * up devices to the microwire controller, and multi-board configs * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway... */ /* board-specific code must configure chipselects (only a few * are normally used) and SCLK/SDI/SDO (each has two choices). */ (void) platform_device_register(&omap_uwire_device); } #else static inline void omap_init_uwire(void) {} #endif #define OMAP1_RNG_BASE 0xfffe5000 static struct resource omap1_rng_resources[] = { { .start = OMAP1_RNG_BASE, .end = OMAP1_RNG_BASE + 0x4f, .flags = IORESOURCE_MEM, }, }; static struct platform_device omap1_rng_device = { .name = "omap_rng", .id = -1, .num_resources = ARRAY_SIZE(omap1_rng_resources), .resource = omap1_rng_resources, }; static void omap1_init_rng(void) { if (!cpu_is_omap16xx()) return; (void) platform_device_register(&omap1_rng_device); } /*-------------------------------------------------------------------------*/ /* * This gets called after board-specific INIT_MACHINE, and initializes most * on-chip peripherals accessible on this board (except for few like USB): * * (a) Does any "standard config" pin muxing needed. Board-specific * code will have muxed GPIO pins and done "nonstandard" setup; * that code could live in the boot loader. * (b) Populating board-specific platform_data with the data drivers * rely on to handle wiring variations. * (c) Creating platform devices as meaningful on this board and * with this kernel configuration. * * Claiming GPIOs, and setting their direction and initial values, is the * responsibility of the device drivers. So is responding to probe(). * * Board-specific knowledge like creating devices or pin setup is to be * kept out of drivers as much as possible. 
In particular, pin setup * may be handled by the boot loader, and drivers should expect it will * normally have been done by the time they're probed. */ static int __init omap1_init_devices(void) { if (!cpu_class_is_omap1()) return -ENODEV; omap_sram_init(); omap1_clk_late_init(); /* please keep these calls, and their implementations above, * in alphabetical order so they're easier to sort through. */ omap_init_audio(); omap_init_mbox(); omap_init_rtc(); omap_init_spi100k(); omap_init_sti(); omap_init_uwire(); omap1_init_rng(); return 0; } arch_initcall(omap1_init_devices); #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE) static struct resource wdt_resources[] = { { .start = 0xfffeb000, .end = 0xfffeb07F, .flags = IORESOURCE_MEM, }, }; static struct platform_device omap_wdt_device = { .name = "omap_wdt", .id = -1, .num_resources = ARRAY_SIZE(wdt_resources), .resource = wdt_resources, }; static int __init omap_init_wdt(void) { struct omap_wd_timer_platform_data pdata; int ret; if (!cpu_is_omap16xx()) return -ENODEV; pdata.read_reset_sources = omap1_get_reset_sources; ret = platform_device_register(&omap_wdt_device); if (!ret) { ret = platform_device_add_data(&omap_wdt_device, &pdata, sizeof(pdata)); if (ret) platform_device_del(&omap_wdt_device); } return ret; } subsys_initcall(omap_init_wdt); #endif
gpl-2.0
akhirasip/kernel_SEMC_Shakira_N7_kitkat
drivers/media/video/ivtv/ivtv-irq.c
3232
31907
/* interrupt handling Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-queue.h" #include "ivtv-udma.h" #include "ivtv-irq.h" #include "ivtv-mailbox.h" #include "ivtv-vbi.h" #include "ivtv-yuv.h" #include <media/v4l2-event.h> #define DMA_MAGIC_COOKIE 0x000001fe static void ivtv_dma_dec_start(struct ivtv_stream *s); static const int ivtv_stream_map[] = { IVTV_ENC_STREAM_TYPE_MPG, IVTV_ENC_STREAM_TYPE_YUV, IVTV_ENC_STREAM_TYPE_PCM, IVTV_ENC_STREAM_TYPE_VBI, }; static void ivtv_pio_work_handler(struct ivtv *itv) { struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream]; struct ivtv_buffer *buf; int i = 0; IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n"); if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS || s->vdev == NULL || !ivtv_use_pio(s)) { itv->cur_pio_stream = -1; /* trigger PIO complete user interrupt */ write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); return; } IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name); list_for_each_entry(buf, &s->q_dma.list, list) { u32 size = s->sg_processing[i].size & 0x3ffff; /* Copy the data from the card to the buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { memcpy_fromio(buf->buf, itv->dec_mem + 
s->sg_processing[i].src - IVTV_DECODER_OFFSET, size); } else { memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size); } i++; if (i == s->sg_processing_size) break; } write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); } void ivtv_irq_work_handler(struct kthread_work *work) { struct ivtv *itv = container_of(work, struct ivtv, irq_work); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) ivtv_pio_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags)) ivtv_vbi_work_handler(itv); if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags)) ivtv_yuv_work_handler(itv); } /* Determine the required DMA size, setup enough buffers in the predma queue and actually copy the data from the card to the buffers in case a PIO transfer is required for this stream. */ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA]) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf; u32 bytes_needed = 0; u32 offset, size; u32 UVoffset = 0, UVsize = 0; int skip_bufs = s->q_predma.buffers; int idx = s->sg_pending_size; int rc; /* sanity checks */ if (s->vdev == NULL) { IVTV_DEBUG_WARN("Stream %s not started\n", s->name); return -1; } if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { IVTV_DEBUG_WARN("Stream %s not open\n", s->name); return -1; } /* determine offset, size and PTS for the various streams */ switch (s->type) { case IVTV_ENC_STREAM_TYPE_MPG: offset = data[1]; size = data[2]; s->pending_pts = 0; break; case IVTV_ENC_STREAM_TYPE_YUV: offset = data[1]; size = data[2]; UVoffset = data[3]; UVsize = data[4]; s->pending_pts = ((u64) data[5] << 32) | data[6]; break; case IVTV_ENC_STREAM_TYPE_PCM: offset = data[1] + 12; size = data[2] - 12; s->pending_pts = read_dec(offset - 8) | ((u64)(read_dec(offset - 12)) << 32); if (itv->has_cx23415) offset += IVTV_DECODER_OFFSET; break; case IVTV_ENC_STREAM_TYPE_VBI: size = itv->vbi.enc_size * itv->vbi.fpi; offset = read_enc(itv->vbi.enc_start - 4) + 12; if 
(offset == 12) { IVTV_DEBUG_INFO("VBI offset == 0\n"); return -1; } s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32); break; case IVTV_DEC_STREAM_TYPE_VBI: size = read_dec(itv->vbi.dec_start + 4) + 8; offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start; s->pending_pts = 0; offset += IVTV_DECODER_OFFSET; break; default: /* shouldn't happen */ return -1; } /* if this is the start of the DMA then fill in the magic cookie */ if (s->sg_pending_size == 0 && ivtv_use_dma(s)) { if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET); write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET); } else { s->pending_backup = read_enc(offset); write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset); } s->pending_offset = offset; } bytes_needed = size; if (s->type == IVTV_ENC_STREAM_TYPE_YUV) { /* The size for the Y samples needs to be rounded upwards to a multiple of the buf_size. The UV samples then start in the next buffer. */ bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size); bytes_needed += UVsize; } IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n", ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset); rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed); if (rc < 0) { /* Insufficient buffers */ IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n", bytes_needed, s->name); return -1; } if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) { IVTV_WARN("All %s stream buffers are full. 
Dropping data.\n", s->name); IVTV_WARN("Cause: the application is not reading fast enough.\n"); } s->buffers_stolen = rc; /* got the buffers, now fill in sg_pending */ buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); memset(buf->buf, 0, 128); list_for_each_entry(buf, &s->q_predma.list, list) { if (skip_bufs-- > 0) continue; s->sg_pending[idx].dst = buf->dma_handle; s->sg_pending[idx].src = offset; s->sg_pending[idx].size = s->buf_size; buf->bytesused = min(size, s->buf_size); buf->dma_xfer_cnt = s->dma_xfer_cnt; s->q_predma.bytesused += buf->bytesused; size -= buf->bytesused; offset += s->buf_size; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); if (size == 0) { /* YUV */ /* process the UV section */ offset = UVoffset; size = UVsize; } idx++; } s->sg_pending_size = idx; return 0; } static void dma_post(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_buffer *buf = NULL; struct list_head *p; u32 offset; __le32 *u32buf; int x = 0; IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? 
"PIO" : "DMA", s->name, s->dma_offset); list_for_each(p, &s->q_dma.list) { buf = list_entry(p, struct ivtv_buffer, list); u32buf = (__le32 *)buf->buf; /* Sync Buffer */ ivtv_buf_sync_for_cpu(s, buf); if (x == 0 && ivtv_use_dma(s)) { offset = s->dma_last_offset; if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) { for (offset = 0; offset < 64; offset++) { if (u32buf[offset] == DMA_MAGIC_COOKIE) { break; } } offset *= 4; if (offset == 256) { IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name); offset = s->dma_last_offset; } if (s->dma_last_offset != offset) IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset); s->dma_last_offset = offset; } if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || s->type == IVTV_DEC_STREAM_TYPE_VBI)) { write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET); } else { write_enc_sync(0, s->dma_offset); } if (offset) { buf->bytesused -= offset; memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset); } *u32buf = cpu_to_le32(s->dma_backup); } x++; /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */ if (s->type == IVTV_ENC_STREAM_TYPE_MPG || s->type == IVTV_ENC_STREAM_TYPE_VBI) buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP; } if (buf) buf->bytesused += s->dma_last_offset; if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) { list_for_each_entry(buf, &s->q_dma.list, list) { /* Parse and Groom VBI Data */ s->q_dma.bytesused -= buf->bytesused; ivtv_process_vbi_data(itv, buf, 0, s->type); s->q_dma.bytesused += buf->bytesused; } if (s->id == -1) { ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); return; } } ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); if (s->id != -1) wake_up(&s->waitq); } void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) { struct ivtv *itv = s->itv; struct yuv_playback_info *yi = &itv->yuv_info; u8 frame = yi->draw_frame; struct yuv_frame_info *f = &yi->new_frame_info[frame]; struct ivtv_buffer *buf; u32 
y_size = 720 * ((f->src_h + 31) & ~31); u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET; int y_done = 0; int bytes_written = 0; unsigned long flags = 0; int idx = 0; IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset); /* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { if (yi->blanking_dmaptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; } offset += 720 * 16; idx++; } list_for_each_entry(buf, &s->q_predma.list, list) { /* YUV UV Offset from Y Buffer */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && (bytes_written + buf->bytesused) >= y_size) { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = y_size - bytes_written; offset = uv_offset; if (s->sg_pending[idx].size != buf->bytesused) { idx++; s->sg_pending[idx].src = buf->dma_handle + s->sg_pending[idx - 1].size; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused - s->sg_pending[idx - 1].size; offset += s->sg_pending[idx].size; } y_done = 1; } else { s->sg_pending[idx].src = buf->dma_handle; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = buf->bytesused; offset += buf->bytesused; } bytes_written += buf->bytesused; /* Sync SG buffers */ ivtv_buf_sync_for_device(s, buf); idx++; } s->sg_pending_size = idx; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); if (lock) spin_lock_irqsave(&itv->dma_reg_lock, flags); if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) { ivtv_dma_dec_start(s); } else { set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); } if (lock) spin_unlock_irqrestore(&itv->dma_reg_lock, flags); } static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = 
cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s) { struct ivtv *itv = s->itv; s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src); s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst); s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000); s->sg_processed++; /* Sync Hardware SG List of buffers */ ivtv_stream_sync_for_device(s); write_reg(s->sg_handle, IVTV_REG_DECDMAADDR); write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); itv->dma_timer.expires = jiffies + msecs_to_jiffies(300); add_timer(&itv->dma_timer); } /* start the encoder DMA */ static void ivtv_dma_enc_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; int i; IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name); if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); if (ivtv_use_dma(s)) s->sg_pending[s->sg_pending_size - 1].size += 256; /* If this is an MPEG stream, and VBI data is also pending, then append the VBI DMA to the MPEG DMA and transfer both sets of data at once. VBI DMA is a second class citizen compared to MPEG and mixing them together will confuse the firmware (the end of a VBI DMA is seen as the end of a MPEG DMA, thus effectively dropping an MPEG frame). So instead we make sure we only use the MPEG DMA to transfer the VBI DMA if both are in use. This way no conflicts occur. 
*/ clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size && s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) { ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused); if (ivtv_use_dma(s_vbi)) s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256; for (i = 0; i < s_vbi->sg_pending_size; i++) { s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i]; } s_vbi->dma_offset = s_vbi->pending_offset; s_vbi->sg_pending_size = 0; s_vbi->dma_xfer_cnt++; set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name); } s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; s->dma_offset = s->pending_offset; s->dma_backup = s->pending_backup; s->dma_pts = s->pending_pts; if (ivtv_use_pio(s)) { set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); set_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = s->type; } else { itv->dma_retries = 0; ivtv_dma_enc_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } } static void ivtv_dma_dec_start(struct ivtv_stream *s) { struct ivtv *itv = s->itv; if (s->q_predma.bytesused) ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); s->dma_xfer_cnt++; memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size); s->sg_processing_size = s->sg_pending_size; s->sg_pending_size = 0; s->sg_processed = 0; IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name); itv->dma_retries = 0; ivtv_dma_dec_start_xfer(s); set_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = s->type; } static void ivtv_irq_dma_read(struct ivtv *itv) { struct ivtv_stream *s = NULL; struct ivtv_buffer *buf; int hw_stream_type = 0; IVTV_DEBUG_HI_IRQ("DEC 
DMA READ\n"); del_timer(&itv->dma_timer); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) return; if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (read_reg(IVTV_REG_DMASTATUS) & 0x14) { IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n", read_reg(IVTV_REG_DMASTATUS), s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_dec_start_xfer(s); return; } if (s->type == IVTV_DEC_STREAM_TYPE_YUV) hw_stream_type = 2; IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused); /* For some reason must kick the firmware, like PIO mode, I think this tells the firmware we are done and the size of the xfer so it can calculate what we need next. 
I think we can do this part ourselves but would have to fully calculate xfer info ourselves and not use interrupts */ ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused, hw_stream_type); /* Free last DMA call */ while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) { ivtv_buf_sync_for_cpu(s, buf); ivtv_enqueue(s, buf, &s->q_free); } wake_up(&s->waitq); } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_dma_complete(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream); del_timer(&itv->dma_timer); if (itv->cur_dma_stream < 0) return; s = &itv->streams[itv->cur_dma_stream]; ivtv_stream_sync_for_cpu(s); if (data[0] & 0x18) { IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0], s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); if (itv->dma_retries == 3) { /* Too many retries, give up on this frame */ itv->dma_retries = 0; s->sg_processed = s->sg_processing_size; } else { /* Retry, starting with the first xfer segment. Just retrying the current segment is not sufficient. 
*/ s->sg_processed = 0; itv->dma_retries++; } } if (s->sg_processed < s->sg_processing_size) { /* DMA next buffer */ ivtv_dma_enc_start_xfer(s); return; } clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; dma_post(s); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } s->sg_processing_size = 0; s->sg_processed = 0; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_pio_complete(struct ivtv *itv) { struct ivtv_stream *s; if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) { itv->cur_pio_stream = -1; return; } s = &itv->streams[itv->cur_pio_stream]; IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name); clear_bit(IVTV_F_I_PIO, &itv->i_flags); itv->cur_pio_stream = -1; dma_post(s); if (s->type == IVTV_ENC_STREAM_TYPE_MPG) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0); else if (s->type == IVTV_ENC_STREAM_TYPE_YUV) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1); else if (s->type == IVTV_ENC_STREAM_TYPE_PCM) ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2); clear_bit(IVTV_F_I_PIO, &itv->i_flags); if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; dma_post(s); } wake_up(&itv->dma_waitq); } static void ivtv_irq_dma_err(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; u32 status; del_timer(&itv->dma_timer); ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); status = read_reg(IVTV_REG_DMASTATUS); IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], status, itv->cur_dma_stream); /* * We do *not* write back to the IVTV_REG_DMASTATUS register to * clear the error status, if either the encoder write (0x02) or * decoder read (0x01) bus master DMA operation do not indicate * completed. We can race with the DMA engine, which may have * transitioned to completed status *after* we read the register. 
* Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the * DMA engine has completed, will cause the DMA engine to stop working. */ status &= 0x3; if (status == 0x3) write_reg(status, IVTV_REG_DMASTATUS); if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { /* retry */ /* * FIXME - handle cases of DMA error similar to * encoder below, except conditioned on status & 0x1 */ ivtv_dma_dec_start(s); return; } else { if ((status & 0x2) == 0) { /* * CX2341x Bus Master DMA write is ongoing. * Reset the timer and let it complete. */ itv->dma_timer.expires = jiffies + msecs_to_jiffies(600); add_timer(&itv->dma_timer); return; } if (itv->dma_retries < 3) { /* * CX2341x Bus Master DMA write has ended. * Retry the write, starting with the first * xfer segment. Just retrying the current * segment is not sufficient. */ s->sg_processed = 0; itv->dma_retries++; ivtv_dma_enc_start_xfer(s); return; } /* Too many retries, give up on this one */ } } if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { ivtv_udma_start(itv); return; } clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); } static void ivtv_irq_enc_start_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* Get DMA destination and size arguments from card */ ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data); IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]); if (data[0] > 2 || data[1] == 0 || data[2] == 0) { IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n", data[0], data[1], data[2]); return; } s = &itv->streams[ivtv_stream_map[data[0]]]; if (!stream_enc_dma_append(s, data)) { set_bit(ivtv_use_pio(s) ? 
IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } } static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n"); s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; if (!stream_enc_dma_append(s, data)) set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); } static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n"); if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) && !stream_enc_dma_append(s, data)) { set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags); } } static void ivtv_irq_dec_data_req(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; struct ivtv_stream *s; /* YUV or MPG */ if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data); itv->dma_data_req_size = 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); itv->dma_data_req_offset = data[1]; if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0) ivtv_yuv_frame_complete(itv); s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; } else { ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data); itv->dma_data_req_size = min_t(u32, data[2], 0x10000); itv->dma_data_req_offset = data[1]; s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; } IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused, itv->dma_data_req_offset, itv->dma_data_req_size); if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) { set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); } else { if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) ivtv_yuv_setup_stream_frame(itv); clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size); ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0); } } static void ivtv_irq_vsync(struct ivtv *itv) { /* The 
vsync interrupt is unusual in that it won't clear until * the end of the first line for the current field, at which * point it clears itself. This can result in repeated vsync * interrupts, or a missed vsync. Read some of the registers * to determine the line being displayed and ensure we handle * one vsync per frame. */ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1; struct yuv_playback_info *yi = &itv->yuv_info; int last_dma_frame = atomic_read(&yi->next_dma_frame); struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame]; if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n"); if (((frame ^ f->sync_field) == 0 && ((itv->last_vsync_field & 1) ^ f->sync_field)) || (frame != (itv->last_vsync_field & 1) && !f->interlaced)) { int next_dma_frame = last_dma_frame; if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) { if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) { write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); write_reg(yuv_offset[next_dma_frame] >> 4, 0x834); write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS; atomic_set(&yi->next_dma_frame, next_dma_frame); yi->fields_lapsed = -1; yi->running = 1; } } } if (frame != (itv->last_vsync_field & 1)) { static const struct v4l2_event evtop = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_TOP, }; static const struct v4l2_event evbottom = { .type = V4L2_EVENT_VSYNC, .u.vsync.field = V4L2_FIELD_BOTTOM, }; struct ivtv_stream *s = ivtv_get_output_stream(itv); itv->last_vsync_field += 1; if (frame == 0) { clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } else { set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); } if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) { set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags); wake_up(&itv->event_waitq); if (s) 
wake_up(&s->waitq); } if (s && s->vdev) v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom); wake_up(&itv->vsync_waitq); /* Send VBI to saa7127 */ if (frame && (itv->output_mode == OUT_PASSTHROUGH || test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) || test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) { set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } /* Check if we need to update the yuv registers */ if (yi->running && (yi->yuv_forced_update || f->update)) { if (!f->update) { last_dma_frame = (u8)(atomic_read(&yi->next_dma_frame) - 1) % IVTV_YUV_BUFFERS; f = &yi->new_frame_info[last_dma_frame]; } if (f->src_w) { yi->update_frame = last_dma_frame; f->update = 0; yi->yuv_forced_update = 0; set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags); set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags); } } yi->fields_lapsed++; } } #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT) irqreturn_t ivtv_irq_handler(int irq, void *dev_id) { struct ivtv *itv = (struct ivtv *)dev_id; u32 combo; u32 stat; int i; u8 vsync_force = 0; spin_lock(&itv->dma_reg_lock); /* get contents of irq status register */ stat = read_reg(IVTV_REG_IRQSTATUS); combo = ~itv->irqmask & stat; /* Clear out IRQ */ if (combo) write_reg(combo, IVTV_REG_IRQSTATUS); if (0 == combo) { /* The vsync interrupt is unusual and clears itself. If we * took too long, we may have missed it. 
Do some checks */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { /* vsync is enabled, see if we're in a new field */ if ((itv->last_vsync_field & 1) != (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) { /* New field, looks like we missed it */ IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16); vsync_force = 1; } } if (!vsync_force) { /* No Vsync expected, wasn't for us */ spin_unlock(&itv->dma_reg_lock); return IRQ_NONE; } } /* Exclude interrupts noted below from the output, otherwise the log is flooded with these messages */ if (combo & ~0xff6d0400) IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo); if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n"); } if (combo & IVTV_IRQ_DMA_READ) { ivtv_irq_dma_read(itv); } if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { ivtv_irq_enc_dma_complete(itv); } if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) { ivtv_irq_enc_pio_complete(itv); } if (combo & IVTV_IRQ_DMA_ERR) { ivtv_irq_dma_err(itv); } if (combo & IVTV_IRQ_ENC_START_CAP) { ivtv_irq_enc_start_cap(itv); } if (combo & IVTV_IRQ_ENC_VBI_CAP) { ivtv_irq_enc_vbi_cap(itv); } if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { ivtv_irq_dec_vbi_reinsert(itv); } if (combo & IVTV_IRQ_ENC_EOS) { IVTV_DEBUG_IRQ("ENC EOS\n"); set_bit(IVTV_F_I_EOS, &itv->i_flags); wake_up(&itv->eos_waitq); } if (combo & IVTV_IRQ_DEC_DATA_REQ) { ivtv_irq_dec_data_req(itv); } /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */ if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { ivtv_irq_vsync(itv); } if (combo & IVTV_IRQ_ENC_VIM_RST) { IVTV_DEBUG_IRQ("VIM RST\n"); /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */ } if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { IVTV_DEBUG_INFO("Stereo mode changed\n"); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if 
(!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) continue; if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_dec_start(s); else ivtv_dma_enc_start(s); break; } if (i == IVTV_MAX_STREAMS && test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) ivtv_udma_start(itv); } if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) { itv->irq_rr_idx++; for (i = 0; i < IVTV_MAX_STREAMS; i++) { int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS; struct ivtv_stream *s = &itv->streams[idx]; if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags)) continue; if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG) ivtv_dma_enc_start(s); break; } } if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { queue_kthread_work(&itv->irq_worker, &itv->irq_work); } spin_unlock(&itv->dma_reg_lock); /* If we've just handled a 'forced' vsync, it's safest to say it * wasn't ours. Another device may have triggered it at just * the right time. */ return vsync_force ? IRQ_NONE : IRQ_HANDLED; } void ivtv_unfinished_dma(unsigned long arg) { struct ivtv *itv = (struct ivtv *)arg; if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) return; IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); clear_bit(IVTV_F_I_UDMA, &itv->i_flags); clear_bit(IVTV_F_I_DMA, &itv->i_flags); itv->cur_dma_stream = -1; wake_up(&itv->dma_waitq); }
gpl-2.0
01org/baytrailaudio
arch/arm/mach-s3c24xx/bast-ide.c
4512
2104
/* linux/arch/arm/mach-s3c2410/bast-ide.c * * Copyright 2007 Simtec Electronics * http://www.simtec.co.uk/products/EB2410ITX/ * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/map.h> #include "bast.h" /* IDE ports */ static struct pata_platform_info bast_ide_platdata = { .ioport_shift = 5, }; static struct resource bast_ide0_resource[] = { [0] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDEPRI, 8 * 0x20), [1] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDEPRIAUX + (6 * 0x20), 0x20), [2] = DEFINE_RES_IRQ(BAST_IRQ_IDE0), }; static struct platform_device bast_device_ide0 = { .name = "pata_platform", .id = 0, .num_resources = ARRAY_SIZE(bast_ide0_resource), .resource = bast_ide0_resource, .dev = { .platform_data = &bast_ide_platdata, .coherent_dma_mask = ~0, } }; static struct resource bast_ide1_resource[] = { [0] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDESEC, 8 * 0x20), [1] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDESECAUX + (6 * 0x20), 0x20), [2] = DEFINE_RES_IRQ(BAST_IRQ_IDE1), }; static struct platform_device bast_device_ide1 = { .name = "pata_platform", .id = 1, .num_resources = ARRAY_SIZE(bast_ide1_resource), .resource = bast_ide1_resource, .dev = { .platform_data = &bast_ide_platdata, .coherent_dma_mask = ~0, } }; static struct platform_device *bast_ide_devices[] __initdata = { &bast_device_ide0, &bast_device_ide1, }; static __init int bast_ide_init(void) { if (machine_is_bast() || machine_is_vr1000()) return platform_add_devices(bast_ide_devices, ARRAY_SIZE(bast_ide_devices)); return 0; 
} fs_initcall(bast_ide_init);
gpl-2.0
dmachaty/linux-bananapro
arch/arm/mach-s3c24xx/bast-ide.c
4512
2104
/* linux/arch/arm/mach-s3c2410/bast-ide.c * * Copyright 2007 Simtec Electronics * http://www.simtec.co.uk/products/EB2410ITX/ * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/map.h> #include "bast.h" /* IDE ports */ static struct pata_platform_info bast_ide_platdata = { .ioport_shift = 5, }; static struct resource bast_ide0_resource[] = { [0] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDEPRI, 8 * 0x20), [1] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDEPRIAUX + (6 * 0x20), 0x20), [2] = DEFINE_RES_IRQ(BAST_IRQ_IDE0), }; static struct platform_device bast_device_ide0 = { .name = "pata_platform", .id = 0, .num_resources = ARRAY_SIZE(bast_ide0_resource), .resource = bast_ide0_resource, .dev = { .platform_data = &bast_ide_platdata, .coherent_dma_mask = ~0, } }; static struct resource bast_ide1_resource[] = { [0] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDESEC, 8 * 0x20), [1] = DEFINE_RES_MEM(BAST_IDE_CS + BAST_PA_IDESECAUX + (6 * 0x20), 0x20), [2] = DEFINE_RES_IRQ(BAST_IRQ_IDE1), }; static struct platform_device bast_device_ide1 = { .name = "pata_platform", .id = 1, .num_resources = ARRAY_SIZE(bast_ide1_resource), .resource = bast_ide1_resource, .dev = { .platform_data = &bast_ide_platdata, .coherent_dma_mask = ~0, } }; static struct platform_device *bast_ide_devices[] __initdata = { &bast_device_ide0, &bast_device_ide1, }; static __init int bast_ide_init(void) { if (machine_is_bast() || machine_is_vr1000()) return platform_add_devices(bast_ide_devices, ARRAY_SIZE(bast_ide_devices)); return 0; 
} fs_initcall(bast_ide_init);
gpl-2.0
CyanogenMod/lge-kernel-iproj
arch/cris/arch-v32/kernel/fasttimer.c
4768
22909
/* * linux/arch/cris/kernel/fasttimer.c * * Fast timers for ETRAX FS * * Copyright (C) 2000-2006 Axis Communications AB, Lund, Sweden */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/delay.h> #include <asm/irq.h> #include <asm/system.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/timer_defs.h> #include <asm/fasttimer.h> #include <linux/proc_fs.h> /* * timer0 is running at 100MHz and generating jiffies timer ticks * at 100 or 1000 HZ. * fasttimer gives an API that gives timers that expire "between" the jiffies * giving microsecond resolution (10 ns). * fasttimer uses reg_timer_rw_trig register to get interrupt when * r_time reaches a certain value. */ #define DEBUG_LOG_INCLUDED #define FAST_TIMER_LOG /* #define FAST_TIMER_TEST */ #define FAST_TIMER_SANITY_CHECKS #ifdef FAST_TIMER_SANITY_CHECKS static int sanity_failed; #endif #define D1(x) #define D2(x) #define DP(x) static unsigned int fast_timer_running; static unsigned int fast_timers_added; static unsigned int fast_timers_started; static unsigned int fast_timers_expired; static unsigned int fast_timers_deleted; static unsigned int fast_timer_is_init; static unsigned int fast_timer_ints; struct fast_timer *fast_timer_list = NULL; #ifdef DEBUG_LOG_INCLUDED #define DEBUG_LOG_MAX 128 static const char * debug_log_string[DEBUG_LOG_MAX]; static unsigned long debug_log_value[DEBUG_LOG_MAX]; static unsigned int debug_log_cnt; static unsigned int debug_log_cnt_wrapped; #define DEBUG_LOG(string, value) \ { \ unsigned long log_flags; \ local_irq_save(log_flags); \ debug_log_string[debug_log_cnt] = (string); \ debug_log_value[debug_log_cnt] = (unsigned long)(value); \ if (++debug_log_cnt >= DEBUG_LOG_MAX) \ { \ debug_log_cnt = debug_log_cnt % DEBUG_LOG_MAX; \ debug_log_cnt_wrapped = 1; \ } \ local_irq_restore(log_flags); 
\ } #else #define DEBUG_LOG(string, value) #endif #define NUM_TIMER_STATS 16 #ifdef FAST_TIMER_LOG struct fast_timer timer_added_log[NUM_TIMER_STATS]; struct fast_timer timer_started_log[NUM_TIMER_STATS]; struct fast_timer timer_expired_log[NUM_TIMER_STATS]; #endif int timer_div_settings[NUM_TIMER_STATS]; int timer_delay_settings[NUM_TIMER_STATS]; struct work_struct fast_work; static void timer_trig_handler(struct work_struct *work); /* Not true gettimeofday, only checks the jiffies (uptime) + useconds */ inline void do_gettimeofday_fast(struct fasttime_t *tv) { tv->tv_jiff = jiffies; tv->tv_usec = GET_JIFFIES_USEC(); } inline int fasttime_cmp(struct fasttime_t *t0, struct fasttime_t *t1) { /* Compare jiffies. Takes care of wrapping */ if (time_before(t0->tv_jiff, t1->tv_jiff)) return -1; else if (time_after(t0->tv_jiff, t1->tv_jiff)) return 1; /* Compare us */ if (t0->tv_usec < t1->tv_usec) return -1; else if (t0->tv_usec > t1->tv_usec) return 1; return 0; } /* Called with ints off */ inline void start_timer_trig(unsigned long delay_us) { reg_timer_rw_ack_intr ack_intr = { 0 }; reg_timer_rw_intr_mask intr_mask; reg_timer_rw_trig trig; reg_timer_rw_trig_cfg trig_cfg = { 0 }; reg_timer_r_time r_time0; reg_timer_r_time r_time1; unsigned char trig_wrap; unsigned char time_wrap; r_time0 = REG_RD(timer, regi_timer0, r_time); D1(printk("start_timer_trig : %d us freq: %i div: %i\n", delay_us, freq_index, div)); /* Clear trig irq */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 0; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); /* Set timer values and check if trigger wraps. 
*/ /* r_time is 100MHz (10 ns resolution) */ trig_wrap = (trig = r_time0 + delay_us*(1000/10)) < r_time0; timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = trig; timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us; /* Ack interrupt */ ack_intr.trig = 1; REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); /* Start timer */ REG_WR(timer, regi_timer0, rw_trig, trig); trig_cfg.tmr = regk_timer_time; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); /* Check if we have already passed the trig time */ r_time1 = REG_RD(timer, regi_timer0, r_time); time_wrap = r_time1 < r_time0; if ((trig_wrap && !time_wrap) || (r_time1 < trig)) { /* No, Enable trig irq */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 1; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); fast_timers_started++; fast_timer_running = 1; } else { /* We have passed the time, disable trig point, ack intr */ trig_cfg.tmr = regk_timer_off; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); /* call the int routine */ INIT_WORK(&fast_work, timer_trig_handler); schedule_work(&fast_work); } } /* In version 1.4 this function takes 27 - 50 us */ void start_one_shot_timer(struct fast_timer *t, fast_timer_function_type *function, unsigned long data, unsigned long delay_us, const char *name) { unsigned long flags; struct fast_timer *tmp; D1(printk("sft %s %d us\n", name, delay_us)); local_irq_save(flags); do_gettimeofday_fast(&t->tv_set); tmp = fast_timer_list; #ifdef FAST_TIMER_SANITY_CHECKS /* Check so this is not in the list already... 
*/ while (tmp != NULL) { if (tmp == t) { printk(KERN_DEBUG "timer name: %s data: 0x%08lX already " "in list!\n", name, data); sanity_failed++; goto done; } else tmp = tmp->next; } tmp = fast_timer_list; #endif t->delay_us = delay_us; t->function = function; t->data = data; t->name = name; t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000; t->tv_expires.tv_jiff = t->tv_set.tv_jiff + delay_us / 1000000 / HZ; if (t->tv_expires.tv_usec > 1000000) { t->tv_expires.tv_usec -= 1000000; t->tv_expires.tv_jiff += HZ; } #ifdef FAST_TIMER_LOG timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t; #endif fast_timers_added++; /* Check if this should timeout before anything else */ if (tmp == NULL || fasttime_cmp(&t->tv_expires, &tmp->tv_expires) < 0) { /* Put first in list and modify the timer value */ t->prev = NULL; t->next = fast_timer_list; if (fast_timer_list) fast_timer_list->prev = t; fast_timer_list = t; #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer_trig(delay_us); } else { /* Put in correct place in list */ while (tmp->next && fasttime_cmp(&t->tv_expires, &tmp->next->tv_expires) > 0) tmp = tmp->next; /* Insert t after tmp */ t->prev = tmp; t->next = tmp->next; if (tmp->next) { tmp->next->prev = t; } tmp->next = t; } D2(printk("start_one_shot_timer: %d us done\n", delay_us)); done: local_irq_restore(flags); } /* start_one_shot_timer */ static inline int fast_timer_pending (const struct fast_timer * t) { return (t->next != NULL) || (t->prev != NULL) || (t == fast_timer_list); } static inline int detach_fast_timer (struct fast_timer *t) { struct fast_timer *next, *prev; if (!fast_timer_pending(t)) return 0; next = t->next; prev = t->prev; if (next) next->prev = prev; if (prev) prev->next = next; else fast_timer_list = next; fast_timers_deleted++; return 1; } int del_fast_timer(struct fast_timer * t) { unsigned long flags; int ret; local_irq_save(flags); ret = detach_fast_timer(t); t->next = t->prev = 
NULL; local_irq_restore(flags); return ret; } /* del_fast_timer */ /* Interrupt routines or functions called in interrupt context */ /* Timer interrupt handler for trig interrupts */ static irqreturn_t timer_trig_interrupt(int irq, void *dev_id) { reg_timer_r_masked_intr masked_intr; /* Check if the timer interrupt is for us (a trig int) */ masked_intr = REG_RD(timer, regi_timer0, r_masked_intr); if (!masked_intr.trig) return IRQ_NONE; timer_trig_handler(NULL); return IRQ_HANDLED; } static void timer_trig_handler(struct work_struct *work) { reg_timer_rw_ack_intr ack_intr = { 0 }; reg_timer_rw_intr_mask intr_mask; reg_timer_rw_trig_cfg trig_cfg = { 0 }; struct fast_timer *t; unsigned long flags; /* We keep interrupts disabled not only when we modify the * fast timer list, but any time we hold a reference to a * timer in the list, since del_fast_timer may be called * from (another) interrupt context. Thus, the only time * when interrupts are enabled is when calling the timer * callback function. */ local_irq_save(flags); /* Clear timer trig interrupt */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 0; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); /* First stop timer, then ack interrupt */ /* Stop timer */ trig_cfg.tmr = regk_timer_off; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); /* Ack interrupt */ ack_intr.trig = 1; REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); fast_timer_running = 0; fast_timer_ints++; fast_timer_function_type *f; unsigned long d; t = fast_timer_list; while (t) { struct fasttime_t tv; /* Has it really expired? 
*/ do_gettimeofday_fast(&tv); D1(printk(KERN_DEBUG "t: %is %06ius\n", tv.tv_jiff, tv.tv_usec)); if (fasttime_cmp(&t->tv_expires, &tv) <= 0) { /* Yes it has expired */ #ifdef FAST_TIMER_LOG timer_expired_log[fast_timers_expired % NUM_TIMER_STATS] = *t; #endif fast_timers_expired++; /* Remove this timer before call, since it may reuse the timer */ if (t->prev) t->prev->next = t->next; else fast_timer_list = t->next; if (t->next) t->next->prev = t->prev; t->prev = NULL; t->next = NULL; /* Save function callback data before enabling * interrupts, since the timer may be removed and we * don't know how it was allocated (e.g. ->function * and ->data may become overwritten after deletion * if the timer was stack-allocated). */ f = t->function; d = t->data; if (f != NULL) { /* Run the callback function with interrupts * enabled. */ local_irq_restore(flags); f(d); local_irq_save(flags); } else DEBUG_LOG("!trimertrig %i function==NULL!\n", fast_timer_ints); } else { /* Timer is to early, let's set it again using the normal routines */ D1(printk(".\n")); } t = fast_timer_list; if (t != NULL) { /* Start next timer.. */ long us = 0; struct fasttime_t tv; do_gettimeofday_fast(&tv); /* time_after_eq takes care of wrapping */ if (time_after_eq(t->tv_expires.tv_jiff, tv.tv_jiff)) us = ((t->tv_expires.tv_jiff - tv.tv_jiff) * 1000000 / HZ + t->tv_expires.tv_usec - tv.tv_usec); if (us > 0) { if (!fast_timer_running) { #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer_trig(us); } break; } else { /* Timer already expired, let's handle it better late than never. * The normal loop handles it */ D1(printk("e! 
%d\n", us)); } } } local_irq_restore(flags); if (!t) D1(printk("ttrig stop!\n")); } static void wake_up_func(unsigned long data) { wait_queue_head_t *sleep_wait_p = (wait_queue_head_t*)data; wake_up(sleep_wait_p); } /* Useful API */ void schedule_usleep(unsigned long us) { struct fast_timer t; wait_queue_head_t sleep_wait; init_waitqueue_head(&sleep_wait); D1(printk("schedule_usleep(%d)\n", us)); start_one_shot_timer(&t, wake_up_func, (unsigned long)&sleep_wait, us, "usleep"); /* Uninterruptible sleep on the fast timer. (The condition is * somewhat redundant since the timer is what wakes us up.) */ wait_event(sleep_wait, !fast_timer_pending(&t)); D1(printk("done schedule_usleep(%d)\n", us)); } #ifdef CONFIG_PROC_FS static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused); static struct proc_dir_entry *fasttimer_proc_entry; #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_PROC_FS /* This value is very much based on testing */ #define BIG_BUF_SIZE (500 + NUM_TIMER_STATS * 300) static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused) { unsigned long flags; int i = 0; int num_to_show; struct fasttime_t tv; struct fast_timer *t, *nextt; static char *bigbuf = NULL; static unsigned long used; if (!bigbuf) { bigbuf = vmalloc(BIG_BUF_SIZE); if (!bigbuf) { used = 0; if (buf) buf[0] = '\0'; return 0; } } if (!offset || !used) { do_gettimeofday_fast(&tv); used = 0; used += sprintf(bigbuf + used, "Fast timers added: %i\n", fast_timers_added); used += sprintf(bigbuf + used, "Fast timers started: %i\n", fast_timers_started); used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n", fast_timer_ints); used += sprintf(bigbuf + used, "Fast timers expired: %i\n", fast_timers_expired); used += sprintf(bigbuf + used, "Fast timers deleted: %i\n", fast_timers_deleted); used += sprintf(bigbuf + used, "Fast timer running: %s\n", fast_timer_running ? 
"yes" : "no"); used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n", (unsigned long)tv.tv_jiff, (unsigned long)tv.tv_usec); #ifdef FAST_TIMER_SANITY_CHECKS used += sprintf(bigbuf + used, "Sanity failed: %i\n", sanity_failed); #endif used += sprintf(bigbuf + used, "\n"); #ifdef DEBUG_LOG_INCLUDED { int end_i = debug_log_cnt; i = 0; if (debug_log_cnt_wrapped) i = debug_log_cnt; while ((i != end_i || (debug_log_cnt_wrapped && !used)) && used+100 < BIG_BUF_SIZE) { used += sprintf(bigbuf + used, debug_log_string[i], debug_log_value[i]); i = (i+1) % DEBUG_LOG_MAX; } } used += sprintf(bigbuf + used, "\n"); #endif num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++) { int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS; #if 1 //ndef FAST_TIMER_LOG used += sprintf(bigbuf + used, "div: %i delay: %i" "\n", timer_div_settings[cur], timer_delay_settings[cur] ); #endif #ifdef FAST_TIMER_LOG t = &timer_started_log[cur]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); #endif } used += sprintf(bigbuf + used, "\n"); #ifdef FAST_TIMER_LOG num_to_show = (fast_timers_added < NUM_TIMER_STATS ? 
fast_timers_added: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? fast_timers_expired: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); #endif used += sprintf(bigbuf + used, "Active timers:\n"); local_irq_save(flags); t = fast_timer_list; while (t != NULL && (used+100 < BIG_BUF_SIZE)) { nextt = t->next; local_irq_restore(flags); used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" /* " func: 0x%08lX" */ "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data /* , t->function */ ); local_irq_save(flags); if (t->next != nextt) { printk("timer removed!\n"); } t = nextt; } local_irq_restore(flags); } if (used - offset < len) { len = used - offset; } memcpy(buf, bigbuf + offset, len); *start = buf; *eof = 1; return len; } #endif /* PROC_FS */ #ifdef FAST_TIMER_TEST static 
volatile unsigned long i = 0; static volatile int num_test_timeout = 0; static struct fast_timer tr[10]; static int exp_num[10]; static struct fasttime_t tv_exp[100]; static void test_timeout(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; num_test_timeout++; } static void test_timeout1(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; if (data < 7) { start_one_shot_timer(&tr[i], test_timeout1, i, 1000, "timeout1"); i++; } num_test_timeout++; } DP( static char buf0[2000]; static char buf1[2000]; static char buf2[2000]; static char buf3[2000]; static char buf4[2000]; ); static char buf5[6000]; static int j_u[1000]; static void fast_timer_test(void) { int prev_num; int j; struct fasttime_t tv, tv0, tv1, tv2; printk("fast_timer_test() start\n"); do_gettimeofday_fast(&tv); for (j = 0; j < 1000; j++) { j_u[j] = GET_JIFFIES_USEC(); } for (j = 0; j < 100; j++) { do_gettimeofday_fast(&tv_exp[j]); } printk(KERN_DEBUG "fast_timer_test() %is %06i\n", tv.tv_jiff, tv.tv_usec); for (j = 0; j < 1000; j++) { printk(KERN_DEBUG "%i %i %i %i %i\n", j_u[j], j_u[j+1], j_u[j+2], j_u[j+3], j_u[j+4]); j += 4; } for (j = 0; j < 100; j++) { printk(KERN_DEBUG "%i.%i %i.%i %i.%i %i.%i %i.%i\n", tv_exp[j].tv_jiff, tv_exp[j].tv_usec, tv_exp[j+1].tv_jiff, tv_exp[j+1].tv_usec, tv_exp[j+2].tv_jiff, tv_exp[j+2].tv_usec, tv_exp[j+3].tv_jiff, tv_exp[j+3].tv_usec, tv_exp[j+4].tv_jiff, tv_exp[j+4].tv_usec); j += 4; } do_gettimeofday_fast(&tv0); start_one_shot_timer(&tr[i], test_timeout, i, 50000, "test0"); DP(proc_fasttimer_read(buf0, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 70000, "test1"); DP(proc_fasttimer_read(buf1, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 40000, "test2"); DP(proc_fasttimer_read(buf2, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 60000, "test3"); DP(proc_fasttimer_read(buf3, NULL, 0, 0, 0)); i++; 
start_one_shot_timer(&tr[i], test_timeout1, i, 55000, "test4xx"); DP(proc_fasttimer_read(buf4, NULL, 0, 0, 0)); i++; do_gettimeofday_fast(&tv1); proc_fasttimer_read(buf5, NULL, 0, 0, 0); prev_num = num_test_timeout; while (num_test_timeout < i) { if (num_test_timeout != prev_num) prev_num = num_test_timeout; } do_gettimeofday_fast(&tv2); printk(KERN_INFO "Timers started %is %06i\n", tv0.tv_jiff, tv0.tv_usec); printk(KERN_INFO "Timers started at %is %06i\n", tv1.tv_jiff, tv1.tv_usec); printk(KERN_INFO "Timers done %is %06i\n", tv2.tv_jiff, tv2.tv_usec); DP(printk("buf0:\n"); printk(buf0); printk("buf1:\n"); printk(buf1); printk("buf2:\n"); printk(buf2); printk("buf3:\n"); printk(buf3); printk("buf4:\n"); printk(buf4); ); printk("buf5:\n"); printk(buf5); printk("timers set:\n"); for(j = 0; j<i; j++) { struct fast_timer *t = &tr[j]; printk("%-10s set: %6is %06ius exp: %6is %06ius " "data: 0x%08X func: 0x%08X\n", t->name, t->tv_set.tv_jiff, t->tv_set.tv_usec, t->tv_expires.tv_jiff, t->tv_expires.tv_usec, t->data, t->function ); printk(" del: %6ius did exp: %6is %06ius as #%i error: %6li\n", t->delay_us, tv_exp[j].tv_jiff, tv_exp[j].tv_usec, exp_num[j], (tv_exp[j].tv_jiff - t->tv_expires.tv_jiff) * 1000000 + tv_exp[j].tv_usec - t->tv_expires.tv_usec); } proc_fasttimer_read(buf5, NULL, 0, 0, 0); printk("buf5 after all done:\n"); printk(buf5); printk("fast_timer_test() done\n"); } #endif int fast_timer_init(void) { /* For some reason, request_irq() hangs when called froom time_init() */ if (!fast_timer_is_init) { printk("fast_timer_init()\n"); #ifdef CONFIG_PROC_FS fasttimer_proc_entry = create_proc_entry("fasttimer", 0, 0); if (fasttimer_proc_entry) fasttimer_proc_entry->read_proc = proc_fasttimer_read; #endif /* PROC_FS */ if (request_irq(TIMER0_INTR_VECT, timer_trig_interrupt, IRQF_SHARED | IRQF_DISABLED, "fast timer int", &fast_timer_list)) printk(KERN_ERR "err: fasttimer irq\n"); fast_timer_is_init = 1; #ifdef FAST_TIMER_TEST printk("do test\n"); fast_timer_test(); 
#endif } return 0; } __initcall(fast_timer_init);
gpl-2.0
sktjdgns1189/android_kernel_samsung_frescolteskt
drivers/mfd/twl6040-core.c
4768
16547
/* * MFD driver for TWL6040 audio device * * Authors: Misael Lopez Cruz <misael.lopez@ti.com> * Jorge Eduardo Candelaria <jorge.candelaria@ti.com> * Peter Ujfalusi <peter.ujfalusi@ti.com> * * Copyright: (C) 2011 Texas Instruments, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/err.h> #include <linux/mfd/core.h> #include <linux/mfd/twl6040.h> #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 
0 : 1) int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) { int ret; unsigned int val; mutex_lock(&twl6040->io_mutex); /* Vibra control registers from cache */ if (unlikely(reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)) { val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)]; } else { ret = regmap_read(twl6040->regmap, reg, &val); if (ret < 0) { mutex_unlock(&twl6040->io_mutex); return ret; } } mutex_unlock(&twl6040->io_mutex); return val; } EXPORT_SYMBOL(twl6040_reg_read); int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val) { int ret; mutex_lock(&twl6040->io_mutex); ret = regmap_write(twl6040->regmap, reg, val); /* Cache the vibra control registers */ if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR) twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val; mutex_unlock(&twl6040->io_mutex); return ret; } EXPORT_SYMBOL(twl6040_reg_write); int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) { int ret; mutex_lock(&twl6040->io_mutex); ret = regmap_update_bits(twl6040->regmap, reg, mask, mask); mutex_unlock(&twl6040->io_mutex); return ret; } EXPORT_SYMBOL(twl6040_set_bits); int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) { int ret; mutex_lock(&twl6040->io_mutex); ret = regmap_update_bits(twl6040->regmap, reg, mask, 0); mutex_unlock(&twl6040->io_mutex); return ret; } EXPORT_SYMBOL(twl6040_clear_bits); /* twl6040 codec manual power-up sequence */ static int twl6040_power_up(struct twl6040 *twl6040) { u8 ldoctl, ncpctl, lppllctl; int ret; /* enable high-side LDO, reference system and internal oscillator */ ldoctl = TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA; ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); if (ret) return ret; usleep_range(10000, 10500); /* enable negative charge pump */ ncpctl = TWL6040_NCPENA; ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl); if (ret) goto ncp_err; usleep_range(1000, 1500); /* enable low-side LDO */ 
ldoctl |= TWL6040_LSLDOENA; ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); if (ret) goto lsldo_err; usleep_range(1000, 1500); /* enable low-power PLL */ lppllctl = TWL6040_LPLLENA; ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); if (ret) goto lppll_err; usleep_range(5000, 5500); /* disable internal oscillator */ ldoctl &= ~TWL6040_OSCENA; ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); if (ret) goto osc_err; return 0; osc_err: lppllctl &= ~TWL6040_LPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); lppll_err: ldoctl &= ~TWL6040_LSLDOENA; twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); lsldo_err: ncpctl &= ~TWL6040_NCPENA; twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl); ncp_err: ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA); twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); return ret; } /* twl6040 manual power-down sequence */ static void twl6040_power_down(struct twl6040 *twl6040) { u8 ncpctl, ldoctl, lppllctl; ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL); ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL); lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL); /* enable internal oscillator */ ldoctl |= TWL6040_OSCENA; twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); usleep_range(1000, 1500); /* disable low-power PLL */ lppllctl &= ~TWL6040_LPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); /* disable low-side LDO */ ldoctl &= ~TWL6040_LSLDOENA; twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); /* disable negative charge pump */ ncpctl &= ~TWL6040_NCPENA; twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl); /* disable high-side LDO, reference system and internal oscillator */ ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA); twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl); } static irqreturn_t twl6040_naudint_handler(int irq, void *data) { struct twl6040 *twl6040 = data; u8 intid, status; 
intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID); if (intid & TWL6040_READYINT) complete(&twl6040->ready); if (intid & TWL6040_THINT) { status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS); if (status & TWL6040_TSHUTDET) { dev_warn(twl6040->dev, "Thermal shutdown, powering-off"); twl6040_power(twl6040, 0); } else { dev_warn(twl6040->dev, "Leaving thermal shutdown, powering-on"); twl6040_power(twl6040, 1); } } return IRQ_HANDLED; } static int twl6040_power_up_completion(struct twl6040 *twl6040, int naudint) { int time_left; u8 intid; time_left = wait_for_completion_timeout(&twl6040->ready, msecs_to_jiffies(144)); if (!time_left) { intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID); if (!(intid & TWL6040_READYINT)) { dev_err(twl6040->dev, "timeout waiting for READYINT\n"); return -ETIMEDOUT; } } return 0; } int twl6040_power(struct twl6040 *twl6040, int on) { int audpwron = twl6040->audpwron; int naudint = twl6040->irq; int ret = 0; mutex_lock(&twl6040->mutex); if (on) { /* already powered-up */ if (twl6040->power_count++) goto out; if (gpio_is_valid(audpwron)) { /* use AUDPWRON line */ gpio_set_value(audpwron, 1); /* wait for power-up completion */ ret = twl6040_power_up_completion(twl6040, naudint); if (ret) { dev_err(twl6040->dev, "automatic power-down failed\n"); twl6040->power_count = 0; goto out; } } else { /* use manual power-up sequence */ ret = twl6040_power_up(twl6040); if (ret) { dev_err(twl6040->dev, "manual power-up failed\n"); twl6040->power_count = 0; goto out; } } /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; twl6040->sysclk = 19200000; twl6040->mclk = 32768; } else { /* already powered-down */ if (!twl6040->power_count) { dev_err(twl6040->dev, "device is already powered-off\n"); ret = -EPERM; goto out; } if (--twl6040->power_count) goto out; if (gpio_is_valid(audpwron)) { /* use AUDPWRON line */ gpio_set_value(audpwron, 0); /* power-down sequence latency */ usleep_range(500, 700); } else { /* use 
manual power-down sequence */ twl6040_power_down(twl6040); } twl6040->sysclk = 0; twl6040->mclk = 0; } out: mutex_unlock(&twl6040->mutex); return ret; } EXPORT_SYMBOL(twl6040_power); int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, unsigned int freq_in, unsigned int freq_out) { u8 hppllctl, lppllctl; int ret = 0; mutex_lock(&twl6040->mutex); hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL); lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL); /* Force full reconfiguration when switching between PLL */ if (pll_id != twl6040->pll) { twl6040->sysclk = 0; twl6040->mclk = 0; } switch (pll_id) { case TWL6040_SYSCLK_SEL_LPPLL: /* low-power PLL divider */ /* Change the sysclk configuration only if it has been canged */ if (twl6040->sysclk != freq_out) { switch (freq_out) { case 17640000: lppllctl |= TWL6040_LPLLFIN; break; case 19200000: lppllctl &= ~TWL6040_LPLLFIN; break; default: dev_err(twl6040->dev, "freq_out %d not supported\n", freq_out); ret = -EINVAL; goto pll_out; } twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); } /* The PLL in use has not been change, we can exit */ if (twl6040->pll == pll_id) break; switch (freq_in) { case 32768: lppllctl |= TWL6040_LPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); mdelay(5); lppllctl &= ~TWL6040_HPLLSEL; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); hppllctl &= ~TWL6040_HPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl); break; default: dev_err(twl6040->dev, "freq_in %d not supported\n", freq_in); ret = -EINVAL; goto pll_out; } break; case TWL6040_SYSCLK_SEL_HPPLL: /* high-performance PLL can provide only 19.2 MHz */ if (freq_out != 19200000) { dev_err(twl6040->dev, "freq_out %d not supported\n", freq_out); ret = -EINVAL; goto pll_out; } if (twl6040->mclk != freq_in) { hppllctl &= ~TWL6040_MCLK_MSK; switch (freq_in) { case 12000000: /* PLL enabled, active mode */ hppllctl |= TWL6040_MCLK_12000KHZ | TWL6040_HPLLENA; break; case 19200000: /* 
* PLL disabled * (enable PLL if MCLK jitter quality * doesn't meet specification) */ hppllctl |= TWL6040_MCLK_19200KHZ; break; case 26000000: /* PLL enabled, active mode */ hppllctl |= TWL6040_MCLK_26000KHZ | TWL6040_HPLLENA; break; case 38400000: /* PLL enabled, active mode */ hppllctl |= TWL6040_MCLK_38400KHZ | TWL6040_HPLLENA; break; default: dev_err(twl6040->dev, "freq_in %d not supported\n", freq_in); ret = -EINVAL; goto pll_out; } /* * enable clock slicer to ensure input waveform is * square */ hppllctl |= TWL6040_HPLLSQRENA; twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl); usleep_range(500, 700); lppllctl |= TWL6040_HPLLSEL; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); lppllctl &= ~TWL6040_LPLLENA; twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); } break; default: dev_err(twl6040->dev, "unknown pll id %d\n", pll_id); ret = -EINVAL; goto pll_out; } twl6040->sysclk = freq_out; twl6040->mclk = freq_in; twl6040->pll = pll_id; pll_out: mutex_unlock(&twl6040->mutex); return ret; } EXPORT_SYMBOL(twl6040_set_pll); int twl6040_get_pll(struct twl6040 *twl6040) { if (twl6040->power_count) return twl6040->pll; else return -ENODEV; } EXPORT_SYMBOL(twl6040_get_pll); unsigned int twl6040_get_sysclk(struct twl6040 *twl6040) { return twl6040->sysclk; } EXPORT_SYMBOL(twl6040_get_sysclk); /* Get the combined status of the vibra control register */ int twl6040_get_vibralr_status(struct twl6040 *twl6040) { u8 status; status = twl6040->vibra_ctrl_cache[0] | twl6040->vibra_ctrl_cache[1]; status &= (TWL6040_VIBENA | TWL6040_VIBSEL); return status; } EXPORT_SYMBOL(twl6040_get_vibralr_status); static struct resource twl6040_vibra_rsrc[] = { { .flags = IORESOURCE_IRQ, }, }; static struct resource twl6040_codec_rsrc[] = { { .flags = IORESOURCE_IRQ, }, }; static bool twl6040_readable_reg(struct device *dev, unsigned int reg) { /* Register 0 is not readable */ if (!reg) return false; return true; } static struct regmap_config twl6040_regmap_config = { 
.reg_bits = 8, .val_bits = 8, .max_register = TWL6040_REG_STATUS, /* 0x2e */ .readable_reg = twl6040_readable_reg, }; static int __devinit twl6040_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct twl6040_platform_data *pdata = client->dev.platform_data; struct twl6040 *twl6040; struct mfd_cell *cell = NULL; int ret, children = 0; if (!pdata) { dev_err(&client->dev, "Platform data is missing\n"); return -EINVAL; } /* In order to operate correctly we need valid interrupt config */ if (!client->irq || !pdata->irq_base) { dev_err(&client->dev, "Invalid IRQ configuration\n"); return -EINVAL; } twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040), GFP_KERNEL); if (!twl6040) { ret = -ENOMEM; goto err; } twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config); if (IS_ERR(twl6040->regmap)) { ret = PTR_ERR(twl6040->regmap); goto err; } i2c_set_clientdata(client, twl6040); twl6040->dev = &client->dev; twl6040->irq = client->irq; twl6040->irq_base = pdata->irq_base; mutex_init(&twl6040->mutex); mutex_init(&twl6040->io_mutex); init_completion(&twl6040->ready); twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV); /* ERRATA: Automatic power-up is not possible in ES1.0 */ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) twl6040->audpwron = pdata->audpwron_gpio; else twl6040->audpwron = -EINVAL; if (gpio_is_valid(twl6040->audpwron)) { ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW, "audpwron"); if (ret) goto gpio1_err; } /* codec interrupt */ ret = twl6040_irq_init(twl6040); if (ret) goto gpio2_err; ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY, NULL, twl6040_naudint_handler, 0, "twl6040_irq_ready", twl6040); if (ret) { dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret); goto irq_err; } /* dual-access registers controlled by I2C only */ twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL); if (pdata->codec) { int irq = twl6040->irq_base + TWL6040_IRQ_PLUG; cell = 
&twl6040->cells[children]; cell->name = "twl6040-codec"; twl6040_codec_rsrc[0].start = irq; twl6040_codec_rsrc[0].end = irq; cell->resources = twl6040_codec_rsrc; cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc); cell->platform_data = pdata->codec; cell->pdata_size = sizeof(*pdata->codec); children++; } if (pdata->vibra) { int irq = twl6040->irq_base + TWL6040_IRQ_VIB; cell = &twl6040->cells[children]; cell->name = "twl6040-vibra"; twl6040_vibra_rsrc[0].start = irq; twl6040_vibra_rsrc[0].end = irq; cell->resources = twl6040_vibra_rsrc; cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc); cell->platform_data = pdata->vibra; cell->pdata_size = sizeof(*pdata->vibra); children++; } if (children) { ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children, NULL, 0); if (ret) goto mfd_err; } else { dev_err(&client->dev, "No platform data found for children\n"); ret = -ENODEV; goto mfd_err; } return 0; mfd_err: free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); irq_err: twl6040_irq_exit(twl6040); gpio2_err: if (gpio_is_valid(twl6040->audpwron)) gpio_free(twl6040->audpwron); gpio1_err: i2c_set_clientdata(client, NULL); regmap_exit(twl6040->regmap); err: return ret; } static int __devexit twl6040_remove(struct i2c_client *client) { struct twl6040 *twl6040 = i2c_get_clientdata(client); if (twl6040->power_count) twl6040_power(twl6040, 0); if (gpio_is_valid(twl6040->audpwron)) gpio_free(twl6040->audpwron); free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); twl6040_irq_exit(twl6040); mfd_remove_devices(&client->dev); i2c_set_clientdata(client, NULL); regmap_exit(twl6040->regmap); return 0; } static const struct i2c_device_id twl6040_i2c_id[] = { { "twl6040", 0, }, { }, }; MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id); static struct i2c_driver twl6040_driver = { .driver = { .name = "twl6040", .owner = THIS_MODULE, }, .probe = twl6040_probe, .remove = __devexit_p(twl6040_remove), .id_table = twl6040_i2c_id, }; module_i2c_driver(twl6040_driver); 
MODULE_DESCRIPTION("TWL6040 MFD"); MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>"); MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:twl6040");
gpl-2.0
TeamWin/kernel_samsung_lt02ltetmo
arch/arm/plat-mxc/devices/platform-mx1-camera.c
8096
1170
/*
 * Copyright (C) 2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <mach/hardware.h>
#include <mach/devices-common.h>

/*
 * Build one imx_mx1_camera_data initializer for a given SoC: the soc
 * argument is token-pasted onto the CSI base-address and CSI interrupt
 * macro names (e.g. MX1 -> MX1_CSI_BASE_ADDR, MX1_INT_CSI).
 */
#define imx_mx1_camera_data_entry_single(soc, _size) \
	{ \
		.iobase = soc ## _CSI ## _BASE_ADDR, \
		.iosize = _size, \
		.irq = soc ## _INT_CSI, \
	}

#ifdef CONFIG_SOC_IMX1
/* CSI register window is 0x10 bytes on i.MX1; __initconst: init-only data */
const struct imx_mx1_camera_data imx1_mx1_camera_data __initconst =
	imx_mx1_camera_data_entry_single(MX1, 10);
#endif /* ifdef CONFIG_SOC_IMX1 */

/*
 * Register a "mx1-camera" platform device for the CSI unit described by
 * @data, copying @pdata as its platform data.
 *
 * Returns the new platform_device on success (or an ERR_PTR-style value
 * propagated from imx_add_platform_device_dmamask — confirm against that
 * helper's contract, it is defined elsewhere).
 */
struct platform_device *__init imx_add_mx1_camera(
		const struct imx_mx1_camera_data *data,
		const struct mx1_camera_pdata *pdata)
{
	/* One MMIO window plus the CSI interrupt line */
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	/* Device gets a 32-bit DMA mask; pdata is copied (sizeof(*pdata)) */
	return imx_add_platform_device_dmamask("mx1-camera", 0,
			res, ARRAY_SIZE(res),
			pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
gpl-2.0
Toygoon/ToyKernel-Team.TCP_SGS2
arch/x86/lib/memcpy_32.c
8352
3788
#include <linux/string.h>
#include <linux/module.h>

/* Drop any macro versions so the definitions below are real functions */
#undef memcpy
#undef memset

/*
 * Out-of-line memcpy: dispatches to the 3DNow!-optimized variant when the
 * kernel is built with CONFIG_X86_USE_3DNOW, otherwise the generic
 * __memcpy implementation.
 */
void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_X86_USE_3DNOW
	return __memcpy3d(to, from, n);
#else
	return __memcpy(to, from, n);
#endif
}
EXPORT_SYMBOL(memcpy);

/* Out-of-line memset: thin wrapper around __memset */
void *memset(void *s, int c, size_t count)
{
	return __memset(s, c, count);
}
EXPORT_SYMBOL(memset);

/*
 * memmove - copy @n bytes from @src to @dest, regions may overlap.
 *
 * Strategy (all in one inline-asm blob, order-critical):
 *  - n < 16: fall straight through to the tail code at label 1, which
 *    copies 8-15 / 4-7 / 2-3 / 1 bytes using pairs of overlapping loads
 *    and stores;
 *  - dest <= src: forward copy, either a 16-bytes-per-iteration register
 *    loop or, for n >= 680 with matching low address byte, "rep movsl";
 *  - dest > src: backward copy (starts from the tail), register loop or
 *    "std; rep movsl; cld".
 *
 * Notes for reviewers:
 *  - GAS numeric labels (1:, 3:, ...) may be defined more than once; the
 *    "3f"/"3b" suffix picks the nearest forward/backward definition, so
 *    the two "3:" definitions below are deliberate: the first biases %0
 *    by 0x10 so the loop's trailing "jae 3b" test works.
 *  - The 2-3 byte path uses %%dx/%%bx directly; %3-%5 ("=r" temporaries)
 *    are dead on that path — presumably why this is safe even if the
 *    allocator picked edx/ebx for them; confirm before touching.
 *  - %0 is pinned to ecx ("=&c") for rep movsl; clobbering %%cl in the
 *    1-byte path is fine because the count is no longer needed there.
 */
void *memmove(void *dest, const void *src, size_t n)
{
	int d0,d1,d2,d3,d4,d5;
	char *ret = dest;

	__asm__ __volatile__(
		/* Handle more 16 bytes in loop */
		"cmp $0x10, %0\n\t"
		"jb 1f\n\t"

		/* Decide forward/backward copy mode */
		"cmp %2, %1\n\t"
		"jb 2f\n\t"

		/*
		 * movs instruction have many startup latency
		 * so we handle small size by general register.
		 */
		"cmp  $680, %0\n\t"
		"jb 3f\n\t"
		/*
		 * movs instruction is only good for aligned case.
		 */
		"mov %1, %3\n\t"
		"xor %2, %3\n\t"
		"and $0xff, %3\n\t"
		"jz 4f\n\t"
		"3:\n\t"
		"sub $0x10, %0\n\t"

		/*
		 * We gobble 16 bytes forward in each loop.
		 */
		"3:\n\t"
		"sub $0x10, %0\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov 1*4(%1), %4\n\t"
		"mov %3, 0*4(%2)\n\t"
		"mov %4, 1*4(%2)\n\t"
		"mov 2*4(%1), %3\n\t"
		"mov 3*4(%1), %4\n\t"
		"mov %3, 2*4(%2)\n\t"
		"mov %4, 3*4(%2)\n\t"
		"lea 0x10(%1), %1\n\t"
		"lea 0x10(%2), %2\n\t"
		"jae 3b\n\t"
		"add $0x10, %0\n\t"
		"jmp 1f\n\t"

		/*
		 * Handle data forward by movs.
		 */
		".p2align 4\n\t"
		"4:\n\t"
		"mov -4(%1, %0), %3\n\t"
		"lea -4(%2, %0), %4\n\t"
		"shr $2, %0\n\t"
		"rep movsl\n\t"
		"mov %3, (%4)\n\t"
		"jmp 11f\n\t"
		/*
		 * Handle data backward by movs.
		 */
		".p2align 4\n\t"
		"6:\n\t"
		"mov (%1), %3\n\t"
		"mov %2, %4\n\t"
		"lea -4(%1, %0), %1\n\t"
		"lea -4(%2, %0), %2\n\t"
		"shr $2, %0\n\t"
		"std\n\t"
		"rep movsl\n\t"
		"mov %3,(%4)\n\t"
		"cld\n\t"
		"jmp 11f\n\t"

		/*
		 * Start to prepare for backward copy.
		 */
		".p2align 4\n\t"
		"2:\n\t"
		"cmp  $680, %0\n\t"
		"jb 5f\n\t"
		"mov %1, %3\n\t"
		"xor %2, %3\n\t"
		"and $0xff, %3\n\t"
		"jz 6b\n\t"

		/*
		 * Calculate copy position to tail.
		 */
		"5:\n\t"
		"add %0, %1\n\t"
		"add %0, %2\n\t"
		"sub $0x10, %0\n\t"

		/*
		 * We gobble 16 bytes backward in each loop.
		 */
		"7:\n\t"
		"sub $0x10, %0\n\t"

		"mov -1*4(%1), %3\n\t"
		"mov -2*4(%1), %4\n\t"
		"mov %3, -1*4(%2)\n\t"
		"mov %4, -2*4(%2)\n\t"
		"mov -3*4(%1), %3\n\t"
		"mov -4*4(%1), %4\n\t"
		"mov %3, -3*4(%2)\n\t"
		"mov %4, -4*4(%2)\n\t"
		"lea -0x10(%1), %1\n\t"
		"lea -0x10(%2), %2\n\t"
		"jae 7b\n\t"
		/*
		 * Calculate copy position to head.
		 */
		"add $0x10, %0\n\t"
		"sub %0, %1\n\t"
		"sub %0, %2\n\t"

		/*
		 * Move data from 8 bytes to 15 bytes.
		 */
		".p2align 4\n\t"
		"1:\n\t"
		"cmp $8, %0\n\t"
		"jb 8f\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov 1*4(%1), %4\n\t"
		"mov -2*4(%1, %0), %5\n\t"
		"mov -1*4(%1, %0), %1\n\t"

		"mov %3, 0*4(%2)\n\t"
		"mov %4, 1*4(%2)\n\t"
		"mov %5, -2*4(%2, %0)\n\t"
		"mov %1, -1*4(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data from 4 bytes to 7 bytes.
		 */
		".p2align 4\n\t"
		"8:\n\t"
		"cmp $4, %0\n\t"
		"jb 9f\n\t"
		"mov 0*4(%1), %3\n\t"
		"mov -1*4(%1, %0), %4\n\t"
		"mov %3, 0*4(%2)\n\t"
		"mov %4, -1*4(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data from 2 bytes to 3 bytes.
		 */
		".p2align 4\n\t"
		"9:\n\t"
		"cmp $2, %0\n\t"
		"jb 10f\n\t"
		"movw 0*2(%1), %%dx\n\t"
		"movw -1*2(%1, %0), %%bx\n\t"
		"movw %%dx, 0*2(%2)\n\t"
		"movw %%bx, -1*2(%2, %0)\n\t"
		"jmp 11f\n\t"

		/*
		 * Move data for 1 byte.
		 */
		".p2align 4\n\t"
		"10:\n\t"
		"cmp $1, %0\n\t"
		"jb 11f\n\t"
		"movb (%1), %%cl\n\t"
		"movb %%cl, (%2)\n\t"
		".p2align 4\n\t"
		"11:"
		: "=&c" (d0), "=&S" (d1), "=&D" (d2),
		  "=r" (d3),"=r" (d4), "=r"(d5)
		:"0" (n),
		 "1" (src),
		 "2" (dest)
		:"memory");

	return ret;
}
EXPORT_SYMBOL(memmove);
gpl-2.0
crpalmer/android_kernel_motorola_msm8974
fs/fscache/netfs.c
11680
2660
/* FS-Cache netfs (client) registration
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

/* List of all registered netfs', guarded by fscache_addremove_sem below */
static LIST_HEAD(fscache_netfs_list);

/*
 * register a network filesystem for caching
 *
 * Allocates and initialises @netfs->primary_index (the netfs's top-level
 * index cookie, parented on the static fsdef index), then adds @netfs to
 * fscache_netfs_list under fscache_addremove_sem.
 *
 * Returns 0 on success, -ENOMEM if the cookie cannot be allocated, or
 * -EEXIST if a netfs with the same name is already registered.
 */
int __fscache_register_netfs(struct fscache_netfs *netfs)
{
	struct fscache_netfs *ptr;
	int ret;

	_enter("{%s}", netfs->name);

	INIT_LIST_HEAD(&netfs->link);

	/* allocate a cookie for the primary index */
	netfs->primary_index =
		kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);

	if (!netfs->primary_index) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	/* initialise the primary index cookie; it takes a usage and an
	 * n_children reference on its parent (the fsdef index) */
	atomic_set(&netfs->primary_index->usage, 1);
	atomic_set(&netfs->primary_index->n_children, 0);

	netfs->primary_index->def		= &fscache_fsdef_netfs_def;
	netfs->primary_index->parent		= &fscache_fsdef_index;
	netfs->primary_index->netfs_data	= netfs;

	atomic_inc(&netfs->primary_index->parent->usage);
	atomic_inc(&netfs->primary_index->parent->n_children);

	spin_lock_init(&netfs->primary_index->lock);
	INIT_HLIST_HEAD(&netfs->primary_index->backing_objects);

	/* check the netfs type is not already present */
	down_write(&fscache_addremove_sem);

	ret = -EEXIST;
	list_for_each_entry(ptr, &fscache_netfs_list, link) {
		if (strcmp(ptr->name, netfs->name) == 0)
			goto already_registered;
	}

	list_add(&netfs->link, &fscache_netfs_list);
	ret = 0;

	printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n",
	       netfs->name);

already_registered:
	up_write(&fscache_addremove_sem);

	if (ret < 0) {
		/* duplicate name: discard the cookie.  Clearing ->parent
		 * stops __fscache_cookie_put from walking up to the fsdef
		 * index.  NOTE(review): the parent usage/n_children counts
		 * taken above are not dropped here — apparently tolerable
		 * because the fsdef index is a static object; confirm
		 * against __fscache_cookie_put's behaviour. */
		netfs->primary_index->parent = NULL;
		__fscache_cookie_put(netfs->primary_index);
		netfs->primary_index = NULL;
	}

	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(__fscache_register_netfs);

/*
 * unregister a network filesystem from the cache
 * - all cookies must have been released first
 *
 * Removes @netfs from fscache_netfs_list and relinquishes its primary
 * index cookie (retire = 0), all under fscache_addremove_sem.
 */
void __fscache_unregister_netfs(struct fscache_netfs *netfs)
{
	_enter("{%s.%u}", netfs->name, netfs->version);

	down_write(&fscache_addremove_sem);

	list_del(&netfs->link);
	fscache_relinquish_cookie(netfs->primary_index, 0);

	up_write(&fscache_addremove_sem);

	printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n",
	       netfs->name);

	_leave("");
}
EXPORT_SYMBOL(__fscache_unregister_netfs);
gpl-2.0
twitish/SimpleKernel-4.2.2
fs/ntfs/runlist.c
12960
60881
/** * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. * * Copyright (c) 2001-2007 Anton Altaparmakov * Copyright (c) 2002-2005 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "debug.h" #include "dir.h" #include "endian.h" #include "malloc.h" #include "ntfs.h" /** * ntfs_rl_mm - runlist memmove * * It is up to the caller to serialize access to the runlist @base. */ static inline void ntfs_rl_mm(runlist_element *base, int dst, int src, int size) { if (likely((dst != src) && (size > 0))) memmove(base + dst, base + src, size * sizeof(*base)); } /** * ntfs_rl_mc - runlist memory copy * * It is up to the caller to serialize access to the runlists @dstbase and * @srcbase. */ static inline void ntfs_rl_mc(runlist_element *dstbase, int dst, runlist_element *srcbase, int src, int size) { if (likely(size > 0)) memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase)); } /** * ntfs_rl_realloc - Reallocate memory for runlists * @rl: original runlist * @old_size: number of runlist elements in the original runlist @rl * @new_size: number of runlist elements we need space for * * As the runlists grow, more memory will be required. 
To prevent the * kernel having to allocate and reallocate large numbers of small bits of * memory, this function returns an entire page of memory. * * It is up to the caller to serialize access to the runlist @rl. * * N.B. If the new allocation doesn't require a different number of pages in * memory, the function will return the original pointer. * * On success, return a pointer to the newly allocated, or recycled, memory. * On error, return -errno. The following error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_realloc(runlist_element *rl, int old_size, int new_size) { runlist_element *new_rl; old_size = PAGE_ALIGN(old_size * sizeof(*rl)); new_size = PAGE_ALIGN(new_size * sizeof(*rl)); if (old_size == new_size) return rl; new_rl = ntfs_malloc_nofs(new_size); if (unlikely(!new_rl)) return ERR_PTR(-ENOMEM); if (likely(rl != NULL)) { if (unlikely(old_size > new_size)) old_size = new_size; memcpy(new_rl, rl, old_size); ntfs_free(rl); } return new_rl; } /** * ntfs_rl_realloc_nofail - Reallocate memory for runlists * @rl: original runlist * @old_size: number of runlist elements in the original runlist @rl * @new_size: number of runlist elements we need space for * * As the runlists grow, more memory will be required. To prevent the * kernel having to allocate and reallocate large numbers of small bits of * memory, this function returns an entire page of memory. * * This function guarantees that the allocation will succeed. It will sleep * for as long as it takes to complete the allocation. * * It is up to the caller to serialize access to the runlist @rl. * * N.B. If the new allocation doesn't require a different number of pages in * memory, the function will return the original pointer. * * On success, return a pointer to the newly allocated, or recycled, memory. * On error, return -errno. 
The following error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl, int old_size, int new_size) { runlist_element *new_rl; old_size = PAGE_ALIGN(old_size * sizeof(*rl)); new_size = PAGE_ALIGN(new_size * sizeof(*rl)); if (old_size == new_size) return rl; new_rl = ntfs_malloc_nofs_nofail(new_size); BUG_ON(!new_rl); if (likely(rl != NULL)) { if (unlikely(old_size > new_size)) old_size = new_size; memcpy(new_rl, rl, old_size); ntfs_free(rl); } return new_rl; } /** * ntfs_are_rl_mergeable - test if two runlists can be joined together * @dst: original runlist * @src: new runlist to test for mergeability with @dst * * Test if two runlists can be joined together. For this, their VCNs and LCNs * must be adjacent. * * It is up to the caller to serialize access to the runlists @dst and @src. * * Return: true Success, the runlists can be merged. * false Failure, the runlists cannot be merged. */ static inline bool ntfs_are_rl_mergeable(runlist_element *dst, runlist_element *src) { BUG_ON(!dst); BUG_ON(!src); /* We can merge unmapped regions even if they are misaligned. */ if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) return true; /* If the runs are misaligned, we cannot merge them. */ if ((dst->vcn + dst->length) != src->vcn) return false; /* If both runs are non-sparse and contiguous, we can merge them. */ if ((dst->lcn >= 0) && (src->lcn >= 0) && ((dst->lcn + dst->length) == src->lcn)) return true; /* If we are merging two holes, we can merge them. */ if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) return true; /* Cannot merge. */ return false; } /** * __ntfs_rl_merge - merge two runlists without testing if they can be merged * @dst: original, destination runlist * @src: new runlist to merge with @dst * * Merge the two runlists, writing into the destination runlist @dst. 
The * caller must make sure the runlists can be merged or this will corrupt the * destination runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. */ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) { dst->length += src->length; } /** * ntfs_rl_append - append a runlist after a given element * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: runlist to be inserted into @dst * @ssize: number of elements in @src (excluding end marker) * @loc: append the new runlist @src after this element in @dst * * Append the runlist @src after element @loc in @dst. Merge the right end of * the new runlist, if necessary. Adjust the size of the hole before the * appended runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. * * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_append(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { bool right = false; /* Right end of @src needs merging. */ int marker; /* End of the inserted runs. */ BUG_ON(!dst); BUG_ON(!src); /* First, check if the right hand end needs merging. */ if ((loc + 1) < dsize) right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); /* Space required: @dst size + @src size, less one if we merged. 
*/ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); if (IS_ERR(dst)) return dst; /* * We are guaranteed to succeed from here so can start modifying the * original runlists. */ /* First, merge the right hand end, if necessary. */ if (right) __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); /* First run after the @src runs that have been inserted. */ marker = loc + ssize + 1; /* Move the tail of @dst out of the way, then copy in @src. */ ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); ntfs_rl_mc(dst, loc + 1, src, 0, ssize); /* Adjust the size of the preceding hole. */ dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; /* We may have changed the length of the file, so fix the end marker */ if (dst[marker].lcn == LCN_ENOENT) dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; return dst; } /** * ntfs_rl_insert - insert a runlist into another * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: new runlist to be inserted * @ssize: number of elements in @src (excluding end marker) * @loc: insert the new runlist @src before this element in @dst * * Insert the runlist @src before element @loc in the runlist @dst. Merge the * left end of the new runlist, if necessary. Adjust the size of the hole * after the inserted runlist. * * It is up to the caller to serialize access to the runlists @dst and @src. * * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. 
*/ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { bool left = false; /* Left end of @src needs merging. */ bool disc = false; /* Discontinuity between @dst and @src. */ int marker; /* End of the inserted runs. */ BUG_ON(!dst); BUG_ON(!src); /* * disc => Discontinuity between the end of @dst and the start of @src. * This means we might need to insert a "not mapped" run. */ if (loc == 0) disc = (src[0].vcn > 0); else { s64 merged_length; left = ntfs_are_rl_mergeable(dst + loc - 1, src); merged_length = dst[loc - 1].length; if (left) merged_length += src->length; disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); } /* * Space required: @dst size + @src size, less one if we merged, plus * one if there was a discontinuity. */ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc); if (IS_ERR(dst)) return dst; /* * We are guaranteed to succeed from here so can start modifying the * original runlist. */ if (left) __ntfs_rl_merge(dst + loc - 1, src); /* * First run after the @src runs that have been inserted. * Nominally, @marker equals @loc + @ssize, i.e. location + number of * runs in @src. However, if @left, then the first run in @src has * been merged with one in @dst. And if @disc, then @dst and @src do * not meet and we need an extra run to fill the gap. */ marker = loc + ssize - left + disc; /* Move the tail of @dst out of the way, then copy in @src. */ ntfs_rl_mm(dst, marker, loc, dsize - loc); ntfs_rl_mc(dst, loc + disc, src, left, ssize - left); /* Adjust the VCN of the first run after the insertion... */ dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; /* ... and the length. */ if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED) dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn; /* Writing beyond the end of the file and there is a discontinuity. 
*/ if (disc) { if (loc > 0) { dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length; dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; } else { dst[loc].vcn = 0; dst[loc].length = dst[loc + 1].vcn; } dst[loc].lcn = LCN_RL_NOT_MAPPED; } return dst; } /** * ntfs_rl_replace - overwrite a runlist element with another runlist * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: new runlist to be inserted * @ssize: number of elements in @src (excluding end marker) * @loc: index in runlist @dst to overwrite with @src * * Replace the runlist element @dst at @loc with @src. Merge the left and * right ends of the inserted runlist, if necessary. * * It is up to the caller to serialize access to the runlists @dst and @src. * * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { signed delta; bool left = false; /* Left end of @src needs merging. */ bool right = false; /* Right end of @src needs merging. */ int tail; /* Start of tail of @dst. */ int marker; /* End of the inserted runs. */ BUG_ON(!dst); BUG_ON(!src); /* First, see if the left and right ends need merging. */ if ((loc + 1) < dsize) right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); if (loc > 0) left = ntfs_are_rl_mergeable(dst + loc - 1, src); /* * Allocate some space. We will need less if the left, right, or both * ends get merged. The -1 accounts for the run being replaced. 
*/ delta = ssize - 1 - left - right; if (delta > 0) { dst = ntfs_rl_realloc(dst, dsize, dsize + delta); if (IS_ERR(dst)) return dst; } /* * We are guaranteed to succeed from here so can start modifying the * original runlists. */ /* First, merge the left and right ends, if necessary. */ if (right) __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); if (left) __ntfs_rl_merge(dst + loc - 1, src); /* * Offset of the tail of @dst. This needs to be moved out of the way * to make space for the runs to be copied from @src, i.e. the first * run of the tail of @dst. * Nominally, @tail equals @loc + 1, i.e. location, skipping the * replaced run. However, if @right, then one of @dst's runs is * already merged into @src. */ tail = loc + right + 1; /* * First run after the @src runs that have been inserted, i.e. where * the tail of @dst needs to be moved to. * Nominally, @marker equals @loc + @ssize, i.e. location + number of * runs in @src. However, if @left, then the first run in @src has * been merged with one in @dst. */ marker = loc + ssize - left; /* Move the tail of @dst out of the way, then copy in @src. */ ntfs_rl_mm(dst, marker, tail, dsize - tail); ntfs_rl_mc(dst, loc, src, left, ssize - left); /* We may have changed the length of the file, so fix the end marker. */ if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT) dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; return dst; } /** * ntfs_rl_split - insert a runlist into the centre of a hole * @dst: original runlist to be worked on * @dsize: number of elements in @dst (including end marker) * @src: new runlist to be inserted * @ssize: number of elements in @src (excluding end marker) * @loc: index in runlist @dst at which to split and insert @src * * Split the runlist @dst at @loc into two and insert @new in between the two * fragments. No merging of runlists is necessary. Adjust the size of the * holes either side. * * It is up to the caller to serialize access to the runlists @dst and @src. 
* * On success, return a pointer to the new, combined, runlist. Note, both * runlists @dst and @src are deallocated before returning so you cannot use * the pointers for anything any more. (Strictly speaking the returned runlist * may be the same as @dst but this is irrelevant.) * * On error, return -errno. Both runlists are left unmodified. The following * error codes are defined: * -ENOMEM - Not enough memory to allocate runlist array. * -EINVAL - Invalid parameters were passed in. */ static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize, runlist_element *src, int ssize, int loc) { BUG_ON(!dst); BUG_ON(!src); /* Space required: @dst size + @src size + one new hole. */ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1); if (IS_ERR(dst)) return dst; /* * We are guaranteed to succeed from here so can start modifying the * original runlists. */ /* Move the tail of @dst out of the way, then copy in @src. */ ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc); ntfs_rl_mc(dst, loc + 1, src, 0, ssize); /* Adjust the size of the holes either size of @src. */ dst[loc].length = dst[loc+1].vcn - dst[loc].vcn; dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length; dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn; return dst; } /** * ntfs_runlists_merge - merge two runlists into one * @drl: original runlist to be worked on * @srl: new runlist to be merged into @drl * * First we sanity check the two runlists @srl and @drl to make sure that they * are sensible and can be merged. The runlist @srl must be either after the * runlist @drl or completely within a hole (or unmapped region) in @drl. * * It is up to the caller to serialize access to the runlists @drl and @srl. * * Merging of runlists is necessary in two cases: * 1. When attribute lists are used and a further extent is being mapped. * 2. When new clusters are allocated to fill a hole or extend a file. * * There are four possible ways @srl can be merged. 
 * It can:
 *	- be inserted at the beginning of a hole,
 *	- split the hole in two and be inserted between the two fragments,
 *	- be appended at the end of a hole, or it can
 *	- replace the whole hole.
 * It can also be appended to the end of the runlist, which is just a variant
 * of the insert case.
 *
 * On success, return a pointer to the new, combined, runlist. Note, both
 * runlists @drl and @srl are deallocated before returning so you cannot use
 * the pointers for anything any more. (Strictly speaking the returned runlist
 * may be the same as @drl but this is irrelevant.)
 *
 * On error, return -errno. Both runlists are left unmodified. The following
 * error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EINVAL	- Invalid parameters were passed in.
 *	-ERANGE	- The runlists overlap and cannot be merged.
 */
runlist_element *ntfs_runlists_merge(runlist_element *drl,
		runlist_element *srl)
{
	int di, si;	/* Current index into @[ds]rl. */
	int sstart;	/* First index with lcn > LCN_RL_NOT_MAPPED. */
	int dins;	/* Index into @drl at which to insert @srl. */
	int dend, send;	/* Last index into @[ds]rl. */
	int dfinal, sfinal;	/* The last index into @[ds]rl with
				   lcn >= LCN_HOLE. */
	int marker = 0;	/* Index of @srl's LCN_ENOENT terminator, if any. */
	VCN marker_vcn = 0;	/* VCN of that terminator. */

#ifdef DEBUG
	ntfs_debug("dst:");
	ntfs_debug_dump_runlist(drl);
	ntfs_debug("src:");
	ntfs_debug_dump_runlist(srl);
#endif

	/* Check for silly calling... */
	if (unlikely(!srl))
		return drl;
	if (IS_ERR(srl) || IS_ERR(drl))
		return ERR_PTR(-EINVAL);

	/* Check for the case where the first mapping is being done now. */
	if (unlikely(!drl)) {
		drl = srl;
		/* Complete the source runlist if necessary. */
		if (unlikely(drl[0].vcn)) {
			/* Scan to the end of the source runlist. */
			for (dend = 0; likely(drl[dend].length); dend++)
				;
			dend++;
			drl = ntfs_rl_realloc(drl, dend, dend + 1);
			if (IS_ERR(drl))
				return drl;
			/* Insert start element at the front of the runlist. */
			ntfs_rl_mm(drl, 1, 0, dend);
			drl[0].vcn = 0;
			drl[0].lcn = LCN_RL_NOT_MAPPED;
			drl[0].length = drl[1].vcn;
		}
		goto finished;
	}

	si = di = 0;

	/* Skip any unmapped start element(s) in the source runlist. */
	while (srl[si].length && srl[si].lcn < LCN_HOLE)
		si++;

	/* Can't have an entirely unmapped source runlist. */
	BUG_ON(!srl[si].length);

	/* Record the starting points. */
	sstart = si;

	/*
	 * Skip forward in @drl until we reach the position where @srl needs to
	 * be inserted. If we reach the end of @drl, @srl just needs to be
	 * appended to @drl.
	 */
	for (; drl[di].length; di++) {
		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
			break;
	}
	dins = di;

	/* Sanity check for illegal overlaps. */
	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
			(srl[si].lcn >= 0)) {
		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
		return ERR_PTR(-ERANGE);
	}

	/* Scan to the end of both runlists in order to know their sizes. */
	for (send = si; srl[send].length; send++)
		;
	for (dend = di; drl[dend].length; dend++)
		;

	/*
	 * If @srl ends in an LCN_ENOENT terminator, remember its index and
	 * vcn so the terminator can be re-established after the merge below.
	 */
	if (srl[send].lcn == LCN_ENOENT)
		marker_vcn = srl[marker = send].vcn;

	/* Scan to the last element with lcn >= LCN_HOLE. */
	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
		;
	for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
		;

	{
	bool start;
	bool finish;
	int ds = dend + 1;		/* Number of elements in drl & srl */
	int ss = sfinal - sstart + 1;

	start  = ((drl[dins].lcn <  LCN_RL_NOT_MAPPED) ||    /* End of file   */
		  (drl[dins].vcn == srl[sstart].vcn));	     /* Start of hole */
	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&    /* End of file   */
		 ((drl[dins].vcn + drl[dins].length) <=      /* End of hole   */
		  (srl[send - 1].vcn + srl[send - 1].length)));

	/* Or we will lose an end marker. */
	if (finish && !drl[dins].length)
		ss++;
	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
		finish = false;
#if 0
	ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
	ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
	ntfs_debug("start = %i, finish = %i", start, finish);
	ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
#endif
	/* Pick one of the four merge strategies described above. */
	if (start) {
		if (finish)
			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
	} else {
		if (finish)
			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
		else
			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
	}
	if (IS_ERR(drl)) {
		ntfs_error(NULL, "Merge failed.");
		return drl;
	}
	ntfs_free(srl);
	if (marker) {
		ntfs_debug("Triggering marker code.");
		for (ds = dend; drl[ds].length; ds++)
			;
		/* We only need to care if @srl ended after @drl. */
		if (drl[ds].vcn <= marker_vcn) {
			int slots = 0;

			if (drl[ds].vcn == marker_vcn) {
				ntfs_debug("Old marker = 0x%llx, replacing "
						"with LCN_ENOENT.",
						(unsigned long long)
						drl[ds].lcn);
				drl[ds].lcn = LCN_ENOENT;
				goto finished;
			}
			/*
			 * We need to create an unmapped runlist element in
			 * @drl or extend an existing one before adding the
			 * ENOENT terminator.
			 */
			if (drl[ds].lcn == LCN_ENOENT) {
				ds--;
				slots = 1;
			}
			if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
				/* Add an unmapped runlist element. */
				if (!slots) {
					drl = ntfs_rl_realloc_nofail(drl, ds,
							ds + 2);
					slots = 2;
				}
				ds++;
				/* Need to set vcn if it isn't set already. */
				if (slots != 1)
					drl[ds].vcn = drl[ds - 1].vcn +
							drl[ds - 1].length;
				drl[ds].lcn = LCN_RL_NOT_MAPPED;
				/* We now used up a slot. */
				slots--;
			}
			drl[ds].length = marker_vcn - drl[ds].vcn;
			/* Finally add the ENOENT terminator. */
			ds++;
			if (!slots)
				drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
			drl[ds].vcn = marker_vcn;
			drl[ds].lcn = LCN_ENOENT;
			drl[ds].length = (s64)0;
		}
	}
	}

finished:
	/* The merge was completed successfully. */
	ntfs_debug("Merged runlist:");
	ntfs_debug_dump_runlist(drl);
	return drl;
}

/**
 * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
 * @vol:	ntfs volume on which the attribute resides
 * @attr:	attribute record whose mapping pairs array to decompress
 * @old_rl:	optional runlist in which to insert @attr's runlist
 *
 * It is up to the caller to serialize access to the runlist @old_rl.
 *
 * Decompress the attribute @attr's mapping pairs array into a runlist. On
 * success, return the decompressed runlist.
 *
 * If @old_rl is not NULL, decompressed runlist is inserted into the
 * appropriate place in @old_rl and the resultant, combined runlist is
 * returned. The original @old_rl is deallocated.
 *
 * On error, return -errno. @old_rl is left unmodified in that case.
 *
 * The following error codes are defined:
 *	-ENOMEM	- Not enough memory to allocate runlist array.
 *	-EIO	- Corrupt runlist.
 *	-EINVAL	- Invalid parameters were passed in.
 *	-ERANGE	- The two runlists overlap.
 *
 * FIXME: For now we take the conceptionally simplest approach of creating the
 * new runlist disregarding the already existing one and then splicing the
 * two into one, if that is possible (we check for overlap and discard the new
 * runlist if overlap present before returning ERR_PTR(-ERANGE)).
 */
runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
		const ATTR_RECORD *attr, runlist_element *old_rl)
{
	VCN vcn;		/* Current vcn. */
	LCN lcn;		/* Current lcn. */
	s64 deltaxcn;		/* Change in [vl]cn. */
	runlist_element *rl;	/* The output runlist. */
	u8 *buf;		/* Current position in mapping pairs array. */
	u8 *attr_end;		/* End of attribute. */
	int rlsize;		/* Size of runlist buffer. */
	u16 rlpos;		/* Current runlist position in units of
				   runlist_elements. */
	u8 b;			/* Current byte offset in buf. */

#ifdef DEBUG
	/* Make sure attr exists and is non-resident.
 */
	if (!attr || !attr->non_resident || sle64_to_cpu(
			attr->data.non_resident.lowest_vcn) < (VCN)0) {
		ntfs_error(vol->sb, "Invalid arguments.");
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Start at vcn = lowest_vcn and lcn 0. */
	vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
	lcn = 0;
	/* Get start of the mapping pairs array. */
	buf = (u8*)attr + le16_to_cpu(
			attr->data.non_resident.mapping_pairs_offset);
	attr_end = (u8*)attr + le32_to_cpu(attr->length);
	if (unlikely(buf < (u8*)attr || buf > attr_end)) {
		ntfs_error(vol->sb, "Corrupt attribute.");
		return ERR_PTR(-EIO);
	}
	/* If the mapping pairs array is valid but empty, nothing to do. */
	if (!vcn && !*buf)
		return old_rl;
	/* Current position in runlist array. */
	rlpos = 0;
	/* Allocate first page and set current runlist size to one page. */
	rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	/* Insert unmapped starting element if necessary. */
	if (vcn) {
		rl->vcn = 0;
		rl->lcn = LCN_RL_NOT_MAPPED;
		rl->length = vcn;
		rlpos++;
	}
	while (buf < attr_end && *buf) {
		/*
		 * Allocate more memory if needed, including space for the
		 * not-mapped and terminator elements. ntfs_malloc_nofs()
		 * operates on whole pages only.
		 */
		if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
			runlist_element *rl2;

			rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
			if (unlikely(!rl2)) {
				ntfs_free(rl);
				return ERR_PTR(-ENOMEM);
			}
			memcpy(rl2, rl, rlsize);
			ntfs_free(rl);
			rl = rl2;
			rlsize += PAGE_SIZE;
		}
		/* Enter the current vcn into the current runlist element. */
		rl[rlpos].vcn = vcn;
		/*
		 * Get the change in vcn, i.e. the run length in clusters.
		 * Doing it this way ensures that we signextend negative values.
		 * A negative run length doesn't make any sense, but hey, I
		 * didn't make up the NTFS specs and Windows NT4 treats the run
		 * length as a signed value so that's how it is...
		 */
		b = *buf & 0xf;
		if (b) {
			if (unlikely(buf + b > attr_end))
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
		} else { /* The length entry is compulsory. */
			ntfs_error(vol->sb, "Missing length entry in mapping "
					"pairs array.");
			deltaxcn = (s64)-1;
		}
		/*
		 * Assume a negative length to indicate data corruption and
		 * hence clean-up and return NULL.
		 */
		if (unlikely(deltaxcn < 0)) {
			ntfs_error(vol->sb, "Invalid length in mapping pairs "
					"array.");
			goto err_out;
		}
		/*
		 * Enter the current run length into the current runlist
		 * element.
		 */
		rl[rlpos].length = deltaxcn;
		/* Increment the current vcn by the current run length. */
		vcn += deltaxcn;
		/*
		 * There might be no lcn change at all, as is the case for
		 * sparse clusters on NTFS 3.0+, in which case we set the lcn
		 * to LCN_HOLE.
		 */
		if (!(*buf & 0xf0))
			rl[rlpos].lcn = LCN_HOLE;
		else {
			/* Get the lcn change which really can be negative. */
			u8 b2 = *buf & 0xf;
			b = b2 + ((*buf >> 4) & 0xf);
			if (buf + b > attr_end)
				goto io_error;
			for (deltaxcn = (s8)buf[b--]; b > b2; b--)
				deltaxcn = (deltaxcn << 8) + buf[b];
			/* Change the current lcn to its new value. */
			lcn += deltaxcn;
#ifdef DEBUG
			/*
			 * On NTFS 1.2-, apparently can have lcn == -1 to
			 * indicate a hole. But we haven't verified ourselves
			 * whether it is really the lcn or the deltaxcn that is
			 * -1. So if either is found give us a message so we
			 * can investigate it further!
			 */
			if (vol->major_ver < 3) {
				if (unlikely(deltaxcn == (LCN)-1))
					ntfs_error(vol->sb, "lcn delta == -1");
				if (unlikely(lcn == (LCN)-1))
					ntfs_error(vol->sb, "lcn == -1");
			}
#endif
			/* Check lcn is not below -1. */
			if (unlikely(lcn < (LCN)-1)) {
				ntfs_error(vol->sb, "Invalid LCN < -1 in "
						"mapping pairs array.");
				goto err_out;
			}
			/* Enter the current lcn into the runlist element. */
			rl[rlpos].lcn = lcn;
		}
		/* Get to the next runlist element. */
		rlpos++;
		/* Increment the buffer position to the next mapping pair. */
		buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
	}
	if (unlikely(buf >= attr_end))
		goto io_error;
	/*
	 * If there is a highest_vcn specified, it must be equal to the final
	 * vcn in the runlist - 1, or something has gone badly wrong.
	 */
	deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
	if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
		ntfs_error(vol->sb, "Corrupt mapping pairs array in "
				"non-resident attribute.");
		goto err_out;
	}
	/* Setup not mapped runlist element if this is the base extent. */
	if (!attr->data.non_resident.lowest_vcn) {
		VCN max_cluster;

		max_cluster = ((sle64_to_cpu(
				attr->data.non_resident.allocated_size) +
				vol->cluster_size - 1) >>
				vol->cluster_size_bits) - 1;
		/*
		 * A highest_vcn of zero means this is a single extent
		 * attribute so simply terminate the runlist with LCN_ENOENT).
		 */
		if (deltaxcn) {
			/*
			 * If there is a difference between the highest_vcn and
			 * the highest cluster, the runlist is either corrupt
			 * or, more likely, there are more extents following
			 * this one.
			 */
			if (deltaxcn < max_cluster) {
				ntfs_debug("More extents to follow; deltaxcn "
						"= 0x%llx, max_cluster = "
						"0x%llx",
						(unsigned long long)deltaxcn,
						(unsigned long long)
						max_cluster);
				rl[rlpos].vcn = vcn;
				vcn += rl[rlpos].length = max_cluster -
						deltaxcn;
				rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
				rlpos++;
			} else if (unlikely(deltaxcn > max_cluster)) {
				ntfs_error(vol->sb, "Corrupt attribute. "
						"deltaxcn = 0x%llx, "
						"max_cluster = 0x%llx",
						(unsigned long long)deltaxcn,
						(unsigned long long)
						max_cluster);
				goto mpa_err;
			}
		}
		rl[rlpos].lcn = LCN_ENOENT;
	} else /* Not the base extent. There may be more extents to follow. */
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;

	/* Setup terminating runlist element. */
	rl[rlpos].vcn = vcn;
	rl[rlpos].length = (s64)0;
	/* If no existing runlist was specified, we are done. */
	if (!old_rl) {
		ntfs_debug("Mapping pairs array successfully decompressed:");
		ntfs_debug_dump_runlist(rl);
		return rl;
	}
	/* Now combine the new and old runlists checking for overlaps. */
	old_rl = ntfs_runlists_merge(old_rl, rl);
	if (likely(!IS_ERR(old_rl)))
		return old_rl;
	/* The merge freed neither list on error, so free our new one. */
	ntfs_free(rl);
	ntfs_error(vol->sb, "Failed to merge runlists.");
	return old_rl;
io_error:
	ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
	ntfs_free(rl);
	return ERR_PTR(-EIO);
}

/**
 * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
 * @rl:		runlist to use for conversion
 * @vcn:	vcn to convert
 *
 * Convert the virtual cluster number @vcn of an attribute into a logical
 * cluster number (lcn) of a device using the runlist @rl to map vcns to their
 * corresponding lcns.
 *
 * It is up to the caller to serialize access to the runlist @rl.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code		Meaning / Description
 * ==================================================
 *  LCN_HOLE		Hole / not allocated on disk.
 *  LCN_RL_NOT_MAPPED	This is part of the runlist which has not been
 *			inserted into the runlist yet.
 *  LCN_ENOENT		There is no such vcn in the attribute.
 *
 * Locking: - The caller must have locked the runlist (for reading or writing).
 *	    - This function does not touch the lock, nor does it modify the
 *	      runlist.
 */
LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
{
	int i;

	BUG_ON(vcn < 0);
	/*
	 * If rl is NULL, assume that we have found an unmapped runlist. The
	 * caller can then attempt to map it and fail appropriately if
	 * necessary.
	 */
	if (unlikely(!rl))
		return LCN_RL_NOT_MAPPED;

	/* Catch out of lower bounds vcn. */
	if (unlikely(vcn < rl[0].vcn))
		return LCN_ENOENT;

	for (i = 0; likely(rl[i].length); i++) {
		if (unlikely(vcn < rl[i+1].vcn)) {
			if (likely(rl[i].lcn >= (LCN)0))
				return rl[i].lcn + (vcn - rl[i].vcn);
			return rl[i].lcn;
		}
	}
	/*
	 * The terminator element is setup to the correct value, i.e. one of
	 * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
	 */
	if (likely(rl[i].lcn < (LCN)0))
		return rl[i].lcn;
	/* Just in case... We could replace this with BUG() some day.
 */
	return LCN_ENOENT;
}

#ifdef NTFS_RW

/**
 * ntfs_rl_find_vcn_nolock - find a vcn in a runlist
 * @rl:		runlist to search
 * @vcn:	vcn to find
 *
 * Find the virtual cluster number @vcn in the runlist @rl and return the
 * address of the runlist element containing the @vcn on success.
 *
 * Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
 * the runlist.
 *
 * Locking: The runlist must be locked on entry.
 */
runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn)
{
	BUG_ON(vcn < 0);
	if (unlikely(!rl || vcn < rl[0].vcn))
		return NULL;
	while (likely(rl->length)) {
		if (unlikely(vcn < rl[1].vcn)) {
			/* Only mapped elements (hole or real) are returned. */
			if (likely(rl->lcn >= LCN_HOLE))
				return rl;
			return NULL;
		}
		rl++;
	}
	/* @vcn may legitimately hit the LCN_ENOENT terminator itself. */
	if (likely(rl->lcn == LCN_ENOENT))
		return rl;
	return NULL;
}

/**
 * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
 * @n:		number for which to get the number of bytes for
 *
 * Return the number of bytes required to store @n unambiguously as
 * a signed number.
 *
 * This is used in the context of the mapping pairs array to determine how
 * many bytes will be needed in the array to store a given logical cluster
 * number (lcn) or a specific run length.
 *
 * Return the number of bytes written. This function cannot fail.
 */
static inline int ntfs_get_nr_significant_bytes(const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	/* Count bytes until only the sign extension (0 or -1) remains. */
	i = 0;
	do {
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	/*
	 * @j is the most significant byte kept; if its sign bit disagrees
	 * with the sign of @n, one extra byte is needed to disambiguate.
	 */
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if ((n < 0 && j >= 0) || (n > 0 && j < 0))
		i++;
	return i;
}

/**
 * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
 * @vol:	ntfs volume (needed for the ntfs version)
 * @rl:		locked runlist to determine the size of the mapping pairs of
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 *
 * Walk the locked runlist @rl and calculate the size in bytes of the mapping
 * pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
 * finishing with vcn @last_vcn.
 *
 * A @last_vcn of -1 means end of runlist and in that case the size of the
 * mapping pairs array corresponding to the runlist starting at vcn @first_vcn
 * and finishing at the end of the runlist is determined.
 *
 * This for example allows us to allocate a buffer of the right size when
 * building the mapping pairs array.
 *
 * If @rl is NULL, just return 1 (for the single terminator byte).
 *
 * Return the calculated size in bytes on success. On error, return -errno.
 * The following error codes are defined:
 *	-EINVAL	- Run list contains unmapped elements. Make sure to only pass
 *		  fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
		const runlist_element *rl, const VCN first_vcn,
		const VCN last_vcn)
{
	LCN prev_lcn;
	int rls;
	bool the_end = false;

	BUG_ON(first_vcn < 0);
	BUG_ON(last_vcn < -1);
	BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
	if (!rl) {
		BUG_ON(first_vcn);
		BUG_ON(last_vcn > 0);
		return 1;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	prev_lcn = 0;
	/* Always need the terminating zero byte.
 */
	rls = 1;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(prev_lcn);
		}
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Header byte + length. */
		rls += 1 + ntfs_get_nr_significant_bytes(length);
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just store the lcn.
		 * Note: this assumes that on NTFS 1.2-, holes are stored with
		 * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Change in lcn. */
			rls += ntfs_get_nr_significant_bytes(rl->lcn -
					prev_lcn);
			prev_lcn = rl->lcn;
		}
	}
	return rls;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		rls = -EINVAL;
	else
		rls = -EIO;
	return rls;
}

/**
 * ntfs_write_significant_bytes - write the significant bytes of a number
 * @dst:	destination buffer to write to
 * @dst_max:	pointer to last byte of destination buffer for bounds checking
 * @n:		number whose significant bytes to write
 *
 * Store in @dst, the minimum bytes of the number @n which are required to
 * identify @n unambiguously as a signed number, taking care not to exceed
 * @dest_max, the maximum position within @dst to which we are allowed to
 * write.
 *
 * This is used when building the mapping pairs array of a runlist to compress
 * a given logical cluster number (lcn) or a specific run length to the minimum
 * size possible.
 *
 * Return the number of bytes written on success. On error, i.e. the
 * destination buffer @dst is too small, return -ENOSPC.
 */
static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
		const s64 n)
{
	s64 l = n;
	int i;
	s8 j;

	/* Emit least significant bytes first (little endian on disk). */
	i = 0;
	do {
		if (unlikely(dst > dst_max))
			goto err_out;
		*dst++ = l & 0xffll;
		l >>= 8;
		i++;
	} while (l != 0 && l != -1);
	j = (n >> 8 * (i - 1)) & 0xff;
	/* If the sign bit is wrong, we need an extra byte. */
	if (n < 0 && j >= 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)-1;
	} else if (n > 0 && j < 0) {
		if (unlikely(dst > dst_max))
			goto err_out;
		i++;
		*dst = (s8)0;
	}
	return i;
err_out:
	return -ENOSPC;
}

/**
 * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
 * @vol:	ntfs volume (needed for the ntfs version)
 * @dst:	destination buffer to which to write the mapping pairs array
 * @dst_len:	size of destination buffer @dst in bytes
 * @rl:		locked runlist for which to build the mapping pairs array
 * @first_vcn:	first vcn which to include in the mapping pairs array
 * @last_vcn:	last vcn which to include in the mapping pairs array
 * @stop_vcn:	first vcn outside destination buffer on success or -ENOSPC
 *
 * Create the mapping pairs array from the locked runlist @rl, starting at vcn
 * @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
 * @dst_len is the size of @dst in bytes and it should be at least equal to the
 * value obtained by calling ntfs_get_size_for_mapping_pairs().
 *
 * A @last_vcn of -1 means end of runlist and in that case the mapping pairs
 * array corresponding to the runlist starting at vcn @first_vcn and finishing
 * at the end of the runlist is created.
 *
 * If @rl is NULL, just write a single terminator byte to @dst.
 *
 * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
 * the first vcn outside the destination buffer. Note that on error, @dst has
 * been filled with all the mapping pairs that will fit, thus it can be treated
 * as partial success, in that a new attribute extent needs to be created or
 * the next extent has to be used and the mapping pairs build has to be
 * continued with @first_vcn set to *@stop_vcn.
 *
 * Return 0 on success and -errno on error. The following error codes are
 * defined:
 *	-EINVAL	- Run list contains unmapped elements. Make sure to only pass
 *		  fully mapped runlists to this function.
 *	-EIO	- The runlist is corrupt.
 *	-ENOSPC	- The destination buffer is too small.
 *
 * Locking: @rl must be locked on entry (either for reading or writing), it
 *	    remains locked throughout, and is left locked upon return.
 */
int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
		const int dst_len, const runlist_element *rl,
		const VCN first_vcn, const VCN last_vcn, VCN *const stop_vcn)
{
	LCN prev_lcn;
	s8 *dst_max, *dst_next;
	int err = -ENOSPC;
	bool the_end = false;
	s8 len_len, lcn_len;

	BUG_ON(first_vcn < 0);
	BUG_ON(last_vcn < -1);
	BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
	BUG_ON(dst_len < 1);
	if (!rl) {
		BUG_ON(first_vcn);
		BUG_ON(last_vcn > 0);
		if (stop_vcn)
			*stop_vcn = 0;
		/* Terminator byte. */
		*dst = 0;
		return 0;
	}
	/* Skip to runlist element containing @first_vcn. */
	while (rl->length && first_vcn >= rl[1].vcn)
		rl++;
	if (unlikely((!rl->length && first_vcn > rl->vcn) ||
			first_vcn < rl->vcn))
		return -EINVAL;
	/*
	 * @dst_max is used for bounds checking in
	 * ntfs_write_significant_bytes().
	 */
	dst_max = dst + dst_len - 1;
	prev_lcn = 0;
	/* Do the first partial run if present. */
	if (first_vcn > rl->vcn) {
		s64 delta, length = rl->length;

		/* We know rl->length != 0 already. */
		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		delta = first_vcn - rl->vcn;
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length - delta);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just write the lcn
		 * change. FIXME: Do we need to write the lcn change or just
		 * the lcn in that case? Not sure as I have never seen this
		 * case on NT4. - We assume that we just need to write the lcn
		 * change until someone tells us otherwise... (AIA)
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			prev_lcn = rl->lcn;
			if (likely(rl->lcn >= 0))
				prev_lcn += delta;
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
		/* Go to next runlist element. */
		rl++;
	}
	/* Do the full runs. */
	for (; rl->length && !the_end; rl++) {
		s64 length = rl->length;

		if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
			goto err_out;
		/*
		 * If @stop_vcn is given and finishes inside this run, cap the
		 * run length.
		 */
		if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
			s64 s1 = last_vcn + 1;
			if (unlikely(rl[1].vcn > s1))
				length = s1 - rl->vcn;
			the_end = true;
		}
		/* Write length. */
		len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
				length);
		if (unlikely(len_len < 0))
			goto size_err;
		/*
		 * If the logical cluster number (lcn) denotes a hole and we
		 * are on NTFS 3.0+, we don't store it at all, i.e. we need
		 * zero space. On earlier NTFS versions we just write the lcn
		 * change. FIXME: Do we need to write the lcn change or just
		 * the lcn in that case? Not sure as I have never seen this
		 * case on NT4. - We assume that we just need to write the lcn
		 * change until someone tells us otherwise... (AIA)
		 */
		if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
			/* Write change in lcn. */
			lcn_len = ntfs_write_significant_bytes(dst + 1 +
					len_len, dst_max, rl->lcn - prev_lcn);
			if (unlikely(lcn_len < 0))
				goto size_err;
			prev_lcn = rl->lcn;
		} else
			lcn_len = 0;
		dst_next = dst + len_len + lcn_len + 1;
		if (unlikely(dst_next > dst_max))
			goto size_err;
		/* Update header byte. */
		*dst = lcn_len << 4 | len_len;
		/* Position at next mapping pairs array element. */
		dst = dst_next;
	}
	/* Success. */
	err = 0;
size_err:
	/* Set stop vcn.  On -ENOSPC this is the resume point for the caller. */
	if (stop_vcn)
		*stop_vcn = rl->vcn;
	/* Add terminator byte. */
	*dst = 0;
	return err;
err_out:
	if (rl->lcn == LCN_RL_NOT_MAPPED)
		err = -EINVAL;
	else
		err = -EIO;
	return err;
}

/**
 * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
 * @vol:	ntfs volume (needed for error output)
 * @runlist:	runlist to truncate
 * @new_length:	the new length of the runlist in VCNs
 *
 * Truncate the runlist described by @runlist as well as the memory buffer
 * holding the runlist elements to a length of @new_length VCNs.
 *
 * If @new_length lies within the runlist, the runlist elements with VCNs of
 * @new_length and above are discarded. As a special case if @new_length is
 * zero, the runlist is discarded and set to NULL.
 *
 * If @new_length lies beyond the runlist, a sparse runlist element is added to
 * the end of the runlist @runlist or if the last runlist element is a sparse
 * one already, this is extended.
 *
 * Note, no checking is done for unmapped runlist elements. It is assumed that
 * the caller has mapped any elements that need to be mapped already.
 *
 * Return 0 on success and -errno on error.
 *
 * Locking: The caller must hold @runlist->lock for writing.
 */
int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
		const s64 new_length)
{
	runlist_element *rl;
	int old_size;

	ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
	BUG_ON(!runlist);
	BUG_ON(new_length < 0);
	rl = runlist->rl;
	if (!new_length) {
		ntfs_debug("Freeing runlist.");
		runlist->rl = NULL;
		if (rl)
			ntfs_free(rl);
		return 0;
	}
	if (unlikely(!rl)) {
		/*
		 * Create a runlist consisting of a sparse runlist element of
		 * length @new_length followed by a terminator runlist element.
		 */
		rl = ntfs_malloc_nofs(PAGE_SIZE);
		if (unlikely(!rl)) {
			ntfs_error(vol->sb, "Not enough memory to allocate "
					"runlist element buffer.");
			return -ENOMEM;
		}
		runlist->rl = rl;
		/* rl[0] = sparse run covering [0, new_length), rl[1] = end. */
		rl[1].length = rl->vcn = 0;
		rl->lcn = LCN_HOLE;
		rl[1].vcn = rl->length = new_length;
		rl[1].lcn = LCN_ENOENT;
		return 0;
	}
	BUG_ON(new_length < rl->vcn);
	/* Find @new_length in the runlist. */
	while (likely(rl->length && new_length >= rl[1].vcn))
		rl++;
	/*
	 * If not at the end of the runlist we need to shrink it.
	 * If at the end of the runlist we need to expand it.
	 */
	if (rl->length) {
		runlist_element *trl;
		bool is_end;

		ntfs_debug("Shrinking runlist.");
		/* Determine the runlist size. */
		trl = rl + 1;
		while (likely(trl->length))
			trl++;
		old_size = trl - runlist->rl + 1;
		/* Truncate the run. */
		rl->length = new_length - rl->vcn;
		/*
		 * If a run was partially truncated, make the following runlist
		 * element a terminator.
		 */
		is_end = false;
		if (rl->length) {
			rl++;
			if (!rl->length)
				is_end = true;
			rl->vcn = new_length;
			rl->length = 0;
		}
		rl->lcn = LCN_ENOENT;
		/* Reallocate memory if necessary. */
		if (!is_end) {
			int new_size = rl - runlist->rl + 1;
			rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
			if (IS_ERR(rl))
				ntfs_warning(vol->sb, "Failed to shrink "
						"runlist buffer. This just "
						"wastes a bit of memory "
						"temporarily so we ignore it "
						"and return success.");
			else
				runlist->rl = rl;
		}
	} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
		ntfs_debug("Expanding runlist.");
		/*
		 * If there is a previous runlist element and it is a sparse
		 * one, extend it. Otherwise need to add a new, sparse runlist
		 * element.
		 */
		if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
			(rl - 1)->length = new_length - (rl - 1)->vcn;
		else {
			/* Determine the runlist size. */
			old_size = rl - runlist->rl + 1;
			/* Reallocate memory if necessary. */
			rl = ntfs_rl_realloc(runlist->rl, old_size,
					old_size + 1);
			if (IS_ERR(rl)) {
				ntfs_error(vol->sb, "Failed to expand runlist "
						"buffer, aborting.");
				return PTR_ERR(rl);
			}
			runlist->rl = rl;
			/*
			 * Set @rl to the same runlist element in the new
			 * runlist as before in the old runlist.
			 */
			rl += old_size - 1;
			/* Add a new, sparse runlist element. */
			rl->lcn = LCN_HOLE;
			rl->length = new_length - rl->vcn;
			/* Add a new terminator runlist element. */
			rl++;
			rl->length = 0;
		}
		rl->vcn = new_length;
		rl->lcn = LCN_ENOENT;
	} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
		/* Runlist already has same size as requested. */
		rl->lcn = LCN_ENOENT;
	}
	ntfs_debug("Done.");
	return 0;
}

/**
 * ntfs_rl_punch_nolock - punch a hole into a runlist
 * @vol:	ntfs volume (needed for error output)
 * @runlist:	runlist to punch a hole into
 * @start:	starting VCN of the hole to be created
 * @length:	size of the hole to be created in units of clusters
 *
 * Punch a hole into the runlist @runlist starting at VCN @start and of size
 * @length clusters.
 *
 * Return 0 on success and -errno on error, in which case @runlist has not been
 * modified.
 *
 * If @start and/or @start + @length are outside the runlist return error code
 * -ENOENT.
 *
 * If the runlist contains unmapped or error elements between @start and @start
 * + @length return error code -EINVAL.
 *
 * Locking: The caller must hold @runlist->lock for writing.
 */
int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
		const VCN start, const s64 length)
{
	const VCN end = start + length;
	s64 delta;
	runlist_element *rl, *rl_end, *rl_real_end, *trl;
	int old_size;
	bool lcn_fixup = false;

	ntfs_debug("Entering for start 0x%llx, length 0x%llx.",
			(long long)start, (long long)length);
	BUG_ON(!runlist);
	BUG_ON(start < 0);
	BUG_ON(length < 0);
	BUG_ON(end < 0);
	rl = runlist->rl;
	if (unlikely(!rl)) {
		if (likely(!start && !length))
			return 0;
		return -ENOENT;
	}
	/* Find @start in the runlist.
*/ while (likely(rl->length && start >= rl[1].vcn)) rl++; rl_end = rl; /* Find @end in the runlist. */ while (likely(rl_end->length && end >= rl_end[1].vcn)) { /* Verify there are no unmapped or error elements. */ if (unlikely(rl_end->lcn < LCN_HOLE)) return -EINVAL; rl_end++; } /* Check the last element. */ if (unlikely(rl_end->length && rl_end->lcn < LCN_HOLE)) return -EINVAL; /* This covers @start being out of bounds, too. */ if (!rl_end->length && end > rl_end->vcn) return -ENOENT; if (!length) return 0; if (!rl->length) return -ENOENT; rl_real_end = rl_end; /* Determine the runlist size. */ while (likely(rl_real_end->length)) rl_real_end++; old_size = rl_real_end - runlist->rl + 1; /* If @start is in a hole simply extend the hole. */ if (rl->lcn == LCN_HOLE) { /* * If both @start and @end are in the same sparse run, we are * done. */ if (end <= rl[1].vcn) { ntfs_debug("Done (requested hole is already sparse)."); return 0; } extend_hole: /* Extend the hole. */ rl->length = end - rl->vcn; /* If @end is in a hole, merge it with the current one. */ if (rl_end->lcn == LCN_HOLE) { rl_end++; rl->length = rl_end->vcn - rl->vcn; } /* We have done the hole. Now deal with the remaining tail. */ rl++; /* Cut out all runlist elements up to @end. */ if (rl < rl_end) memmove(rl, rl_end, (rl_real_end - rl_end + 1) * sizeof(*rl)); /* Adjust the beginning of the tail if necessary. */ if (end > rl->vcn) { delta = end - rl->vcn; rl->vcn = end; rl->length -= delta; /* Only adjust the lcn if it is real. */ if (rl->lcn >= 0) rl->lcn += delta; } shrink_allocation: /* Reallocate memory if the allocation changed. */ if (rl < rl_end) { rl = ntfs_rl_realloc(runlist->rl, old_size, old_size - (rl_end - rl)); if (IS_ERR(rl)) ntfs_warning(vol->sb, "Failed to shrink " "runlist buffer. 
This just " "wastes a bit of memory " "temporarily so we ignore it " "and return success."); else runlist->rl = rl; } ntfs_debug("Done (extend hole)."); return 0; } /* * If @start is at the beginning of a run things are easier as there is * no need to split the first run. */ if (start == rl->vcn) { /* * @start is at the beginning of a run. * * If the previous run is sparse, extend its hole. * * If @end is not in the same run, switch the run to be sparse * and extend the newly created hole. * * Thus both of these cases reduce the problem to the above * case of "@start is in a hole". */ if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) { rl--; goto extend_hole; } if (end >= rl[1].vcn) { rl->lcn = LCN_HOLE; goto extend_hole; } /* * The final case is when @end is in the same run as @start. * For this need to split the run into two. One run for the * sparse region between the beginning of the old run, i.e. * @start, and @end and one for the remaining non-sparse * region, i.e. between @end and the end of the old run. */ trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); if (IS_ERR(trl)) goto enomem_out; old_size++; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } split_end: /* Shift all the runs up by one. */ memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl)); /* Finally, setup the two split runs. */ rl->lcn = LCN_HOLE; rl->length = length; rl++; rl->vcn += length; /* Only adjust the lcn if it is real. */ if (rl->lcn >= 0 || lcn_fixup) rl->lcn += length; rl->length -= length; ntfs_debug("Done (split one)."); return 0; } /* * @start is neither in a hole nor at the beginning of a run. * * If @end is in a hole, things are easier as simply truncating the run * @start is in to end at @start - 1, deleting all runs after that up * to @end, and finally extending the beginning of the run @end is in * to be @start is all that is needed. 
*/ if (rl_end->lcn == LCN_HOLE) { /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; /* Cut out all runlist elements up to @end. */ if (rl < rl_end) memmove(rl, rl_end, (rl_real_end - rl_end + 1) * sizeof(*rl)); /* Extend the beginning of the run @end is in to be @start. */ rl->vcn = start; rl->length = rl[1].vcn - start; goto shrink_allocation; } /* * If @end is not in a hole there are still two cases to distinguish. * Either @end is or is not in the same run as @start. * * The second case is easier as it can be reduced to an already solved * problem by truncating the run @start is in to end at @start - 1. * Then, if @end is in the next run need to split the run into a sparse * run followed by a non-sparse run (already covered above) and if @end * is not in the next run switching it to be sparse, again reduces the * problem to the already covered case of "@start is in a hole". */ if (end >= rl[1].vcn) { /* * If @end is not in the next run, reduce the problem to the * case of "@start is in a hole". */ if (rl[1].length && end >= rl[2].vcn) { /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; rl->vcn = start; rl->lcn = LCN_HOLE; goto extend_hole; } trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1); if (IS_ERR(trl)) goto enomem_out; old_size++; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } /* Truncate the run containing @start. */ rl->length = start - rl->vcn; rl++; /* * @end is in the next run, reduce the problem to the case * where "@start is at the beginning of a run and @end is in * the same run as @start". */ delta = rl->vcn - start; rl->vcn = start; if (rl->lcn >= 0) { rl->lcn -= delta; /* Need this in case the lcn just became negative. */ lcn_fixup = true; } rl->length += delta; goto split_end; } /* * The first case from above, i.e. @end is in the same run as @start. 
* We need to split the run into three. One run for the non-sparse * region between the beginning of the old run and @start, one for the * sparse region between @start and @end, and one for the remaining * non-sparse region, i.e. between @end and the end of the old run. */ trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2); if (IS_ERR(trl)) goto enomem_out; old_size += 2; if (runlist->rl != trl) { rl = trl + (rl - runlist->rl); rl_end = trl + (rl_end - runlist->rl); rl_real_end = trl + (rl_real_end - runlist->rl); runlist->rl = trl; } /* Shift all the runs up by two. */ memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl)); /* Finally, setup the three split runs. */ rl->length = start - rl->vcn; rl++; rl->vcn = start; rl->lcn = LCN_HOLE; rl->length = length; rl++; delta = end - rl->vcn; rl->vcn = end; rl->lcn += delta; rl->length -= delta; ntfs_debug("Done (split both)."); return 0; enomem_out: ntfs_error(vol->sb, "Not enough memory to extend runlist buffer."); return -ENOMEM; } #endif /* NTFS_RW */
gpl-2.0
zachf714/android_kernel_common
tools/perf/util/string.c
161
5559
#include "util.h" #include "string.h" #define K 1024LL /* * perf_atoll() * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. "256MB") * and return its numeric value */ s64 perf_atoll(const char *str) { unsigned int i; s64 length = -1, unit = 1; if (!isdigit(str[0])) goto out_err; for (i = 1; i < strlen(str); i++) { switch (str[i]) { case 'B': case 'b': break; case 'K': if (str[i + 1] != 'B') goto out_err; else goto kilo; case 'k': if (str[i + 1] != 'b') goto out_err; kilo: unit = K; break; case 'M': if (str[i + 1] != 'B') goto out_err; else goto mega; case 'm': if (str[i + 1] != 'b') goto out_err; mega: unit = K * K; break; case 'G': if (str[i + 1] != 'B') goto out_err; else goto giga; case 'g': if (str[i + 1] != 'b') goto out_err; giga: unit = K * K * K; break; case 'T': if (str[i + 1] != 'B') goto out_err; else goto tera; case 't': if (str[i + 1] != 'b') goto out_err; tera: unit = K * K * K * K; break; case '\0': /* only specified figures */ unit = 1; break; default: if (!isdigit(str[i])) goto out_err; break; } } length = atoll(str) * unit; goto out; out_err: length = -1; out: return length; } /* * Helper function for splitting a string into an argv-like array. * originaly copied from lib/argv_split.c */ static const char *skip_sep(const char *cp) { while (*cp && isspace(*cp)) cp++; return cp; } static const char *skip_arg(const char *cp) { while (*cp && !isspace(*cp)) cp++; return cp; } static int count_argc(const char *str) { int count = 0; while (*str) { str = skip_sep(str); if (*str) { count++; str = skip_arg(str); } } return count; } /** * argv_free - free an argv * @argv - the argument vector to be freed * * Frees an argv and the strings it points to. */ void argv_free(char **argv) { char **p; for (p = argv; *p; p++) free(*p); free(argv); } /** * argv_split - split a string at whitespace, returning an argv * @str: the string to be split * @argcp: returned argument count * * Returns an array of pointers to strings which are split out from * @str. 
This is performed by strictly splitting on white-space; no * quote processing is performed. Multiple whitespace characters are * considered to be a single argument separator. The returned array * is always NULL-terminated. Returns NULL on memory allocation * failure. */ char **argv_split(const char *str, int *argcp) { int argc = count_argc(str); char **argv = zalloc(sizeof(*argv) * (argc+1)); char **argvp; if (argv == NULL) goto out; if (argcp) *argcp = argc; argvp = argv; while (*str) { str = skip_sep(str); if (*str) { const char *p = str; char *t; str = skip_arg(str); t = strndup(p, str-p); if (t == NULL) goto fail; *argvp++ = t; } } *argvp = NULL; out: return argv; fail: argv_free(argv); return NULL; } /* Character class matching */ static bool __match_charclass(const char *pat, char c, const char **npat) { bool complement = false, ret = true; if (*pat == '!') { complement = true; pat++; } if (*pat++ == c) /* First character is special */ goto end; while (*pat && *pat != ']') { /* Matching */ if (*pat == '-' && *(pat + 1) != ']') { /* Range */ if (*(pat - 1) <= c && c <= *(pat + 1)) goto end; if (*(pat - 1) > *(pat + 1)) goto error; pat += 2; } else if (*pat++ == c) goto end; } if (!*pat) goto error; ret = false; end: while (*pat && *pat != ']') /* Searching closing */ pat++; if (!*pat) goto error; *npat = pat + 1; return complement ? 
!ret : ret; error: return false; } /* Glob/lazy pattern matching */ static bool __match_glob(const char *str, const char *pat, bool ignore_space) { while (*str && *pat && *pat != '*') { if (ignore_space) { /* Ignore spaces for lazy matching */ if (isspace(*str)) { str++; continue; } if (isspace(*pat)) { pat++; continue; } } if (*pat == '?') { /* Matches any single character */ str++; pat++; continue; } else if (*pat == '[') /* Character classes/Ranges */ if (__match_charclass(pat + 1, *str, &pat)) { str++; continue; } else return false; else if (*pat == '\\') /* Escaped char match as normal char */ pat++; if (*str++ != *pat++) return false; } /* Check wild card */ if (*pat == '*') { while (*pat == '*') pat++; if (!*pat) /* Tail wild card matches all */ return true; while (*str) if (__match_glob(str++, pat, ignore_space)) return true; } return !*str && !*pat; } /** * strglobmatch - glob expression pattern matching * @str: the target string to match * @pat: the pattern string to match * * This returns true if the @str matches @pat. @pat can includes wildcards * ('*','?') and character classes ([CHARS], complementation and ranges are * also supported). Also, this supports escape character ('\') to use special * characters as normal character. * * Note: if @pat syntax is broken, this always returns false. */ bool strglobmatch(const char *str, const char *pat) { return __match_glob(str, pat, false); } /** * strlazymatch - matching pattern strings lazily with glob pattern * @str: the target string to match * @pat: the pattern string to match * * This is similar to strglobmatch, except this ignores spaces in * the target string. */ bool strlazymatch(const char *str, const char *pat) { return __match_glob(str, pat, true); }
gpl-2.0
arjunroy/cinder_msm
drivers/isdn/capi/kcapi.c
161
24155
/* $Id: kcapi.c,v 1.1.2.8 2004/03/26 19:57:20 armin Exp $ * * Kernel CAPI 2.0 Module * * Copyright 1999 by Carsten Paeth <calle@calle.de> * Copyright 2002 by Kai Germaschewski <kai@germaschewski.name> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #define AVMB1_COMPAT #include "kcapi.h" #include <linux/module.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/capi.h> #include <linux/kernelcapi.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> #ifdef AVMB1_COMPAT #include <linux/b1lli.h> #endif #include <linux/mutex.h> static char *revision = "$Revision: 1.1.2.8 $"; /* ------------------------------------------------------------- */ static int showcapimsgs = 0; MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); module_param(showcapimsgs, uint, 0); /* ------------------------------------------------------------- */ struct capi_notifier { struct work_struct work; unsigned int cmd; u32 controller; u16 applid; u32 ncci; }; /* ------------------------------------------------------------- */ static struct capi_version driver_version = {2, 0, 1, 1<<4}; static char driver_serial[CAPI_SERIAL_LEN] = "0004711"; static char capi_manufakturer[64] = "AVM Berlin"; #define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f) LIST_HEAD(capi_drivers); DEFINE_RWLOCK(capi_drivers_list_lock); static DEFINE_RWLOCK(application_lock); static DEFINE_MUTEX(controller_mutex); struct capi20_appl *capi_applications[CAPI_MAXAPPL]; struct capi_ctr *capi_cards[CAPI_MAXCONTR]; static int ncards; /* -------- controller ref counting -------------------------------------- */ static inline 
struct capi_ctr * capi_ctr_get(struct capi_ctr *card) { if (!try_module_get(card->owner)) return NULL; return card; } static inline void capi_ctr_put(struct capi_ctr *card) { module_put(card->owner); } /* ------------------------------------------------------------- */ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) { if (contr - 1 >= CAPI_MAXCONTR) return NULL; return capi_cards[contr - 1]; } static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) { if (applid - 1 >= CAPI_MAXAPPL) return NULL; return capi_applications[applid - 1]; } /* -------- util functions ------------------------------------ */ static inline int capi_cmd_valid(u8 cmd) { switch (cmd) { case CAPI_ALERT: case CAPI_CONNECT: case CAPI_CONNECT_ACTIVE: case CAPI_CONNECT_B3_ACTIVE: case CAPI_CONNECT_B3: case CAPI_CONNECT_B3_T90_ACTIVE: case CAPI_DATA_B3: case CAPI_DISCONNECT_B3: case CAPI_DISCONNECT: case CAPI_FACILITY: case CAPI_INFO: case CAPI_LISTEN: case CAPI_MANUFACTURER: case CAPI_RESET_B3: case CAPI_SELECT_B_PROTOCOL: return 1; } return 0; } static inline int capi_subcmd_valid(u8 subcmd) { switch (subcmd) { case CAPI_REQ: case CAPI_CONF: case CAPI_IND: case CAPI_RESP: return 1; } return 0; } /* ------------------------------------------------------------ */ static void register_appl(struct capi_ctr *card, u16 applid, capi_register_params *rparam) { card = capi_ctr_get(card); if (card) card->register_appl(card, applid, rparam); else printk(KERN_WARNING "%s: cannot get card resources\n", __func__); } static void release_appl(struct capi_ctr *card, u16 applid) { DBG("applid %#x", applid); card->release_appl(card, applid); capi_ctr_put(card); } /* -------- KCI_CONTRUP --------------------------------------- */ static void notify_up(u32 contr) { struct capi_ctr *card = get_capi_ctr_by_nr(contr); struct capi20_appl *ap; u16 applid; if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr); } if (!card) { printk(KERN_WARNING "%s: invalid contr %d\n", 
__func__, contr); return; } for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { ap = get_capi_appl_by_nr(applid); if (!ap || ap->release_in_progress) continue; register_appl(card, applid, &ap->rparam); if (ap->callback && !ap->release_in_progress) ap->callback(KCI_CONTRUP, contr, &card->profile); } } /* -------- KCI_CONTRDOWN ------------------------------------- */ static void notify_down(u32 contr) { struct capi20_appl *ap; u16 applid; if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr); } for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { ap = get_capi_appl_by_nr(applid); if (ap && ap->callback && !ap->release_in_progress) ap->callback(KCI_CONTRDOWN, contr, NULL); } } static void notify_handler(struct work_struct *work) { struct capi_notifier *np = container_of(work, struct capi_notifier, work); switch (np->cmd) { case KCI_CONTRUP: notify_up(np->controller); break; case KCI_CONTRDOWN: notify_down(np->controller); break; } kfree(np); } /* * The notifier will result in adding/deleteing of devices. Devices can * only removed in user process, not in bh. 
*/ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) { struct capi_notifier *np = kmalloc(sizeof(*np), GFP_ATOMIC); if (!np) return -ENOMEM; INIT_WORK(&np->work, notify_handler); np->cmd = cmd; np->controller = controller; np->applid = applid; np->ncci = ncci; schedule_work(&np->work); return 0; } /* -------- Receiver ------------------------------------------ */ static void recv_handler(struct work_struct *work) { struct sk_buff *skb; struct capi20_appl *ap = container_of(work, struct capi20_appl, recv_work); if ((!ap) || (ap->release_in_progress)) return; mutex_lock(&ap->recv_mtx); while ((skb = skb_dequeue(&ap->recv_queue))) { if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND) ap->nrecvdatapkt++; else ap->nrecvctlpkt++; ap->recv_message(ap, skb); } mutex_unlock(&ap->recv_mtx); } void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb) { struct capi20_appl *ap; int showctl = 0; u8 cmd, subcmd; unsigned long flags; _cdebbuf *cdb; if (card->cardstate != CARD_RUNNING) { cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s", card->cnr, cdb->buf); cdebbuf_free(cdb); } else printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n", card->cnr); goto error; } cmd = CAPIMSG_COMMAND(skb->data); subcmd = CAPIMSG_SUBCOMMAND(skb->data); if (cmd == CAPI_DATA_B3 && subcmd == CAPI_IND) { card->nrecvdatapkt++; if (card->traceflag > 2) showctl |= 2; } else { card->nrecvctlpkt++; if (card->traceflag) showctl |= 2; } showctl |= (card->traceflag & 1); if (showctl & 2) { if (showctl & 1) { printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n", card->cnr, CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } else { cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_DEBUG "kcapi: got [%03d] %s\n", card->cnr, cdb->buf); cdebbuf_free(cdb); } else printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n", card->cnr, 
CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } } read_lock_irqsave(&application_lock, flags); ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data)); if ((!ap) || (ap->release_in_progress)) { read_unlock_irqrestore(&application_lock, flags); cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n", CAPIMSG_APPID(skb->data), cdb->buf); cdebbuf_free(cdb); } else printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n", CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd)); goto error; } skb_queue_tail(&ap->recv_queue, skb); schedule_work(&ap->recv_work); read_unlock_irqrestore(&application_lock, flags); return; error: kfree_skb(skb); } EXPORT_SYMBOL(capi_ctr_handle_message); void capi_ctr_ready(struct capi_ctr * card) { card->cardstate = CARD_RUNNING; printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n", card->cnr, card->name); notify_push(KCI_CONTRUP, card->cnr, 0, 0); } EXPORT_SYMBOL(capi_ctr_ready); void capi_ctr_reseted(struct capi_ctr * card) { u16 appl; DBG(""); if (card->cardstate == CARD_DETECTED) return; card->cardstate = CARD_DETECTED; memset(card->manu, 0, sizeof(card->manu)); memset(&card->version, 0, sizeof(card->version)); memset(&card->profile, 0, sizeof(card->profile)); memset(card->serial, 0, sizeof(card->serial)); for (appl = 1; appl <= CAPI_MAXAPPL; appl++) { struct capi20_appl *ap = get_capi_appl_by_nr(appl); if (!ap || ap->release_in_progress) continue; capi_ctr_put(card); } printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr); notify_push(KCI_CONTRDOWN, card->cnr, 0, 0); } EXPORT_SYMBOL(capi_ctr_reseted); void capi_ctr_suspend_output(struct capi_ctr *card) { if (!card->blocked) { printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr); card->blocked = 1; } } EXPORT_SYMBOL(capi_ctr_suspend_output); void capi_ctr_resume_output(struct capi_ctr *card) { if (card->blocked) { printk(KERN_DEBUG "kcapi: card [%03d] 
resume\n", card->cnr); card->blocked = 0; } } EXPORT_SYMBOL(capi_ctr_resume_output); /* ------------------------------------------------------------- */ int attach_capi_ctr(struct capi_ctr *card) { int i; mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (capi_cards[i] == NULL) break; } if (i == CAPI_MAXCONTR) { mutex_unlock(&controller_mutex); printk(KERN_ERR "kcapi: out of controller slots\n"); return -EBUSY; } capi_cards[i] = card; mutex_unlock(&controller_mutex); card->nrecvctlpkt = 0; card->nrecvdatapkt = 0; card->nsentctlpkt = 0; card->nsentdatapkt = 0; card->cnr = i + 1; card->cardstate = CARD_DETECTED; card->blocked = 0; card->traceflag = showcapimsgs; sprintf(card->procfn, "capi/controllers/%d", card->cnr); card->procent = create_proc_entry(card->procfn, 0, NULL); if (card->procent) { card->procent->read_proc = (int (*)(char *,char **,off_t,int,int *,void *)) card->ctr_read_proc; card->procent->data = card; } ncards++; printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n", card->cnr, card->name); return 0; } EXPORT_SYMBOL(attach_capi_ctr); int detach_capi_ctr(struct capi_ctr *card) { if (card->cardstate != CARD_DETECTED) capi_ctr_reseted(card); ncards--; if (card->procent) { remove_proc_entry(card->procfn, NULL); card->procent = NULL; } capi_cards[card->cnr - 1] = NULL; printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n", card->cnr, card->name); return 0; } EXPORT_SYMBOL(detach_capi_ctr); void register_capi_driver(struct capi_driver *driver) { unsigned long flags; write_lock_irqsave(&capi_drivers_list_lock, flags); list_add_tail(&driver->list, &capi_drivers); write_unlock_irqrestore(&capi_drivers_list_lock, flags); } EXPORT_SYMBOL(register_capi_driver); void unregister_capi_driver(struct capi_driver *driver) { unsigned long flags; write_lock_irqsave(&capi_drivers_list_lock, flags); list_del(&driver->list); write_unlock_irqrestore(&capi_drivers_list_lock, flags); } EXPORT_SYMBOL(unregister_capi_driver); /* 
------------------------------------------------------------- */ /* -------- CAPI2.0 Interface ---------------------------------- */ /* ------------------------------------------------------------- */ u16 capi20_isinstalled(void) { int i; for (i = 0; i < CAPI_MAXCONTR; i++) { if (capi_cards[i] && capi_cards[i]->cardstate == CARD_RUNNING) return CAPI_NOERROR; } return CAPI_REGNOTINSTALLED; } EXPORT_SYMBOL(capi20_isinstalled); u16 capi20_register(struct capi20_appl *ap) { int i; u16 applid; unsigned long flags; DBG(""); if (ap->rparam.datablklen < 128) return CAPI_LOGBLKSIZETOSMALL; write_lock_irqsave(&application_lock, flags); for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { if (capi_applications[applid - 1] == NULL) break; } if (applid > CAPI_MAXAPPL) { write_unlock_irqrestore(&application_lock, flags); return CAPI_TOOMANYAPPLS; } ap->applid = applid; capi_applications[applid - 1] = ap; ap->nrecvctlpkt = 0; ap->nrecvdatapkt = 0; ap->nsentctlpkt = 0; ap->nsentdatapkt = 0; ap->callback = NULL; mutex_init(&ap->recv_mtx); skb_queue_head_init(&ap->recv_queue); INIT_WORK(&ap->recv_work, recv_handler); ap->release_in_progress = 0; write_unlock_irqrestore(&application_lock, flags); mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; register_appl(capi_cards[i], applid, &ap->rparam); } mutex_unlock(&controller_mutex); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d up\n", applid); } return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_register); u16 capi20_release(struct capi20_appl *ap) { int i; unsigned long flags; DBG("applid %#x", ap->applid); write_lock_irqsave(&application_lock, flags); ap->release_in_progress = 1; capi_applications[ap->applid - 1] = NULL; write_unlock_irqrestore(&application_lock, flags); mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; release_appl(capi_cards[i], 
ap->applid); } mutex_unlock(&controller_mutex); flush_scheduled_work(); skb_queue_purge(&ap->recv_queue); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d down\n", ap->applid); } return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_release); u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) { struct capi_ctr *card; int showctl = 0; u8 cmd, subcmd; DBG("applid %#x", ap->applid); if (ncards == 0) return CAPI_REGNOTINSTALLED; if ((ap->applid == 0) || ap->release_in_progress) return CAPI_ILLAPPNR; if (skb->len < 12 || !capi_cmd_valid(CAPIMSG_COMMAND(skb->data)) || !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data))) return CAPI_ILLCMDORSUBCMDORMSGTOSMALL; card = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data)); if (!card || card->cardstate != CARD_RUNNING) { card = get_capi_ctr_by_nr(1); // XXX why? if (!card || card->cardstate != CARD_RUNNING) return CAPI_REGNOTINSTALLED; } if (card->blocked) return CAPI_SENDQUEUEFULL; cmd = CAPIMSG_COMMAND(skb->data); subcmd = CAPIMSG_SUBCOMMAND(skb->data); if (cmd == CAPI_DATA_B3 && subcmd== CAPI_REQ) { card->nsentdatapkt++; ap->nsentdatapkt++; if (card->traceflag > 2) showctl |= 2; } else { card->nsentctlpkt++; ap->nsentctlpkt++; if (card->traceflag) showctl |= 2; } showctl |= (card->traceflag & 1); if (showctl & 2) { if (showctl & 1) { printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n", CAPIMSG_CONTROLLER(skb->data), CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } else { _cdebbuf *cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_DEBUG "kcapi: put [%03d] %s\n", CAPIMSG_CONTROLLER(skb->data), cdb->buf); cdebbuf_free(cdb); } else printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n", CAPIMSG_CONTROLLER(skb->data), CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } } return card->send_message(card, skb); } EXPORT_SYMBOL(capi20_put_message); u16 capi20_get_manufacturer(u32 contr, u8 *buf) { struct capi_ctr *card; if (contr == 0) { 
strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); return CAPI_NOERROR; } card = get_capi_ctr_by_nr(contr); if (!card || card->cardstate != CARD_RUNNING) return CAPI_REGNOTINSTALLED; strlcpy(buf, card->manu, CAPI_MANUFACTURER_LEN); return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_get_manufacturer); u16 capi20_get_version(u32 contr, struct capi_version *verp) { struct capi_ctr *card; if (contr == 0) { *verp = driver_version; return CAPI_NOERROR; } card = get_capi_ctr_by_nr(contr); if (!card || card->cardstate != CARD_RUNNING) return CAPI_REGNOTINSTALLED; memcpy((void *) verp, &card->version, sizeof(capi_version)); return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_get_version); u16 capi20_get_serial(u32 contr, u8 *serial) { struct capi_ctr *card; if (contr == 0) { strlcpy(serial, driver_serial, CAPI_SERIAL_LEN); return CAPI_NOERROR; } card = get_capi_ctr_by_nr(contr); if (!card || card->cardstate != CARD_RUNNING) return CAPI_REGNOTINSTALLED; strlcpy((void *) serial, card->serial, CAPI_SERIAL_LEN); return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_get_serial); u16 capi20_get_profile(u32 contr, struct capi_profile *profp) { struct capi_ctr *card; if (contr == 0) { profp->ncontroller = ncards; return CAPI_NOERROR; } card = get_capi_ctr_by_nr(contr); if (!card || card->cardstate != CARD_RUNNING) return CAPI_REGNOTINSTALLED; memcpy((void *) profp, &card->profile, sizeof(struct capi_profile)); return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_get_profile); #ifdef AVMB1_COMPAT static int old_capi_manufacturer(unsigned int cmd, void __user *data) { avmb1_loadandconfigdef ldef; avmb1_extcarddef cdef; avmb1_resetdef rdef; capicardparams cparams; struct capi_ctr *card; struct capi_driver *driver = NULL; capiloaddata ldata; struct list_head *l; unsigned long flags; int retval; switch (cmd) { case AVMB1_ADDCARD: case AVMB1_ADDCARD_WITH_TYPE: if (cmd == AVMB1_ADDCARD) { if ((retval = copy_from_user(&cdef, data, sizeof(avmb1_carddef)))) return retval; cdef.cardtype = AVM_CARDTYPE_B1; } else { if 
((retval = copy_from_user(&cdef, data, sizeof(avmb1_extcarddef)))) return retval; } cparams.port = cdef.port; cparams.irq = cdef.irq; cparams.cardnr = cdef.cardnr; read_lock_irqsave(&capi_drivers_list_lock, flags); switch (cdef.cardtype) { case AVM_CARDTYPE_B1: list_for_each(l, &capi_drivers) { driver = list_entry(l, struct capi_driver, list); if (strcmp(driver->name, "b1isa") == 0) break; } break; case AVM_CARDTYPE_T1: list_for_each(l, &capi_drivers) { driver = list_entry(l, struct capi_driver, list); if (strcmp(driver->name, "t1isa") == 0) break; } break; default: driver = NULL; break; } if (!driver) { read_unlock_irqrestore(&capi_drivers_list_lock, flags); printk(KERN_ERR "kcapi: driver not loaded.\n"); return -EIO; } if (!driver->add_card) { read_unlock_irqrestore(&capi_drivers_list_lock, flags); printk(KERN_ERR "kcapi: driver has no add card function.\n"); return -EIO; } retval = driver->add_card(driver, &cparams); read_unlock_irqrestore(&capi_drivers_list_lock, flags); return retval; case AVMB1_LOAD: case AVMB1_LOAD_AND_CONFIG: if (cmd == AVMB1_LOAD) { if (copy_from_user(&ldef, data, sizeof(avmb1_loaddef))) return -EFAULT; ldef.t4config.len = 0; ldef.t4config.data = NULL; } else { if (copy_from_user(&ldef, data, sizeof(avmb1_loadandconfigdef))) return -EFAULT; } card = get_capi_ctr_by_nr(ldef.contr); if (!card) return -EINVAL; card = capi_ctr_get(card); if (!card) return -ESRCH; if (card->load_firmware == NULL) { printk(KERN_DEBUG "kcapi: load: no load function\n"); capi_ctr_put(card); return -ESRCH; } if (ldef.t4file.len <= 0) { printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len); capi_ctr_put(card); return -EINVAL; } if (ldef.t4file.data == NULL) { printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n"); capi_ctr_put(card); return -EINVAL; } ldata.firmware.user = 1; ldata.firmware.data = ldef.t4file.data; ldata.firmware.len = ldef.t4file.len; ldata.configuration.user = 1; ldata.configuration.data = 
ldef.t4config.data; ldata.configuration.len = ldef.t4config.len; if (card->cardstate != CARD_DETECTED) { printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr); capi_ctr_put(card); return -EBUSY; } card->cardstate = CARD_LOADING; retval = card->load_firmware(card, &ldata); if (retval) { card->cardstate = CARD_DETECTED; capi_ctr_put(card); return retval; } while (card->cardstate != CARD_RUNNING) { msleep_interruptible(100); /* 0.1 sec */ if (signal_pending(current)) { capi_ctr_put(card); return -EINTR; } } capi_ctr_put(card); return 0; case AVMB1_RESETCARD: if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef))) return -EFAULT; card = get_capi_ctr_by_nr(rdef.contr); if (!card) return -ESRCH; if (card->cardstate == CARD_DETECTED) return 0; card->reset_ctr(card); while (card->cardstate > CARD_DETECTED) { msleep_interruptible(100); /* 0.1 sec */ if (signal_pending(current)) return -EINTR; } return 0; } return -EINVAL; } #endif int capi20_manufacturer(unsigned int cmd, void __user *data) { struct capi_ctr *card; switch (cmd) { #ifdef AVMB1_COMPAT case AVMB1_LOAD: case AVMB1_LOAD_AND_CONFIG: case AVMB1_RESETCARD: case AVMB1_GET_CARDINFO: case AVMB1_REMOVECARD: return old_capi_manufacturer(cmd, data); #endif case KCAPI_CMD_TRACE: { kcapi_flagdef fdef; if (copy_from_user(&fdef, data, sizeof(kcapi_flagdef))) return -EFAULT; card = get_capi_ctr_by_nr(fdef.contr); if (!card) return -ESRCH; card->traceflag = fdef.flag; printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n", card->cnr, card->traceflag); return 0; } case KCAPI_CMD_ADDCARD: { struct list_head *l; struct capi_driver *driver = NULL; capicardparams cparams; kcapi_carddef cdef; int retval; if ((retval = copy_from_user(&cdef, data, sizeof(cdef)))) return retval; cparams.port = cdef.port; cparams.irq = cdef.irq; cparams.membase = cdef.membase; cparams.cardnr = cdef.cardnr; cparams.cardtype = 0; cdef.driver[sizeof(cdef.driver)-1] = 0; list_for_each(l, &capi_drivers) { driver = list_entry(l, struct 
capi_driver, list); if (strcmp(driver->name, cdef.driver) == 0) break; } if (driver == NULL) { printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n", cdef.driver); return -ESRCH; } if (!driver->add_card) { printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver); return -EIO; } return driver->add_card(driver, &cparams); } default: printk(KERN_ERR "kcapi: manufacturer command %d unknown.\n", cmd); break; } return -EINVAL; } EXPORT_SYMBOL(capi20_manufacturer); /* temporary hack */ void capi20_set_callback(struct capi20_appl *ap, void (*callback) (unsigned int cmd, __u32 contr, void *data)) { ap->callback = callback; } EXPORT_SYMBOL(capi20_set_callback); /* ------------------------------------------------------------- */ /* -------- Init & Cleanup ------------------------------------- */ /* ------------------------------------------------------------- */ /* * init / exit functions */ static int __init kcapi_init(void) { char *p; char rev[32]; int ret; ret = cdebug_init(); if (ret) return ret; kcapi_proc_init(); if ((p = strchr(revision, ':')) != NULL && p[1]) { strlcpy(rev, p + 2, sizeof(rev)); if ((p = strchr(rev, '$')) != NULL && p > rev) *(p-1) = 0; } else strcpy(rev, "1.0"); printk(KERN_NOTICE "CAPI Subsystem Rev %s\n", rev); return 0; } static void __exit kcapi_exit(void) { kcapi_proc_exit(); /* make sure all notifiers are finished */ flush_scheduled_work(); cdebug_exit(); } module_init(kcapi_init); module_exit(kcapi_exit);
gpl-2.0
jld/b2g-hamachi-kernel
arch/arm/mach-omap2/board-omap3stalker.c
417
12676
/* * linux/arch/arm/mach-omap2/board-omap3evm.c * * Copyright (C) 2008 Guangzhou EMA-Tech * * Modified from mach-omap2/board-omap3evm.c * * Initial code: Syed Mohammed Khasim * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl.h> #include <linux/mmc/host.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> #include <plat/board.h> #include <plat/common.h> #include <plat/gpmc.h> #include <plat/nand.h> #include <plat/usb.h> #include <video/omapdss.h> #include <video/omap-panel-generic-dpi.h> #include <plat/mcspi.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/smsc911x.h> #include <linux/i2c/at24.h> #include "sdram-micron-mt46h32m32lf-6.h" #include "mux.h" #include "hsmmc.h" #include "timer-gp.h" #include "common-board-devices.h" #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <plat/gpmc-smsc911x.h> #define OMAP3STALKER_ETHR_START 0x2c000000 #define OMAP3STALKER_ETHR_SIZE 1024 #define OMAP3STALKER_ETHR_GPIO_IRQ 19 #define OMAP3STALKER_SMC911X_CS 5 static struct omap_smsc911x_platform_data smsc911x_cfg = { .cs = OMAP3STALKER_SMC911X_CS, .gpio_irq = OMAP3STALKER_ETHR_GPIO_IRQ, .gpio_reset = -EINVAL, .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), }; static inline void __init omap3stalker_init_eth(void) { struct clk *l3ck; unsigned int rate; l3ck = clk_get(NULL, "l3_ck"); if (IS_ERR(l3ck)) rate = 100000000; else rate = 
clk_get_rate(l3ck); omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); gpmc_smsc911x_init(&smsc911x_cfg); } #else static inline void __init omap3stalker_init_eth(void) { return; } #endif /* * OMAP3 DSS control signals */ #define DSS_ENABLE_GPIO 199 #define LCD_PANEL_BKLIGHT_GPIO 210 #define ENABLE_VPLL2_DEV_GRP 0xE0 static int lcd_enabled; static int dvi_enabled; static void __init omap3_stalker_display_init(void) { return; } static int omap3_stalker_enable_lcd(struct omap_dss_device *dssdev) { if (dvi_enabled) { printk(KERN_ERR "cannot enable LCD, DVI is enabled\n"); return -EINVAL; } gpio_set_value(DSS_ENABLE_GPIO, 1); gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 1); lcd_enabled = 1; return 0; } static void omap3_stalker_disable_lcd(struct omap_dss_device *dssdev) { gpio_set_value(DSS_ENABLE_GPIO, 0); gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 0); lcd_enabled = 0; } static struct panel_generic_dpi_data lcd_panel = { .name = "generic", .platform_enable = omap3_stalker_enable_lcd, .platform_disable = omap3_stalker_disable_lcd, }; static struct omap_dss_device omap3_stalker_lcd_device = { .name = "lcd", .driver_name = "generic_dpi_panel", .data = &lcd_panel, .phy.dpi.data_lines = 24, .type = OMAP_DISPLAY_TYPE_DPI, }; static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev) { return 0; } static void omap3_stalker_disable_tv(struct omap_dss_device *dssdev) { } static struct omap_dss_device omap3_stalker_tv_device = { .name = "tv", .driver_name = "venc", .type = OMAP_DISPLAY_TYPE_VENC, #if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO) .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO, #elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE) .u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE, #endif .platform_enable = omap3_stalker_enable_tv, .platform_disable = omap3_stalker_disable_tv, }; static int omap3_stalker_enable_dvi(struct omap_dss_device *dssdev) { if (lcd_enabled) { printk(KERN_ERR "cannot enable DVI, LCD is enabled\n"); return -EINVAL; } gpio_set_value(DSS_ENABLE_GPIO, 1); 
dvi_enabled = 1; return 0; } static void omap3_stalker_disable_dvi(struct omap_dss_device *dssdev) { gpio_set_value(DSS_ENABLE_GPIO, 0); dvi_enabled = 0; } static struct panel_generic_dpi_data dvi_panel = { .name = "generic", .platform_enable = omap3_stalker_enable_dvi, .platform_disable = omap3_stalker_disable_dvi, }; static struct omap_dss_device omap3_stalker_dvi_device = { .name = "dvi", .type = OMAP_DISPLAY_TYPE_DPI, .driver_name = "generic_dpi_panel", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct omap_dss_device *omap3_stalker_dss_devices[] = { &omap3_stalker_lcd_device, &omap3_stalker_tv_device, &omap3_stalker_dvi_device, }; static struct omap_dss_board_info omap3_stalker_dss_data = { .num_devices = ARRAY_SIZE(omap3_stalker_dss_devices), .devices = omap3_stalker_dss_devices, .default_device = &omap3_stalker_dvi_device, }; static struct regulator_consumer_supply omap3stalker_vmmc1_supply = { .supply = "vmmc", }; static struct regulator_consumer_supply omap3stalker_vsim_supply = { .supply = "vmmc_aux", }; /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ static struct regulator_init_data omap3stalker_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &omap3stalker_vmmc1_supply, }; /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */ static struct regulator_init_data omap3stalker_vsim = { .constraints = { .min_uV = 1800000, .max_uV = 3000000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &omap3stalker_vsim_supply, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = 
-EINVAL, .gpio_wp = 23, }, {} /* Terminator */ }; static struct gpio_keys_button gpio_buttons[] = { { .code = BTN_EXTRA, .gpio = 18, .desc = "user", .wakeup = 1, }, }; static struct gpio_keys_platform_data gpio_key_info = { .buttons = gpio_buttons, .nbuttons = ARRAY_SIZE(gpio_buttons), }; static struct platform_device keys_gpio = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &gpio_key_info, }, }; static struct gpio_led gpio_leds[] = { { .name = "stalker:D8:usr0", .default_trigger = "default-on", .gpio = 126, }, { .name = "stalker:D9:usr1", .default_trigger = "default-on", .gpio = 127, }, { .name = "stalker:D3:mmc0", .gpio = -EINVAL, /* gets replaced */ .active_low = true, .default_trigger = "mmc0", }, { .name = "stalker:D4:heartbeat", .gpio = -EINVAL, /* gets replaced */ .active_low = true, .default_trigger = "heartbeat", }, }; static struct gpio_led_platform_data gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; static struct platform_device leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpio_led_info, }, }; static int omap3stalker_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { /* gpio + 0 is "mmc0_cd" (input/IRQ) */ omap_mux_init_gpio(23, OMAP_PIN_INPUT); mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); /* link regulators to MMC adapters */ omap3stalker_vmmc1_supply.dev = mmc[0].dev; omap3stalker_vsim_supply.dev = mmc[0].dev; /* * Most GPIOs are for USB OTG. Some are mostly sent to * the P2 connector; notably LEDA for the LCD backlight. 
*/ /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW, "EN_LCD_BKL"); /* gpio + 7 == DVI Enable */ gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; /* GPIO + 13 == ledsync (out, heartbeat) */ gpio_leds[3].gpio = gpio + 13; platform_device_register(&leds_gpio); return 0; } static struct twl4030_gpio_platform_data omap3stalker_gpio_data = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .use_leds = true, .setup = omap3stalker_twl_gpio_setup, }; static uint32_t board_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_DOWN), KEY(0, 2, KEY_ENTER), KEY(0, 3, KEY_M), KEY(1, 0, KEY_RIGHT), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_I), KEY(1, 3, KEY_N), KEY(2, 0, KEY_A), KEY(2, 1, KEY_E), KEY(2, 2, KEY_J), KEY(2, 3, KEY_O), KEY(3, 0, KEY_B), KEY(3, 1, KEY_F), KEY(3, 2, KEY_K), KEY(3, 3, KEY_P) }; static struct matrix_keymap_data board_map_data = { .keymap = board_keymap, .keymap_size = ARRAY_SIZE(board_keymap), }; static struct twl4030_keypad_data omap3stalker_kp_data = { .keymap_data = &board_map_data, .rows = 4, .cols = 4, .rep = 1, }; static struct twl4030_platform_data omap3stalker_twldata = { /* platform_data for children goes here */ .keypad = &omap3stalker_kp_data, .gpio = &omap3stalker_gpio_data, .vmmc1 = &omap3stalker_vmmc1, .vsim = &omap3stalker_vsim, }; static struct at24_platform_data fram_info = { .byte_len = (64 * 1024) / 8, .page_size = 8192, .flags = AT24_FLAG_ADDR16 | AT24_FLAG_IRUGO, }; static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = { { I2C_BOARD_INFO("24c64", 0x50), .flags = I2C_CLIENT_WAKE, .platform_data = &fram_info, }, }; static int __init omap3_stalker_i2c_init(void) { omap3_pmic_get_config(&omap3stalker_twldata, TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO, TWL_COMMON_REGULATOR_VDAC | 
TWL_COMMON_REGULATOR_VPLL2); omap3stalker_twldata.vdac->constraints.apply_uV = true; omap3stalker_twldata.vpll2->constraints.apply_uV = true; omap3stalker_twldata.vpll2->constraints.name = "VDVI"; omap3_pmic_init("twl4030", &omap3stalker_twldata); omap_register_i2c_bus(2, 400, NULL, 0); omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3, ARRAY_SIZE(omap3stalker_i2c_boardinfo3)); return 0; } #define OMAP3_STALKER_TS_GPIO 175 static struct omap_board_config_kernel omap3_stalker_config[] __initdata = { }; static void __init omap3_stalker_init_early(void) { omap2_init_common_infrastructure(); omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL); } static void __init omap3_stalker_init_irq(void) { omap_init_irq(); #ifdef CONFIG_OMAP_32K_TIMER omap2_gp_clockevent_set_gptimer(12); #endif } static struct platform_device *omap3_stalker_devices[] __initdata = { &keys_gpio, }; static struct usbhs_omap_board_data usbhs_bdata __initconst = { .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, .reset_gpio_port[1] = 21, .reset_gpio_port[2] = -EINVAL, }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE), OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE), {.reg_offset = OMAP_MUX_TERMINATOR}, }; #endif static void __init omap3_stalker_init(void) { omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); omap_board_config = omap3_stalker_config; omap_board_config_size = ARRAY_SIZE(omap3_stalker_config); omap3_stalker_i2c_init(); platform_add_devices(omap3_stalker_devices, ARRAY_SIZE(omap3_stalker_devices)); omap_display_init(&omap3_stalker_dss_data); omap_serial_init(); usb_musb_init(NULL); usbhs_init(&usbhs_bdata); omap_ads7846_init(1, 
OMAP3_STALKER_TS_GPIO, 310, NULL); omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP); omap3stalker_init_eth(); omap3_stalker_display_init(); /* Ensure SDRC pins are mux'd for self-refresh */ omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT); omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT); } MACHINE_START(SBC3530, "OMAP3 STALKER") /* Maintainer: Jason Lam -lzg@ema-tech.com */ .boot_params = 0x80000100, .map_io = omap3_map_io, .init_early = omap3_stalker_init_early, .init_irq = omap3_stalker_init_irq, .init_machine = omap3_stalker_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
yoonjong/linux_kernel_kyle
drivers/input/misc/sparcspkr.c
417
8090
/* * Driver for PC-speaker like devices found on various Sparc systems. * * Copyright (c) 2002 Vojtech Pavlik * Copyright (c) 2002, 2006, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/of_device.h> #include <linux/slab.h> #include <asm/io.h> MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Sparc Speaker beeper driver"); MODULE_LICENSE("GPL"); struct grover_beep_info { void __iomem *freq_regs; void __iomem *enable_reg; }; struct bbc_beep_info { u32 clock_freq; void __iomem *regs; }; struct sparcspkr_state { const char *name; int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); spinlock_t lock; struct input_dev *input_dev; union { struct grover_beep_info grover; struct bbc_beep_info bbc; } u; }; static u32 bbc_count_to_reg(struct bbc_beep_info *info, unsigned int count) { u32 val, clock_freq = info->clock_freq; int i; if (!count) return 0; if (count <= clock_freq >> 20) return 1 << 18; if (count >= clock_freq >> 12) return 1 << 10; val = 1 << 18; for (i = 19; i >= 11; i--) { val >>= 1; if (count <= clock_freq >> i) break; } return val; } static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct bbc_beep_info *info = &state->u.bbc; unsigned int count = 0; unsigned long flags; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = 1193182 / value; count = bbc_count_to_reg(info, count); spin_lock_irqsave(&state->lock, flags); if (count) { sbus_writeb(0x01, info->regs + 0); sbus_writeb(0x00, info->regs + 2); sbus_writeb((count >> 16) & 0xff, info->regs + 3); sbus_writeb((count >> 8) & 0xff, info->regs + 4); sbus_writeb(0x00, info->regs + 5); } else { sbus_writeb(0x00, 
info->regs + 0); } spin_unlock_irqrestore(&state->lock, flags); return 0; } static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct grover_beep_info *info = &state->u.grover; unsigned int count = 0; unsigned long flags; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = 1193182 / value; spin_lock_irqsave(&state->lock, flags); if (count) { /* enable counter 2 */ sbus_writeb(sbus_readb(info->enable_reg) | 3, info->enable_reg); /* set command for counter 2, 2 byte write */ sbus_writeb(0xB6, info->freq_regs + 1); /* select desired HZ */ sbus_writeb(count & 0xff, info->freq_regs + 0); sbus_writeb((count >> 8) & 0xff, info->freq_regs + 0); } else { /* disable counter 2 */ sbus_writeb(sbus_readb(info->enable_reg) & 0xFC, info->enable_reg); } spin_unlock_irqrestore(&state->lock, flags); return 0; } static int sparcspkr_probe(struct device *dev) { struct sparcspkr_state *state = dev_get_drvdata(dev); struct input_dev *input_dev; int error; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_dev->name = state->name; input_dev->phys = "sparc/input0"; input_dev->id.bustype = BUS_ISA; input_dev->id.vendor = 0x001f; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = dev; input_dev->evbit[0] = BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); input_dev->event = state->event; error = input_register_device(input_dev); if (error) { input_free_device(input_dev); return error; } state->input_dev = input_dev; return 0; } static void sparcspkr_shutdown(struct platform_device *dev) { struct sparcspkr_state *state = platform_get_drvdata(dev); struct input_dev *input_dev = state->input_dev; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); 
} static int bbc_beep_probe(struct platform_device *op) { struct sparcspkr_state *state; struct bbc_beep_info *info; struct device_node *dp; int err = -ENOMEM; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) goto out_err; state->name = "Sparc BBC Speaker"; state->event = bbc_spkr_event; spin_lock_init(&state->lock); dp = of_find_node_by_path("/"); err = -ENODEV; if (!dp) goto out_free; info = &state->u.bbc; info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0); if (!info->clock_freq) goto out_free; info->regs = of_ioremap(&op->resource[0], 0, 6, "bbc beep"); if (!info->regs) goto out_free; platform_set_drvdata(op, state); err = sparcspkr_probe(&op->dev); if (err) goto out_clear_drvdata; return 0; out_clear_drvdata: of_iounmap(&op->resource[0], info->regs, 6); out_free: kfree(state); out_err: return err; } static int bbc_remove(struct platform_device *op) { struct sparcspkr_state *state = platform_get_drvdata(op); struct input_dev *input_dev = state->input_dev; struct bbc_beep_info *info = &state->u.bbc; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); input_unregister_device(input_dev); of_iounmap(&op->resource[0], info->regs, 6); kfree(state); return 0; } static const struct of_device_id bbc_beep_match[] = { { .name = "beep", .compatible = "SUNW,bbc-beep", }, {}, }; static struct platform_driver bbc_beep_driver = { .driver = { .name = "bbcbeep", .owner = THIS_MODULE, .of_match_table = bbc_beep_match, }, .probe = bbc_beep_probe, .remove = bbc_remove, .shutdown = sparcspkr_shutdown, }; static int grover_beep_probe(struct platform_device *op) { struct sparcspkr_state *state; struct grover_beep_info *info; int err = -ENOMEM; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) goto out_err; state->name = "Sparc Grover Speaker"; state->event = grover_spkr_event; spin_lock_init(&state->lock); info = &state->u.grover; info->freq_regs = of_ioremap(&op->resource[2], 0, 2, "grover beep freq"); if (!info->freq_regs) goto 
out_free; info->enable_reg = of_ioremap(&op->resource[3], 0, 1, "grover beep enable"); if (!info->enable_reg) goto out_unmap_freq_regs; platform_set_drvdata(op, state); err = sparcspkr_probe(&op->dev); if (err) goto out_clear_drvdata; return 0; out_clear_drvdata: of_iounmap(&op->resource[3], info->enable_reg, 1); out_unmap_freq_regs: of_iounmap(&op->resource[2], info->freq_regs, 2); out_free: kfree(state); out_err: return err; } static int grover_remove(struct platform_device *op) { struct sparcspkr_state *state = platform_get_drvdata(op); struct grover_beep_info *info = &state->u.grover; struct input_dev *input_dev = state->input_dev; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); input_unregister_device(input_dev); of_iounmap(&op->resource[3], info->enable_reg, 1); of_iounmap(&op->resource[2], info->freq_regs, 2); kfree(state); return 0; } static const struct of_device_id grover_beep_match[] = { { .name = "beep", .compatible = "SUNW,smbus-beep", }, {}, }; static struct platform_driver grover_beep_driver = { .driver = { .name = "groverbeep", .owner = THIS_MODULE, .of_match_table = grover_beep_match, }, .probe = grover_beep_probe, .remove = grover_remove, .shutdown = sparcspkr_shutdown, }; static int __init sparcspkr_init(void) { int err = platform_driver_register(&bbc_beep_driver); if (!err) { err = platform_driver_register(&grover_beep_driver); if (err) platform_driver_unregister(&bbc_beep_driver); } return err; } static void __exit sparcspkr_exit(void) { platform_driver_unregister(&bbc_beep_driver); platform_driver_unregister(&grover_beep_driver); } module_init(sparcspkr_init); module_exit(sparcspkr_exit);
gpl-2.0
Borkata/adam-nv-3.1
drivers/staging/octeon/ethernet-tx.c
417
21465
/********************************************************************* * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information *********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/ratelimit.h> #include <linux/string.h> #include <net/dst.h> #ifdef CONFIG_XFRM #include <linux/xfrm.h> #include <net/xfrm.h> #endif /* CONFIG_XFRM */ #include <linux/atomic.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-tx.h" #include "ethernet-util.h" #include "cvmx-wqe.h" #include "cvmx-fau.h" #include "cvmx-pip.h" #include "cvmx-pko.h" #include "cvmx-helper.h" #include "cvmx-gmxx-defs.h" #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) /* * You can define GET_SKBUFF_QOS() to override how the skbuff output * function determines which output queue is used. The default * implementation always uses the base queue for the port. 
If, for * example, you wanted to use the skb->priority fieid, define * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority) */ #ifndef GET_SKBUFF_QOS #define GET_SKBUFF_QOS(skb) 0 #endif static void cvm_oct_tx_do_cleanup(unsigned long arg); static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); /* Maximum number of SKBs to try to free per xmit packet. */ #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) { int32_t undo; undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; if (undo > 0) cvmx_fau_atomic_add32(fau, -undo); skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free; return skb_to_free; } static void cvm_oct_kick_tx_poll_watchdog(void) { union cvmx_ciu_timx ciu_timx; ciu_timx.u64 = 0; ciu_timx.s.one_shot = 1; ciu_timx.s.len = cvm_oct_tx_poll_interval; cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64); } void cvm_oct_free_tx_skbs(struct net_device *dev) { int32_t skb_to_free; int qos, queues_per_port; int total_freed = 0; int total_remaining = 0; unsigned long flags; struct octeon_ethernet *priv = netdev_priv(dev); queues_per_port = cvmx_pko_get_num_queues(priv->port); /* Drain any pending packets in the free list */ for (qos = 0; qos < queues_per_port; qos++) { if (skb_queue_len(&priv->tx_free_list[qos]) == 0) continue; skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE); skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); total_freed += skb_to_free; if (skb_to_free > 0) { struct sk_buff *to_free_list = NULL; spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); while (skb_to_free > 0) { struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); t->next = to_free_list; to_free_list = t; skb_to_free--; } spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); /* Do the actual freeing outside of the lock. 
*/ while (to_free_list) { struct sk_buff *t = to_free_list; to_free_list = to_free_list->next; dev_kfree_skb_any(t); } } total_remaining += skb_queue_len(&priv->tx_free_list[qos]); } if (total_freed >= 0 && netif_queue_stopped(dev)) netif_wake_queue(dev); if (total_remaining) cvm_oct_kick_tx_poll_watchdog(); } /** * cvm_oct_xmit - transmit a packet * @skb: Packet to send * @dev: Device info structure * * Returns Always returns NETDEV_TX_OK */ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) { cvmx_pko_command_word0_t pko_command; union cvmx_buf_ptr hw_buffer; uint64_t old_scratch; uint64_t old_scratch2; int qos; int i; enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; struct octeon_ethernet *priv = netdev_priv(dev); struct sk_buff *to_free_list; int32_t skb_to_free; int32_t buffers_to_free; u32 total_to_clean; unsigned long flags; #if REUSE_SKBUFFS_WITHOUT_FREE unsigned char *fpa_head; #endif /* * Prefetch the private data structure. It is larger that one * cache line. */ prefetch(priv); /* * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to * completely remove "qos" in the event neither interface * supports multiple queues per port. */ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { qos = GET_SKBUFF_QOS(skb); if (qos <= 0) qos = 0; else if (qos >= cvmx_pko_get_num_queues(priv->port)) qos = 0; } else qos = 0; if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ CVMX_SYNCIOBDMA; old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); /* * Fetch and increment the number of packets to be * freed. */ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, priv->fau + qos * 4, MAX_SKB_TO_FREE); } /* * We have space for 6 segment pointers, If there will be more * than that, we must linearize. 
*/ if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { if (unlikely(__skb_linearize(skb))) { queue_type = QUEUE_DROP; if (USE_ASYNC_IOBDMA) { /* Get the number of skbuffs in use by the hardware */ CVMX_SYNCIOBDMA; skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); } else { /* Get the number of skbuffs in use by the hardware */ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE); } skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); goto skip_xmit; } } /* * The CN3XXX series of parts has an errata (GMX-401) which * causes the GMX block to hang if a collision occurs towards * the end of a <68 byte packet. As a workaround for this, we * pad packets to be 68 bytes whenever we are in half duplex * mode. We don't handle the case of having a small packet but * no room to add the padding. The kernel should always give * us at least a cache line */ if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { union cvmx_gmxx_prtx_cfg gmx_prt_cfg; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); if (interface < 2) { /* We only need to pad packet in half duplex mode */ gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); if (gmx_prt_cfg.s.duplex == 0) { int add_bytes = 64 - skb->len; if ((skb_tail_pointer(skb) + add_bytes) <= skb_end_pointer(skb)) memset(__skb_put(skb, add_bytes), 0, add_bytes); } } } /* Build the PKO command */ pko_command.u64 = 0; pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ pko_command.s.segs = 1; pko_command.s.total_bytes = skb->len; pko_command.s.size0 = CVMX_FAU_OP_SIZE_32; pko_command.s.subone0 = 1; pko_command.s.dontfree = 1; /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; if (skb_shinfo(skb)->nr_frags == 0) { hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb->len; } else { hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); hw_buffer.s.pool 
= 0; hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); hw_buffer.s.size = fs->size; CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; pko_command.s.gather = 1; goto dont_put_skbuff_in_hw; } /* * See if we can put this skb in the FPA pool. Any strange * behavior from the Linux networking stack will most likely * be caused by a bug in the following code. If some field is * in use by the network stack and get carried over when a * buffer is reused, bad thing may happen. If in doubt and * you dont need the absolute best performance, disable the * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has * shown a 25% increase in performance under some loads. 
*/ #if REUSE_SKBUFFS_WITHOUT_FREE fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); if (unlikely(skb->data < fpa_head)) { /* * printk("TX buffer beginning can't meet FPA * alignment constraints\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) { /* printk("TX buffer isn't large enough for the FPA\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shared(skb))) { /* printk("TX buffer sharing data with someone else\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_cloned(skb))) { /* printk("TX buffer has been cloned\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_header_cloned(skb))) { /* printk("TX buffer header has been cloned\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb->destructor)) { /* printk("TX buffer has a destructor\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shinfo(skb)->nr_frags)) { /* printk("TX buffer has fragments\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely (skb->truesize != sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { /* printk("TX buffer truesize has been changed\n"); */ goto dont_put_skbuff_in_hw; } /* * We can use this buffer in the FPA. We don't need the FAU * update anymore */ pko_command.s.dontfree = 0; hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7); *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; /* * The skbuff will be reused without ever being freed. We must * cleanup a bunch of core things. 
*/ dst_release(skb_dst(skb)); skb_dst_set(skb, NULL); #ifdef CONFIG_XFRM secpath_put(skb->sp); skb->sp = NULL; #endif nf_reset(skb); #ifdef CONFIG_NET_SCHED skb->tc_index = 0; #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = 0; #endif /* CONFIG_NET_CLS_ACT */ #endif /* CONFIG_NET_SCHED */ #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ dont_put_skbuff_in_hw: /* Check if we can use the hardware checksumming */ if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) && ((ip_hdr(skb)->protocol == IPPROTO_TCP) || (ip_hdr(skb)->protocol == IPPROTO_UDP))) { /* Use hardware checksum calc */ pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; } if (USE_ASYNC_IOBDMA) { /* Get the number of skbuffs in use by the hardware */ CVMX_SYNCIOBDMA; skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); } else { /* Get the number of skbuffs in use by the hardware */ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE); buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); } skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); /* * If we're sending faster than the receive can free them then * don't do the HW free. */ if ((buffers_to_free < -100) && !pko_command.s.dontfree) pko_command.s.dontfree = 1; if (pko_command.s.dontfree) { queue_type = QUEUE_CORE; pko_command.s.reg0 = priv->fau+qos*4; } else { queue_type = QUEUE_HW; } if (USE_ASYNC_IOBDMA) cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); /* Drop this packet if we have too many already queued to the HW */ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { if (dev->tx_queue_len != 0) { /* Drop the lock when notifying the core. 
*/ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); netif_stop_queue(dev); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); } else { /* If not using normal queueing. */ queue_type = QUEUE_DROP; goto skip_xmit; } } cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_NONE); /* Send the packet to the output queue */ if (unlikely(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE))) { printk_ratelimited("%s: Failed to send the packet\n", dev->name); queue_type = QUEUE_DROP; } skip_xmit: to_free_list = NULL; switch (queue_type) { case QUEUE_DROP: skb->next = to_free_list; to_free_list = skb; priv->stats.tx_dropped++; break; case QUEUE_HW: cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); break; case QUEUE_CORE: __skb_queue_tail(&priv->tx_free_list[qos], skb); break; default: BUG(); } while (skb_to_free > 0) { struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); t->next = to_free_list; to_free_list = t; skb_to_free--; } spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); /* Do the actual freeing outside of the lock. */ while (to_free_list) { struct sk_buff *t = to_free_list; to_free_list = to_free_list->next; dev_kfree_skb_any(t); } if (USE_ASYNC_IOBDMA) { CVMX_SYNCIOBDMA; total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH); /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); } else { total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1); } if (total_to_clean & 0x3ff) { /* * Schedule the cleanup tasklet every 1024 packets for * the pathological case of high traffic on one port * delaying clean up of packets on a different port * that is blocked waiting for the cleanup. 
*/ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); } cvm_oct_kick_tx_poll_watchdog(); return NETDEV_TX_OK; } /** * cvm_oct_xmit_pow - transmit a packet to the POW * @skb: Packet to send * @dev: Device info structure * Returns Always returns zero */ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); void *packet_buffer; void *copy_location; /* Get a work queue entry */ cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); if (unlikely(work == NULL)) { printk_ratelimited("%s: Failed to allocate a work " "queue entry\n", dev->name); priv->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } /* Get a packet buffer */ packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); if (unlikely(packet_buffer == NULL)) { printk_ratelimited("%s: Failed to allocate a packet buffer\n", dev->name); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); priv->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } /* * Calculate where we need to copy the data to. We need to * leave 8 bytes for a next pointer (unused). We also need to * include any configure skip. Then we need to align the IP * packet src and dest into the same 64bit word. The below * calculation may add a little extra, but that doesn't * hurt. */ copy_location = packet_buffer + sizeof(uint64_t); copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6; /* * We have to copy the packet since whoever processes this * packet will free it to a hardware pool. We can't use the * trick of counting outstanding packets like in * cvm_oct_xmit. */ memcpy(copy_location, skb->data, skb->len); /* * Fill in some of the work queue fields. We may need to add * more if the software at the other end needs them. */ work->hw_chksum = skb->csum; work->len = skb->len; work->ipprt = priv->port; work->qos = priv->port & 0x7; work->grp = pow_send_group; work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE; work->tag = pow_send_group; /* FIXME */ /* Default to zero. 
Sets of zero later are commented out */ work->word2.u64 = 0; work->word2.s.bufs = 1; work->packet_ptr.u64 = 0; work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location); work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL; work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE; work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7; if (skb->protocol == htons(ETH_P_IP)) { work->word2.s.ip_offset = 14; #if 0 work->word2.s.vlan_valid = 0; /* FIXME */ work->word2.s.vlan_cfi = 0; /* FIXME */ work->word2.s.vlan_id = 0; /* FIXME */ work->word2.s.dec_ipcomp = 0; /* FIXME */ #endif work->word2.s.tcp_or_udp = (ip_hdr(skb)->protocol == IPPROTO_TCP) || (ip_hdr(skb)->protocol == IPPROTO_UDP); #if 0 /* FIXME */ work->word2.s.dec_ipsec = 0; /* We only support IPv4 right now */ work->word2.s.is_v6 = 0; /* Hardware would set to zero */ work->word2.s.software = 0; /* No error, packet is internal */ work->word2.s.L4_error = 0; #endif work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)); #if 0 /* Assume Linux is sending a good packet */ work->word2.s.IP_exc = 0; #endif work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST); work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST); #if 0 /* This is an IP packet */ work->word2.s.not_IP = 0; /* No error, packet is internal */ work->word2.s.rcv_error = 0; /* No error, packet is internal */ work->word2.s.err_code = 0; #endif /* * When copying the data, include 4 bytes of the * ethernet header to align the same way hardware * does. 
*/ memcpy(work->packet_data, skb->data + 10, sizeof(work->packet_data)); } else { #if 0 work->word2.snoip.vlan_valid = 0; /* FIXME */ work->word2.snoip.vlan_cfi = 0; /* FIXME */ work->word2.snoip.vlan_id = 0; /* FIXME */ work->word2.snoip.software = 0; /* Hardware would set to zero */ #endif work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP); work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP); work->word2.snoip.is_bcast = (skb->pkt_type == PACKET_BROADCAST); work->word2.snoip.is_mcast = (skb->pkt_type == PACKET_MULTICAST); work->word2.snoip.not_IP = 1; /* IP was done up above */ #if 0 /* No error, packet is internal */ work->word2.snoip.rcv_error = 0; /* No error, packet is internal */ work->word2.snoip.err_code = 0; #endif memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); } /* Submit the packet to the POW */ cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos, work->grp); priv->stats.tx_packets++; priv->stats.tx_bytes += skb->len; dev_kfree_skb(skb); return 0; } /** * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX. * @dev: Device being shutdown * */ void cvm_oct_tx_shutdown_dev(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); unsigned long flags; int qos; for (qos = 0; qos < 16; qos++) { spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); while (skb_queue_len(&priv->tx_free_list[qos])) dev_kfree_skb_any(__skb_dequeue (&priv->tx_free_list[qos])); spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); } } static void cvm_oct_tx_do_cleanup(unsigned long arg) { int port; for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { if (cvm_oct_device[port]) { struct net_device *dev = cvm_oct_device[port]; cvm_oct_free_tx_skbs(dev); } } } static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) { /* Disable the interrupt. */ cvmx_write_csr(CVMX_CIU_TIMX(1), 0); /* Do the work in the tasklet. 
*/ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); return IRQ_HANDLED; } void cvm_oct_tx_initialize(void) { int i; /* Disable the interrupt. */ cvmx_write_csr(CVMX_CIU_TIMX(1), 0); /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */ i = request_irq(OCTEON_IRQ_TIMER1, cvm_oct_tx_cleanup_watchdog, 0, "Ethernet", cvm_oct_device); if (i) panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1); } void cvm_oct_tx_shutdown(void) { /* Free the interrupt handler */ free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device); }
gpl-2.0
drgreenth/UBER-L
lib/div64.c
4513
3154
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

/*
 * Divide *n (64 bit) by base (32 bit), storing the quotient back into *n
 * and returning the 32-bit remainder.  Classic shift-and-subtract long
 * division: first knock down the quotient's high 32 bits with a native
 * 32/32 divide, then resolve the rest bit by bit.
 *
 * Declared weak so an arch can override it with hand-written assembly.
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		/*
		 * The top 32 bits of the quotient can be computed with one
		 * native 32-bit division; subtract the corresponding
		 * multiple of base from the running remainder.
		 */
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/*
	 * Scale the divisor (and the quotient bit 'd') up to just below the
	 * remainder.  The (int64_t)b > 0 test stops the doubling before b's
	 * sign bit would be set, so b cannot wrap around to zero.
	 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Shift-subtract loop: peel off one quotient bit per iteration. */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}

EXPORT_SYMBOL(__div64_32);

#ifndef div_s64_rem
/*
 * Signed 64/32 division with 32-bit remainder.  Works on magnitudes via
 * div_u64_rem() and fixes up the signs afterwards; the remainder takes the
 * sign of the dividend (C truncated-division semantics).
 *
 * NOTE(review): -dividend for dividend == S64_MIN (and abs() of INT_MIN)
 * relies on two's-complement wraparound, as elsewhere in the kernel.
 */
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and is available for use without restriction.
 *
 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		/* Divisor fits in 32 bits: the cheap path suffices. */
		quot = div_u64(dividend, divisor);
	} else {
		/*
		 * Shift both operands right so the divisor fits in 32 bits,
		 * divide, then correct the estimate: after the quot-- it is
		 * at most one too small, and the final test adds the missing
		 * unit back when the true remainder is still >= divisor.
		 */
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	/*
	 * Divide the magnitudes, then apply the result sign with the
	 * branch-free (q ^ t) - t trick: t is all-ones exactly when the
	 * operands' signs differ, in which case the expression negates quot.
	 */
	quot = div64_u64(abs64(dividend), abs64(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
gpl-2.0
Altaf-Mahdi/mako
net/netfilter/ipset/ip_set_hash_ipportip.c
4769
14796
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Kernel module implementing an IP set type: the hash:ip,port,ip type */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>

#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
MODULE_ALIAS("ip_set_hash:ip,port,ip");

/* Type specific function prefix; the ip_set_ahash.h template pastes this
 * in front of the generated variant/gc/resize function names. */
#define TYPE		hash_ipportip

static bool
hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);

/* Same-set comparison is family independent; alias it for both variants. */
#define hash_ipportip4_same_set	hash_ipportip_same_set
#define hash_ipportip6_same_set	hash_ipportip_same_set

/* The type variant functions: IPv4 */

/* Member elements without timeout.
 * proto == 0 doubles as the "empty slot" marker (see data_isnull). */
struct hash_ipportip4_elem {
	__be32 ip;
	__be32 ip2;
	__be16 port;
	u8 proto;
	u8 padding;
};

/* Member elements with timeout support: same layout with the expiry
 * timestamp appended, so a telem can be viewed as an elem. */
struct hash_ipportip4_telem {
	__be32 ip;
	__be32 ip2;
	__be16 port;
	u8 proto;
	u8 padding;
	unsigned long timeout;
};

/* Field-wise equality of two IPv4 triples (ip, port, ip2) + protocol. */
static inline bool
hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
			  const struct hash_ipportip4_elem *ip2,
			  u32 *multi)
{
	return ip1->ip == ip2->ip &&
	       ip1->ip2 == ip2->ip2 &&
	       ip1->port == ip2->port &&
	       ip1->proto == ip2->proto;
}

/* An element with zero protocol marks an unused hash slot. */
static inline bool
hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
{
	return elem->proto == 0;
}

static inline void
hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
			 const struct hash_ipportip4_elem *src)
{
	memcpy(dst, src, sizeof(*dst));
}

/* Mark the slot free again; only proto needs clearing (see data_isnull). */
static inline void
hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
{
	elem->proto = 0;
}

/* Dump one element to netlink (no timeout).  The NLA_PUT_* macros jump
 * to nla_put_failure when the skb runs out of room; return 1 then so the
 * caller can restart the dump. */
static bool
hash_ipportip4_data_list(struct sk_buff *skb,
			 const struct hash_ipportip4_elem *data)
{
	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
	return 0;

nla_put_failure:
	return 1;
}

/* Dump one element to netlink including its remaining timeout. */
static bool
hash_ipportip4_data_tlist(struct sk_buff *skb,
			  const struct hash_ipportip4_elem *data)
{
	const struct hash_ipportip4_telem *tdata =
		(const struct hash_ipportip4_telem *)data;

	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
		      htonl(ip_set_timeout_get(tdata->timeout)));
	return 0;

nla_put_failure:
	return 1;
}

/* Instantiate the generic ahash machinery for the IPv4 variant. */
#define PF		4
#define HOST_MASK	32
#include <linux/netfilter/ipset/ip_set_ahash.h>

/* Remember where a partially completed (resized) add left off. */
static inline void
hash_ipportip4_data_next(struct ip_set_hash *h,
			 const struct hash_ipportip4_elem *d)
{
	h->next.ip = ntohl(d->ip);
	h->next.port = ntohs(d->port);
}

/* Kernel-side add/del/test: build the element from the packet and hand it
 * to the variant's adt function. */
static int
hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportip4_elem data = { };

	/* Extract port/proto from the transport header; bail out on
	 * protocols we cannot take a port from. */
	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
				 &data.port, &data.proto))
		return -EINVAL;

	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);

	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}

/* Userspace add/del/test via netlink attributes; supports IP ranges/CIDR
 * and port ranges by iterating over the cross product. */
static int
hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportip4_elem data = { };
	u32 ip, ip_to = 0, p = 0, port, port_to;
	u32 timeout = h->timeout;
	bool with_ports = false;
	int ret;

	/* Mandatory attributes present and numeric ones in network order? */
	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
	if (ret)
		return ret;

	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(data.proto);

		if (data.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	/* Only port-carrying protocols (and ICMP, which stores type/code
	 * in the port slot) keep a meaningful port value. */
	if (!(with_ports || data.proto == IPPROTO_ICMP))
		data.port = 0;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(h->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	/* Single-element operation: no range attributes given (or TEST). */
	if (adt == IPSET_TEST ||
	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
	      tb[IPSET_ATTR_PORT_TO])) {
		ret = adtfn(set, &data, timeout, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	/* Work on host-order copies for range iteration. */
	ip = ntohl(data.ip);
	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to)
			swap(ip, ip_to);
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(ip, ip_to, cidr);
	} else
		ip_to = ip;

	port_to = port = ntohs(data.port);
	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
		if (port > port_to)
			swap(port, port_to);
	}
	/* After a resize-triggered restart continue where we stopped. */
	if (retried)
		ip = h->next.ip;
	for (; !before(ip_to, ip); ip++) {
		p = retried && ip == h->next.ip ? h->next.port : port;
		for (; p <= port_to; p++) {
			data.ip = htonl(ip);
			data.port = htons(p);
			ret = adtfn(set, &data, timeout, flags);

			if (ret && !ip_set_eexist(ret, flags))
				return ret;
			else
				ret = 0;
		}
	}
	return ret;
}

/* Two sets are swap/compare compatible when their tunables match. */
static bool
hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct ip_set_hash *x = a->data;
	const struct ip_set_hash *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       x->timeout == y->timeout;
}

/* The type variant functions: IPv6 */

struct hash_ipportip6_elem {
	union nf_inet_addr ip;
	union nf_inet_addr ip2;
	__be16 port;
	u8 proto;
	u8 padding;
};

struct hash_ipportip6_telem {
	union nf_inet_addr ip;
	union nf_inet_addr ip2;
	__be16 port;
	u8 proto;
	u8 padding;
	unsigned long timeout;
};

static inline bool
hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
			  const struct hash_ipportip6_elem *ip2,
			  u32 *multi)
{
	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
	       ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
	       ip1->port == ip2->port &&
	       ip1->proto == ip2->proto;
}

static inline bool
hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
{
	return elem->proto == 0;
}

static inline void
hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
			 const struct hash_ipportip6_elem *src)
{
	memcpy(dst, src, sizeof(*dst));
}

static inline void
hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
{
	elem->proto = 0;
}

static bool
hash_ipportip6_data_list(struct sk_buff *skb,
			 const struct hash_ipportip6_elem *data)
{
	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
	return 0;

nla_put_failure:
	return 1;
}

static bool
hash_ipportip6_data_tlist(struct sk_buff *skb,
			  const struct hash_ipportip6_elem *data)
{
	const struct hash_ipportip6_telem *e =
		(const struct hash_ipportip6_telem *)data;

	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
		      htonl(ip_set_timeout_get(e->timeout)));
	return 0;

nla_put_failure:
	return 1;
}

/* Re-instantiate the ahash template, this time for IPv6. */
#undef PF
#undef HOST_MASK

#define PF		6
#define HOST_MASK	128
#include <linux/netfilter/ipset/ip_set_ahash.h>

/* IPv6 ranges are not supported, so only the port position is saved. */
static inline void
hash_ipportip6_data_next(struct ip_set_hash *h,
			 const struct hash_ipportip6_elem *d)
{
	h->next.port = ntohs(d->port);
}

static int
hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportip6_elem data = { };

	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
				 &data.port, &data.proto))
		return -EINVAL;

	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);

	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}

/* Userspace add/del/test for IPv6; only port ranges are accepted
 * (IP_TO/CIDR are rejected up front). */
static int
hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportip6_elem data = { };
	u32 port, port_to;
	u32 timeout = h->timeout;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
	if (ret)
		return ret;

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(data.proto);

		if (data.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || data.proto == IPPROTO_ICMPV6))
		data.port = 0;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(h->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
		ret = adtfn(set, &data, timeout, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	port = ntohs(data.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	/* After a resize-triggered restart continue where we stopped. */
	if (retried)
		port = h->next.port;
	for (; port <= port_to; port++) {
		data.port = htons(port);
		ret = adtfn(set, &data, timeout, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}

/* Create hash:ip,port,ip type of sets */
static int
hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	struct ip_set_hash *h;
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
	u8 hbits;
	size_t hsize;

	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->maxelem = maxelem;
	/* Random hash seed so bucket layout is not attacker-predictable. */
	get_random_bytes(&h->initval, sizeof(h->initval));
	h->timeout = IPSET_NO_TIMEOUT;

	hbits = htable_bits(hashsize);
	hsize = htable_size(hbits);
	/* htable_size() returns 0 when the table would overflow size_t. */
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	h->table = ip_set_alloc(hsize);
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	}
	h->table->htable_bits = hbits;

	set->data = h;

	/* Pick the timeout-capable variant (and start its gc) when a
	 * default timeout was supplied, else the plain variant. */
	if (tb[IPSET_ATTR_TIMEOUT]) {
		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);

		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;

		if (set->family == NFPROTO_IPV4)
			hash_ipportip4_gc_init(set);
		else
			hash_ipportip6_gc_init(set);
	} else {
		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_variant : &hash_ipportip6_variant;
	}

	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(h->table->htable_bits),
		 h->table->htable_bits, h->maxelem, set->data, h->table);

	return 0;
}

static struct ip_set_type hash_ipportip_type __read_mostly = {
	.name		= "hash:ip,port,ip",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
	.dimension	= IPSET_DIM_THREE,
	.family		= NFPROTO_UNSPEC,
	.revision_min	= 0,
	.revision_max	= 1,	/* SCTP and UDPLITE support added */
	.create		= hash_ipportip_create,
	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
	},
	.me		= THIS_MODULE,
};

static int __init
hash_ipportip_init(void)
{
	return ip_set_type_register(&hash_ipportip_type);
}

static void __exit
hash_ipportip_fini(void)
{
	ip_set_type_unregister(&hash_ipportip_type);
}

module_init(hash_ipportip_init);
module_exit(hash_ipportip_fini);
gpl-2.0
Evervolv/android_kernel_lge_msm8974
fs/ntfs/dir.c
5793
53809
/** * dir.c - NTFS kernel directory operations. Part of the Linux-NTFS project. * * Copyright (c) 2001-2007 Anton Altaparmakov * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/buffer_head.h> #include <linux/slab.h> #include "dir.h" #include "aops.h" #include "attrib.h" #include "mft.h" #include "debug.h" #include "ntfs.h" /** * The little endian Unicode string $I30 as a global constant. */ ntfschar I30[5] = { cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'), 0 }; /** * ntfs_lookup_inode_by_name - find an inode in a directory given its name * @dir_ni: ntfs inode of the directory in which to search for the name * @uname: Unicode name for which to search in the directory * @uname_len: length of the name @uname in Unicode characters * @res: return the found file name if necessary (see below) * * Look for an inode with name @uname in the directory with inode @dir_ni. * ntfs_lookup_inode_by_name() walks the contents of the directory looking for * the Unicode name. If the name is found in the directory, the corresponding * inode number (>= 0) is returned as a mft reference in cpu format, i.e. it * is a 64-bit number containing the sequence number. 
* * On error, a negative value is returned corresponding to the error code. In * particular if the inode is not found -ENOENT is returned. Note that you * can't just check the return value for being negative, you have to check the * inode number for being negative which you can extract using MREC(return * value). * * Note, @uname_len does not include the (optional) terminating NULL character. * * Note, we look for a case sensitive match first but we also look for a case * insensitive match at the same time. If we find a case insensitive match, we * save that for the case that we don't find an exact match, where we return * the case insensitive match and setup @res (which we allocate!) with the mft * reference, the file name type, length and with a copy of the little endian * Unicode file name itself. If we match a file name which is in the DOS name * space, we only return the mft reference and file name type in @res. * ntfs_lookup() then uses this to find the long file name in the inode itself. * This is to avoid polluting the dcache with short file names. We want them to * work but we don't care for how quickly one can access them. This also fixes * the dcache aliasing issues. * * Locking: - Caller must hold i_mutex on the directory. * - Each page cache page in the index allocation mapping must be * locked whilst being accessed otherwise we may find a corrupt * page due to it being under ->writepage at the moment which * applies the mst protection fixups before writing out and then * removes them again after the write is complete after which it * unlocks the page. 
*/
MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
		const int uname_len, ntfs_name **res)
{
	ntfs_volume *vol = dir_ni->vol;
	struct super_block *sb = vol->sb;
	MFT_RECORD *m;
	INDEX_ROOT *ir;
	INDEX_ENTRY *ie;
	INDEX_ALLOCATION *ia;
	u8 *index_end;
	u64 mref;
	ntfs_attr_search_ctx *ctx;
	int err, rc;
	VCN vcn, old_vcn;
	struct address_space *ia_mapping;
	struct page *page;
	u8 *kaddr;
	/*
	 * Cached case insensitive match, if any.  Ownership is transferred to
	 * the caller via *res on success; freed on all error paths below.
	 */
	ntfs_name *name = NULL;

	BUG_ON(!S_ISDIR(VFS_I(dir_ni)->i_mode));
	BUG_ON(NInoAttr(dir_ni));
	/* Get hold of the mft record for the directory. */
	m = map_mft_record(dir_ni);
	if (IS_ERR(m)) {
		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
				-PTR_ERR(m));
		return ERR_MREF(PTR_ERR(m));
	}
	ctx = ntfs_attr_get_search_ctx(dir_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the index root attribute in the mft record. */
	err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
			0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(sb, "Index root attribute missing in "
					"directory inode 0x%lx.",
					dir_ni->mft_no);
			err = -EIO;
		}
		goto err_out;
	}
	/* Get to the index root value (it's been verified in read_inode). */
	ir = (INDEX_ROOT*)((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset));
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Phase 1: walk the entries of the index root (resident in the mft
	 * record).  Loop until we exceed valid memory (corruption case) or
	 * until we reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end)
			goto dir_err_out;
		/*
		 * The last entry cannot contain a name. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/*
		 * We perform a case sensitive comparison and if that matches
		 * we are done and return the mft reference of the inode (i.e.
		 * the inode number together with the sequence number for
		 * consistency checking). We convert it to cpu format before
		 * returning.
		 */
		if (ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
found_it:
			/*
			 * We have a perfect match, so we don't need to care
			 * about having matched imperfectly before, so we can
			 * free name and set *res to NULL.
			 * However, if the perfect match is a short file name,
			 * we need to signal this through *res, so that
			 * ntfs_lookup() can fix dcache aliasing issues.
			 * As an optimization we just reuse an existing
			 * allocation of *res.
			 */
			if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
				if (!name) {
					name = kmalloc(sizeof(ntfs_name),
							GFP_NOFS);
					if (!name) {
						err = -ENOMEM;
						goto err_out;
					}
				}
				name->mref = le64_to_cpu(
						ie->data.dir.indexed_file);
				name->type = FILE_NAME_DOS;
				name->len = 0;
				*res = name;
			} else {
				kfree(name);
				*res = NULL;
			}
			mref = le64_to_cpu(ie->data.dir.indexed_file);
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(dir_ni);
			return mref;
		}
		/*
		 * For a case insensitive mount, we also perform a case
		 * insensitive comparison (provided the file name is not in the
		 * POSIX namespace). If the comparison matches, and the name is
		 * in the WIN32 namespace, we cache the filename in *res so
		 * that the caller, ntfs_lookup(), can work on it. If the
		 * comparison matches, and the name is in the DOS namespace, we
		 * only cache the mft reference and the file name type (we set
		 * the name length to zero for simplicity).
		 */
		if (!NVolCaseSensitive(vol) &&
				ie->key.file_name.file_name_type &&
				ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length,
				IGNORE_CASE, vol->upcase, vol->upcase_len)) {
			int name_size = sizeof(ntfs_name);
			u8 type = ie->key.file_name.file_name_type;
			u8 len = ie->key.file_name.file_name_length;

			/* Only one case insensitive matching name allowed. */
			if (name) {
				ntfs_error(sb, "Found already allocated name "
						"in phase 1. Please run chkdsk "
						"and if that doesn't find any "
						"errors please report you saw "
						"this message to "
						"linux-ntfs-dev@lists."
						"sourceforge.net.");
				goto dir_err_out;
			}
			/* DOS names cache only the mref and type, no chars. */
			if (type != FILE_NAME_DOS)
				name_size += len * sizeof(ntfschar);
			name = kmalloc(name_size, GFP_NOFS);
			if (!name) {
				err = -ENOMEM;
				goto err_out;
			}
			name->mref = le64_to_cpu(ie->data.dir.indexed_file);
			name->type = type;
			if (type != FILE_NAME_DOS) {
				name->len = len;
				memcpy(name->name, ie->key.file_name.file_name,
						len * sizeof(ntfschar));
			} else
				name->len = 0;
			*res = name;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				IGNORE_CASE, vol->upcase, vol->upcase_len);
		/*
		 * If uname collates before the name of the current entry, there
		 * is definitely no such name in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/* The names are not equal, continue the search. */
		if (rc)
			continue;
		/*
		 * Names match with case insensitive comparison, now try the
		 * case sensitive comparison, which is required for proper
		 * collation.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
		if (rc == -1)
			break;
		if (rc)
			continue;
		/*
		 * Perfect match, this will never happen as the
		 * ntfs_are_names_equal() call will have gotten a match but we
		 * still treat it correctly.
		 */
		goto found_it;
	}
	/*
	 * We have finished with this index without success. Check for the
	 * presence of a child node and if not present return -ENOENT, unless
	 * we have got a matching name cached in name in which case return the
	 * mft reference associated with it.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		if (name) {
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(dir_ni);
			return name->mref;
		}
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto err_out;
	} /* Child node present, descend into it. */
	/* Consistency check: Verify that an index allocation exists. */
	if (!NInoIndexAllocPresent(dir_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one. Directory inode 0x%lx is "
				"corrupt or driver bug.", dir_ni->mft_no);
		goto err_out;
	}
	/*
	 * Get the starting vcn of the index_block holding the child node.
	 * The sub-node VCN is stored in the last eight bytes of the entry.
	 */
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	ia_mapping = VFS_I(dir_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record. Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(dir_ni);
	m = NULL;
	ctx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
	page = ntfs_map_page(ia_mapping, vcn <<
			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map directory index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/* Get to the index allocation block. */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
				"inode 0x%lx or driver bug.", dir_ni->mft_no);
		goto unm_err_out;
	}
	/* Catch multi sector transfer fixup errors. */
	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
				"corrupt. Corrupt inode 0x%lx. Run chkdsk.",
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx). "
				"Directory inode 0x%lx is corrupt or driver "
				"bug.", (unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	/*
	 * NOTE(review): 0x18 looks like the fixed byte offset of the index
	 * data within the INDEX_ALLOCATION header - confirm against layout.h.
	 */
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			dir_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx has a size (%u) differing from the "
				"directory specified size (%u). Directory "
				"inode is corrupt or driver bug.",
				(unsigned long long)vcn, dir_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				dir_ni->itype.index.block_size);
		goto unm_err_out;
	}
	index_end = (u8*)ia + dir_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx crosses page boundary. Impossible! "
				"Cannot access! This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				dir_ni->mft_no);
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
				"inode 0x%lx exceeds maximum size.",
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Phase 2: iterate similar to above big loop but applied to index
	 * buffer, thus loop until we exceed valid memory (corruption case) or
	 * until we reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds check. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end) {
			ntfs_error(sb, "Index entry out of bounds in "
					"directory inode 0x%lx.",
					dir_ni->mft_no);
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a name. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/*
		 * We perform a case sensitive comparison and if that matches
		 * we are done and return the mft reference of the inode (i.e.
		 * the inode number together with the sequence number for
		 * consistency checking). We convert it to cpu format before
		 * returning.
		 */
		if (ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
found_it2:
			/*
			 * We have a perfect match, so we don't need to care
			 * about having matched imperfectly before, so we can
			 * free name and set *res to NULL.
			 * However, if the perfect match is a short file name,
			 * we need to signal this through *res, so that
			 * ntfs_lookup() can fix dcache aliasing issues.
			 * As an optimization we just reuse an existing
			 * allocation of *res.
			 */
			if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
				if (!name) {
					name = kmalloc(sizeof(ntfs_name),
							GFP_NOFS);
					if (!name) {
						err = -ENOMEM;
						goto unm_err_out;
					}
				}
				name->mref = le64_to_cpu(
						ie->data.dir.indexed_file);
				name->type = FILE_NAME_DOS;
				name->len = 0;
				*res = name;
			} else {
				kfree(name);
				*res = NULL;
			}
			mref = le64_to_cpu(ie->data.dir.indexed_file);
			unlock_page(page);
			ntfs_unmap_page(page);
			return mref;
		}
		/*
		 * For a case insensitive mount, we also perform a case
		 * insensitive comparison (provided the file name is not in the
		 * POSIX namespace). If the comparison matches, and the name is
		 * in the WIN32 namespace, we cache the filename in *res so
		 * that the caller, ntfs_lookup(), can work on it. If the
		 * comparison matches, and the name is in the DOS namespace, we
		 * only cache the mft reference and the file name type (we set
		 * the name length to zero for simplicity).
		 */
		if (!NVolCaseSensitive(vol) &&
				ie->key.file_name.file_name_type &&
				ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length,
				IGNORE_CASE, vol->upcase, vol->upcase_len)) {
			int name_size = sizeof(ntfs_name);
			u8 type = ie->key.file_name.file_name_type;
			u8 len = ie->key.file_name.file_name_length;

			/* Only one case insensitive matching name allowed. */
			if (name) {
				ntfs_error(sb, "Found already allocated name "
						"in phase 2. Please run chkdsk "
						"and if that doesn't find any "
						"errors please report you saw "
						"this message to "
						"linux-ntfs-dev@lists."
						"sourceforge.net.");
				unlock_page(page);
				ntfs_unmap_page(page);
				goto dir_err_out;
			}
			/* DOS names cache only the mref and type, no chars. */
			if (type != FILE_NAME_DOS)
				name_size += len * sizeof(ntfschar);
			name = kmalloc(name_size, GFP_NOFS);
			if (!name) {
				err = -ENOMEM;
				goto unm_err_out;
			}
			name->mref = le64_to_cpu(ie->data.dir.indexed_file);
			name->type = type;
			if (type != FILE_NAME_DOS) {
				name->len = len;
				memcpy(name->name, ie->key.file_name.file_name,
						len * sizeof(ntfschar));
			} else
				name->len = 0;
			*res = name;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				IGNORE_CASE, vol->upcase, vol->upcase_len);
		/*
		 * If uname collates before the name of the current entry, there
		 * is definitely no such name in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/* The names are not equal, continue the search. */
		if (rc)
			continue;
		/*
		 * Names match with case insensitive comparison, now try the
		 * case sensitive comparison, which is required for proper
		 * collation.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
		if (rc == -1)
			break;
		if (rc)
			continue;
		/*
		 * Perfect match, this will never happen as the
		 * ntfs_are_names_equal() call will have gotten a match but we
		 * still treat it correctly.
		 */
		goto found_it2;
	}
	/*
	 * We have finished with this index buffer without success. Check for
	 * the presence of a child node.
	 */
	if (ie->flags & INDEX_ENTRY_NODE) {
		if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
			ntfs_error(sb, "Index entry with child node found in "
					"a leaf node in directory inode 0x%lx.",
					dir_ni->mft_no);
			goto unm_err_out;
		}
		/* Child node present, descend into it. */
		old_vcn = vcn;
		vcn = sle64_to_cpup((sle64*)((u8*)ie +
				le16_to_cpu(ie->length) - 8));
		if (vcn >= 0) {
			/* If vcn is in the same page cache page as old_vcn we
			 * recycle the mapped page. */
			if (old_vcn << vol->cluster_size_bits >>
					PAGE_CACHE_SHIFT == vcn <<
					vol->cluster_size_bits >>
					PAGE_CACHE_SHIFT)
				goto fast_descend_into_child_node;
			unlock_page(page);
			ntfs_unmap_page(page);
			goto descend_into_child_node;
		}
		ntfs_error(sb, "Negative child node vcn in directory inode "
				"0x%lx.", dir_ni->mft_no);
		goto unm_err_out;
	}
	/*
	 * No child node present, return -ENOENT, unless we have got a matching
	 * name cached in name in which case return the mft reference
	 * associated with it.
	 */
	if (name) {
		unlock_page(page);
		ntfs_unmap_page(page);
		return name->mref;
	}
	ntfs_debug("Entry not found.");
	err = -ENOENT;
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
err_out:
	if (!err)
		err = -EIO;
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(dir_ni);
	if (name) {
		kfree(name);
		*res = NULL;
	}
	return ERR_MREF(err);
dir_err_out:
	ntfs_error(sb, "Corrupt directory. Aborting lookup.");
	goto err_out;
}

#if 0

// TODO: (AIA)
// The algorithm embedded in this code will be required for the time when we
// want to support adding of entries to directories, where we require correct
// collation of file names in order not to cause corruption of the filesystem.

/**
 * ntfs_lookup_inode_by_name - find an inode in a directory given its name
 * @dir_ni:	ntfs inode of the directory in which to search for the name
 * @uname:	Unicode name for which to search in the directory
 * @uname_len:	length of the name @uname in Unicode characters
 *
 * Look for an inode with name @uname in the directory with inode @dir_ni.
 * ntfs_lookup_inode_by_name() walks the contents of the directory looking for
 * the Unicode name. If the name is found in the directory, the corresponding
 * inode number (>= 0) is returned as a mft reference in cpu format, i.e. it
 * is a 64-bit number containing the sequence number.
 *
 * On error, a negative value is returned corresponding to the error code. In
 * particular if the inode is not found -ENOENT is returned. Note that you
 * can't just check the return value for being negative, you have to check the
 * inode number for being negative which you can extract using MREC(return
 * value).
 *
 * Note, @uname_len does not include the (optional) terminating NULL character.
 */
u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
		const int uname_len)
{
	ntfs_volume *vol = dir_ni->vol;
	struct super_block *sb = vol->sb;
	MFT_RECORD *m;
	INDEX_ROOT *ir;
	INDEX_ENTRY *ie;
	INDEX_ALLOCATION *ia;
	u8 *index_end;
	u64 mref;
	ntfs_attr_search_ctx *ctx;
	int err, rc;
	IGNORE_CASE_BOOL ic;
	VCN vcn, old_vcn;
	struct address_space *ia_mapping;
	struct page *page;
	u8 *kaddr;

	/* Get hold of the mft record for the directory. */
	m = map_mft_record(dir_ni);
	if (IS_ERR(m)) {
		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
				-PTR_ERR(m));
		return ERR_MREF(PTR_ERR(m));
	}
	ctx = ntfs_attr_get_search_ctx(dir_ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the index root attribute in the mft record. */
	err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
			0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(sb, "Index root attribute missing in "
					"directory inode 0x%lx.",
					dir_ni->mft_no);
			err = -EIO;
		}
		goto err_out;
	}
	/* Get to the index root value (it's been verified in read_inode). */
	ir = (INDEX_ROOT*)((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset));
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end)
			goto dir_err_out;
		/*
		 * The last entry cannot contain a name. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/*
		 * If the current entry has a name type of POSIX, the name is
		 * case sensitive and not otherwise. This has the effect of us
		 * not being able to access any POSIX file names which collate
		 * after the non-POSIX one when they only differ in case, but
		 * anyone doing screwy stuff like that deserves to burn in
		 * hell... Doing that kind of stuff on NT4 actually causes
		 * corruption on the partition even when using SP6a and Linux
		 * is not involved at all.
		 */
		ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
				CASE_SENSITIVE;
		/*
		 * If the names match perfectly, we are done and return the
		 * mft reference of the inode (i.e. the inode number together
		 * with the sequence number for consistency checking. We
		 * convert it to cpu format before returning.
		 */
		if (ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, ic,
				vol->upcase, vol->upcase_len)) {
found_it:
			mref = le64_to_cpu(ie->data.dir.indexed_file);
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(dir_ni);
			return mref;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				IGNORE_CASE, vol->upcase, vol->upcase_len);
		/*
		 * If uname collates before the name of the current entry, there
		 * is definitely no such name in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/* The names are not equal, continue the search. */
		if (rc)
			continue;
		/*
		 * Names match with case insensitive comparison, now try the
		 * case sensitive comparison, which is required for proper
		 * collation.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
		if (rc == -1)
			break;
		if (rc)
			continue;
		/*
		 * Perfect match, this will never happen as the
		 * ntfs_are_names_equal() call will have gotten a match but we
		 * still treat it correctly.
		 */
		goto found_it;
	}
	/*
	 * We have finished with this index without success. Check for the
	 * presence of a child node.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		/* No child node, return -ENOENT. */
		err = -ENOENT;
		goto err_out;
	} /* Child node present, descend into it. */
	/* Consistency check: Verify that an index allocation exists. */
	if (!NInoIndexAllocPresent(dir_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one. Directory inode 0x%lx is "
				"corrupt or driver bug.", dir_ni->mft_no);
		goto err_out;
	}
	/*
	 * Get the starting vcn of the index_block holding the child node.
	 * NOTE(review): unlike the live version above, this call is missing
	 * the (sle64*) cast on the pointer expression; sle64_to_cpup() takes
	 * a pointer, so this would not compile if this #if 0 block were ever
	 * re-enabled - add the cast before enabling.
	 */
	vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8);
	ia_mapping = VFS_I(dir_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record. Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(dir_ni);
	m = NULL;
	ctx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
	page = ntfs_map_page(ia_mapping, vcn <<
			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map directory index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/* Get to the index allocation block. */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
				"inode 0x%lx or driver bug.", dir_ni->mft_no);
		goto unm_err_out;
	}
	/* Catch multi sector transfer fixup errors. */
	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
		ntfs_error(sb, "Directory index record with vcn 0x%llx is "
				"corrupt. Corrupt inode 0x%lx. Run chkdsk.",
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx). "
				"Directory inode 0x%lx is corrupt or driver "
				"bug.", (unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			dir_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx has a size (%u) differing from the "
				"directory specified size (%u). Directory "
				"inode is corrupt or driver bug.",
				(unsigned long long)vcn, dir_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				dir_ni->itype.index.block_size);
		goto unm_err_out;
	}
	index_end = (u8*)ia + dir_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
				"0x%lx crosses page boundary. Impossible! "
				"Cannot access! This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				dir_ni->mft_no);
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
				"inode 0x%lx exceeds maximum size.",
				(unsigned long long)vcn, dir_ni->mft_no);
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Iterate similar to above big loop but applied to index buffer, thus
	 * loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds check. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->key_length) >
				index_end) {
			ntfs_error(sb, "Index entry out of bounds in "
					"directory inode 0x%lx.",
					dir_ni->mft_no);
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a name. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/*
		 * If the current entry has a name type of POSIX, the name is
		 * case sensitive and not otherwise. This has the effect of us
		 * not being able to access any POSIX file names which collate
		 * after the non-POSIX one when they only differ in case, but
		 * anyone doing screwy stuff like that deserves to burn in
		 * hell... Doing that kind of stuff on NT4 actually causes
		 * corruption on the partition even when using SP6a and Linux
		 * is not involved at all.
		 */
		ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
				CASE_SENSITIVE;
		/*
		 * If the names match perfectly, we are done and return the
		 * mft reference of the inode (i.e. the inode number together
		 * with the sequence number for consistency checking. We
		 * convert it to cpu format before returning.
		 */
		if (ntfs_are_names_equal(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, ic,
				vol->upcase, vol->upcase_len)) {
found_it2:
			mref = le64_to_cpu(ie->data.dir.indexed_file);
			unlock_page(page);
			ntfs_unmap_page(page);
			return mref;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				IGNORE_CASE, vol->upcase, vol->upcase_len);
		/*
		 * If uname collates before the name of the current entry, there
		 * is definitely no such name in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/* The names are not equal, continue the search. */
		if (rc)
			continue;
		/*
		 * Names match with case insensitive comparison, now try the
		 * case sensitive comparison, which is required for proper
		 * collation.
		 */
		rc = ntfs_collate_names(uname, uname_len,
				(ntfschar*)&ie->key.file_name.file_name,
				ie->key.file_name.file_name_length, 1,
				CASE_SENSITIVE, vol->upcase, vol->upcase_len);
		if (rc == -1)
			break;
		if (rc)
			continue;
		/*
		 * Perfect match, this will never happen as the
		 * ntfs_are_names_equal() call will have gotten a match but we
		 * still treat it correctly.
		 */
		goto found_it2;
	}
	/*
	 * We have finished with this index buffer without success. Check for
	 * the presence of a child node.
	 */
	if (ie->flags & INDEX_ENTRY_NODE) {
		if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
			ntfs_error(sb, "Index entry with child node found in "
					"a leaf node in directory inode 0x%lx.",
					dir_ni->mft_no);
			goto unm_err_out;
		}
		/* Child node present, descend into it. */
		old_vcn = vcn;
		/*
		 * NOTE(review): same missing (sle64*) cast as in the first
		 * descend above - fix before re-enabling this #if 0 block.
		 */
		vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->length) - 8);
		if (vcn >= 0) {
			/* If vcn is in the same page cache page as old_vcn we
			 * recycle the mapped page. */
			if (old_vcn << vol->cluster_size_bits >>
					PAGE_CACHE_SHIFT == vcn <<
					vol->cluster_size_bits >>
					PAGE_CACHE_SHIFT)
				goto fast_descend_into_child_node;
			unlock_page(page);
			ntfs_unmap_page(page);
			goto descend_into_child_node;
		}
		ntfs_error(sb, "Negative child node vcn in directory inode "
				"0x%lx.", dir_ni->mft_no);
		goto unm_err_out;
	}
	/* No child node, return -ENOENT. */
	ntfs_debug("Entry not found.");
	err = -ENOENT;
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
err_out:
	if (!err)
		err = -EIO;
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(dir_ni);
	return ERR_MREF(err);
dir_err_out:
	ntfs_error(sb, "Corrupt directory. Aborting lookup.");
	goto err_out;
}

#endif

/**
 * ntfs_filldir - ntfs specific filldir method
 * @vol:	current ntfs volume
 * @fpos:	position in the directory
 * @ndir:	ntfs inode of current directory
 * @ia_page:	page in which the index allocation buffer @ie is in resides
 * @ie:		current index entry
 * @name:	buffer to use for the converted name
 * @dirent:	vfs filldir callback context
 * @filldir:	vfs filldir callback
 *
 * Convert the Unicode @name to the loaded NLS and pass it to the @filldir
 * callback.
 *
 * If @ia_page is not NULL it is the locked page containing the index
 * allocation block containing the index entry @ie.
 *
 * Note, we drop (and then reacquire) the page lock on @ia_page across the
 * @filldir() call otherwise we would deadlock with NFSd when it calls ->lookup
 * since ntfs_lookup() will lock the same page. As an optimization, we do not
 * retake the lock if we are returning a non-zero value as ntfs_readdir()
 * would need to drop the lock immediately anyway.
*/
static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
		ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
		u8 *name, void *dirent, filldir_t filldir)
{
	unsigned long mref;
	int name_len, rc;
	unsigned dt_type;
	FILE_NAME_TYPE_FLAGS name_type;

	/* Suppress entries that must not be visible through ->readdir. */
	name_type = ie->key.file_name.file_name_type;
	if (name_type == FILE_NAME_DOS) {
		ntfs_debug("Skipping DOS name space entry.");
		return 0;
	}
	if (MREF_LE(ie->data.dir.indexed_file) == FILE_root) {
		ntfs_debug("Skipping root directory self reference entry.");
		return 0;
	}
	if (MREF_LE(ie->data.dir.indexed_file) < FILE_first_user &&
			!NVolShowSystemFiles(vol)) {
		ntfs_debug("Skipping system file.");
		return 0;
	}
	/*
	 * Convert the Unicode name to the mounted NLS into the caller
	 * supplied @name buffer (sized by the caller to at least
	 * NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1 bytes, matching the
	 * limit passed here).  Unrepresentable names are skipped, not failed.
	 */
	name_len = ntfs_ucstonls(vol, (ntfschar*)&ie->key.file_name.file_name,
			ie->key.file_name.file_name_length, &name,
			NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1);
	if (name_len <= 0) {
		ntfs_warning(vol->sb, "Skipping unrepresentable inode 0x%llx.",
				(long long)MREF_LE(ie->data.dir.indexed_file));
		return 0;
	}
	if (ie->key.file_name.file_attributes &
			FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT)
		dt_type = DT_DIR;
	else
		dt_type = DT_REG;
	mref = MREF_LE(ie->data.dir.indexed_file);
	/*
	 * Drop the page lock otherwise we deadlock with NFS when it calls
	 * ->lookup since ntfs_lookup() will lock the same page.
	 */
	if (ia_page)
		unlock_page(ia_page);
	ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode "
			"0x%lx, DT_%s.", name, name_len, fpos, mref,
			dt_type == DT_DIR ? "DIR" : "REG");
	rc = filldir(dirent, name, name_len, fpos, mref, dt_type);
	/* Relock the page but not if we are aborting ->readdir. */
	if (!rc && ia_page)
		lock_page(ia_page);
	return rc;
}

/*
 * We use the same basic approach as the old NTFS driver, i.e. we parse the
 * index root entries and then the index allocation entries that are marked
 * as in use in the index bitmap.
 *
 * While this will return the names in random order this doesn't matter for
 * ->readdir but OTOH results in a faster ->readdir.
 *
 * VFS calls ->readdir without BKL but with i_mutex held.
This protects the VFS * parts (e.g. ->f_pos and ->i_size, and it also protects against directory * modifications). * * Locking: - Caller must hold i_mutex on the directory. * - Each page cache page in the index allocation mapping must be * locked whilst being accessed otherwise we may find a corrupt * page due to it being under ->writepage at the moment which * applies the mst protection fixups before writing out and then * removes them again after the write is complete after which it * unlocks the page. */ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { s64 ia_pos, ia_start, prev_ia_pos, bmp_pos; loff_t fpos, i_size; struct inode *bmp_vi, *vdir = filp->f_path.dentry->d_inode; struct super_block *sb = vdir->i_sb; ntfs_inode *ndir = NTFS_I(vdir); ntfs_volume *vol = NTFS_SB(sb); MFT_RECORD *m; INDEX_ROOT *ir = NULL; INDEX_ENTRY *ie; INDEX_ALLOCATION *ia; u8 *name = NULL; int rc, err, ir_pos, cur_bmp_pos; struct address_space *ia_mapping, *bmp_mapping; struct page *bmp_page = NULL, *ia_page = NULL; u8 *kaddr, *bmp, *index_end; ntfs_attr_search_ctx *ctx; fpos = filp->f_pos; ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.", vdir->i_ino, fpos); rc = err = 0; /* Are we at end of dir yet? */ i_size = i_size_read(vdir); if (fpos >= i_size + vol->mft_record_size) goto done; /* Emulate . and .. for all directories. */ if (!fpos) { ntfs_debug("Calling filldir for . with len 1, fpos 0x0, " "inode 0x%lx, DT_DIR.", vdir->i_ino); rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR); if (rc) goto done; fpos++; } if (fpos == 1) { ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, " "inode 0x%lx, DT_DIR.", (unsigned long)parent_ino(filp->f_path.dentry)); rc = filldir(dirent, "..", 2, fpos, parent_ino(filp->f_path.dentry), DT_DIR); if (rc) goto done; fpos++; } m = NULL; ctx = NULL; /* * Allocate a buffer to store the current name being processed * converted to format determined by current NLS. 
*/ name = kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1, GFP_NOFS); if (unlikely(!name)) { err = -ENOMEM; goto err_out; } /* Are we jumping straight into the index allocation attribute? */ if (fpos >= vol->mft_record_size) goto skip_index_root; /* Get hold of the mft record for the directory. */ m = map_mft_record(ndir); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(ndir, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } /* Get the offset into the index root attribute. */ ir_pos = (s64)fpos; /* Find the index root attribute in the mft record. */ err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_error(sb, "Index root attribute missing in directory " "inode 0x%lx.", vdir->i_ino); goto err_out; } /* * Copy the index root attribute value to a buffer so that we can put * the search context and unmap the mft record before calling the * filldir() callback. We need to do this because of NFSd which calls * ->lookup() from its filldir callback() and this causes NTFS to * deadlock as ntfs_lookup() maps the mft record of the directory and * we have got it mapped here already. The only solution is for us to * unmap the mft record here so that a call to ntfs_lookup() is able to * map the mft record without deadlocking. */ rc = le32_to_cpu(ctx->attr->data.resident.value_length); ir = kmalloc(rc, GFP_NOFS); if (unlikely(!ir)) { err = -ENOMEM; goto err_out; } /* Copy the index root value (it has been verified in read_inode). */ memcpy(ir, (u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), rc); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ndir); ctx = NULL; m = NULL; index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length); /* The first index entry. 
*/ ie = (INDEX_ENTRY*)((u8*)&ir->index + le32_to_cpu(ir->index.entries_offset)); /* * Loop until we exceed valid memory (corruption case) or until we * reach the last entry or until filldir tells us it has had enough * or signals an error (both covered by the rc test). */ for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) { ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir); /* Bounds checks. */ if (unlikely((u8*)ie < (u8*)ir || (u8*)ie + sizeof(INDEX_ENTRY_HEADER) > index_end || (u8*)ie + le16_to_cpu(ie->key_length) > index_end)) goto err_out; /* The last entry cannot contain a name. */ if (ie->flags & INDEX_ENTRY_END) break; /* Skip index root entry if continuing previous readdir. */ if (ir_pos > (u8*)ie - (u8*)ir) continue; /* Advance the position even if going to skip the entry. */ fpos = (u8*)ie - (u8*)ir; /* Submit the name to the filldir callback. */ rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent, filldir); if (rc) { kfree(ir); goto abort; } } /* We are done with the index root and can free the buffer. */ kfree(ir); ir = NULL; /* If there is no index allocation attribute we are finished. */ if (!NInoIndexAllocPresent(ndir)) goto EOD; /* Advance fpos to the beginning of the index allocation. */ fpos = vol->mft_record_size; skip_index_root: kaddr = NULL; prev_ia_pos = -1LL; /* Get the offset into the index allocation attribute. */ ia_pos = (s64)fpos - vol->mft_record_size; ia_mapping = vdir->i_mapping; ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino); bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4); if (IS_ERR(bmp_vi)) { ntfs_error(sb, "Failed to get bitmap attribute."); err = PTR_ERR(bmp_vi); goto err_out; } bmp_mapping = bmp_vi->i_mapping; /* Get the starting bitmap bit position and sanity check it. 
*/ bmp_pos = ia_pos >> ndir->itype.index.block_size_bits; if (unlikely(bmp_pos >> 3 >= i_size_read(bmp_vi))) { ntfs_error(sb, "Current index allocation position exceeds " "index bitmap size."); goto iput_err_out; } /* Get the starting bit position in the current bitmap page. */ cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); get_next_bmp_page: ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), (unsigned long long)bmp_pos & (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); bmp_page = ntfs_map_page(bmp_mapping, bmp_pos >> (3 + PAGE_CACHE_SHIFT)); if (IS_ERR(bmp_page)) { ntfs_error(sb, "Reading index bitmap failed."); err = PTR_ERR(bmp_page); bmp_page = NULL; goto iput_err_out; } bmp = (u8*)page_address(bmp_page); /* Find next index block in use. */ while (!(bmp[cur_bmp_pos >> 3] & (1 << (cur_bmp_pos & 7)))) { find_next_index_buffer: cur_bmp_pos++; /* * If we have reached the end of the bitmap page, get the next * page, and put away the old one. */ if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { ntfs_unmap_page(bmp_page); bmp_pos += PAGE_CACHE_SIZE * 8; cur_bmp_pos = 0; goto get_next_bmp_page; } /* If we have reached the end of the bitmap, we are done. */ if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size)) goto unm_EOD; ia_pos = (bmp_pos + cur_bmp_pos) << ndir->itype.index.block_size_bits; } ntfs_debug("Handling index buffer 0x%llx.", (unsigned long long)bmp_pos + cur_bmp_pos); /* If the current index buffer is in the same page we reuse the page. */ if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != (ia_pos & (s64)PAGE_CACHE_MASK)) { prev_ia_pos = ia_pos; if (likely(ia_page != NULL)) { unlock_page(ia_page); ntfs_unmap_page(ia_page); } /* * Map the page cache page containing the current ia_pos, * reading it from disk if necessary. 
*/ ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); if (IS_ERR(ia_page)) { ntfs_error(sb, "Reading index allocation data failed."); err = PTR_ERR(ia_page); ia_page = NULL; goto err_out; } lock_page(ia_page); kaddr = (u8*)page_address(ia_page); } /* Get the current index buffer. */ ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & ~(s64)(ndir->itype.index.block_size - 1))); /* Bounds checks. */ if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", vdir->i_ino); goto err_out; } /* Catch multi sector transfer fixup errors. */ if (unlikely(!ntfs_is_indx_record(ia->magic))) { ntfs_error(sb, "Directory index record with vcn 0x%llx is " "corrupt. Corrupt inode 0x%lx. Run chkdsk.", (unsigned long long)ia_pos >> ndir->itype.index.vcn_size_bits, vdir->i_ino); goto err_out; } if (unlikely(sle64_to_cpu(ia->index_block_vcn) != (ia_pos & ~(s64)(ndir->itype.index.block_size - 1)) >> ndir->itype.index.vcn_size_bits)) { ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is " "different from expected VCN (0x%llx). " "Directory inode 0x%lx is corrupt or driver " "bug. ", (unsigned long long) sle64_to_cpu(ia->index_block_vcn), (unsigned long long)ia_pos >> ndir->itype.index.vcn_size_bits, vdir->i_ino); goto err_out; } if (unlikely(le32_to_cpu(ia->index.allocated_size) + 0x18 != ndir->itype.index.block_size)) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx has a size (%u) differing from the " "directory specified size (%u). 
Directory " "inode is corrupt or driver bug.", (unsigned long long)ia_pos >> ndir->itype.index.vcn_size_bits, vdir->i_ino, le32_to_cpu(ia->index.allocated_size) + 0x18, ndir->itype.index.block_size); goto err_out; } index_end = (u8*)ia + ndir->itype.index.block_size; if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " "driver.", (unsigned long long)ia_pos >> ndir->itype.index.vcn_size_bits, vdir->i_ino); goto err_out; } ia_start = ia_pos & ~(s64)(ndir->itype.index.block_size - 1); index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length); if (unlikely(index_end > (u8*)ia + ndir->itype.index.block_size)) { ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory " "inode 0x%lx exceeds maximum size.", (unsigned long long)ia_pos >> ndir->itype.index.vcn_size_bits, vdir->i_ino); goto err_out; } /* The first index entry in this index buffer. */ ie = (INDEX_ENTRY*)((u8*)&ia->index + le32_to_cpu(ia->index.entries_offset)); /* * Loop until we exceed valid memory (corruption case) or until we * reach the last entry or until filldir tells us it has had enough * or signals an error (both covered by the rc test). */ for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) { ntfs_debug("In index allocation, offset 0x%llx.", (unsigned long long)ia_start + (unsigned long long)((u8*)ie - (u8*)ia)); /* Bounds checks. */ if (unlikely((u8*)ie < (u8*)ia || (u8*)ie + sizeof(INDEX_ENTRY_HEADER) > index_end || (u8*)ie + le16_to_cpu(ie->key_length) > index_end)) goto err_out; /* The last entry cannot contain a name. */ if (ie->flags & INDEX_ENTRY_END) break; /* Skip index block entry if continuing previous readdir. */ if (ia_pos - ia_start > (u8*)ie - (u8*)ia) continue; /* Advance the position even if going to skip the entry. 
*/ fpos = (u8*)ie - (u8*)ia + (sle64_to_cpu(ia->index_block_vcn) << ndir->itype.index.vcn_size_bits) + vol->mft_record_size; /* * Submit the name to the @filldir callback. Note, * ntfs_filldir() drops the lock on @ia_page but it retakes it * before returning, unless a non-zero value is returned in * which case the page is left unlocked. */ rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent, filldir); if (rc) { /* @ia_page is already unlocked in this case. */ ntfs_unmap_page(ia_page); ntfs_unmap_page(bmp_page); iput(bmp_vi); goto abort; } } goto find_next_index_buffer; unm_EOD: if (ia_page) { unlock_page(ia_page); ntfs_unmap_page(ia_page); } ntfs_unmap_page(bmp_page); iput(bmp_vi); EOD: /* We are finished, set fpos to EOD. */ fpos = i_size + vol->mft_record_size; abort: kfree(name); done: #ifdef DEBUG if (!rc) ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos); else ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.", rc, fpos); #endif filp->f_pos = fpos; return 0; err_out: if (bmp_page) { ntfs_unmap_page(bmp_page); iput_err_out: iput(bmp_vi); } if (ia_page) { unlock_page(ia_page); ntfs_unmap_page(ia_page); } kfree(ir); kfree(name); if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(ndir); if (!err) err = -EIO; ntfs_debug("Failed. Returning error code %i.", -err); filp->f_pos = fpos; return err; } /** * ntfs_dir_open - called when an inode is about to be opened * @vi: inode to be opened * @filp: file structure describing the inode * * Limit directory size to the page cache limit on architectures where unsigned * long is 32-bits. This is the most we can do for now without overflowing the * page cache page index. Doing it this way means we don't run into problems * because of existing too large directories. 
It would be better to allow the * user to read the accessible part of the directory but I doubt very much * anyone is going to hit this check on a 32-bit architecture, so there is no * point in adding the extra complexity required to support this. * * On 64-bit architectures, the check is hopefully optimized away by the * compiler. */ static int ntfs_dir_open(struct inode *vi, struct file *filp) { if (sizeof(unsigned long) < 8) { if (i_size_read(vi) > MAX_LFS_FILESIZE) return -EFBIG; } return 0; } #ifdef NTFS_RW /** * ntfs_dir_fsync - sync a directory to disk * @filp: directory to be synced * @dentry: dentry describing the directory to sync * @datasync: if non-zero only flush user data and not metadata * * Data integrity sync of a directory to disk. Used for fsync, fdatasync, and * msync system calls. This function is based on file.c::ntfs_file_fsync(). * * Write the mft record and all associated extent mft records as well as the * $INDEX_ALLOCATION and $BITMAP attributes and then sync the block device. * * If @datasync is true, we do not wait on the inode(s) to be written out * but we always wait on the page cache pages to be written out. * * Note: In the past @filp could be NULL so we ignore it as we don't need it * anyway. * * Locking: Caller must hold i_mutex on the inode. * * TODO: We should probably also write all attribute/index inodes associated * with this inode but since we have no simple way of getting to them we ignore * this problem for now. We do write the $BITMAP attribute if it is present * which is the important one for a directory so things are not too bad. 
*/ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *bmp_vi, *vi = filp->f_mapping->host; int err, ret; ntfs_attr na; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); err = filemap_write_and_wait_range(vi->i_mapping, start, end); if (err) return err; mutex_lock(&vi->i_mutex); BUG_ON(!S_ISDIR(vi->i_mode)); /* If the bitmap attribute inode is in memory sync it, too. */ na.mft_no = vi->i_ino; na.type = AT_BITMAP; na.name = I30; na.name_len = 4; bmp_vi = ilookup5(vi->i_sb, vi->i_ino, (test_t)ntfs_test_inode, &na); if (bmp_vi) { write_inode_now(bmp_vi, !datasync); iput(bmp_vi); } ret = __ntfs_write_inode(vi, 1); write_inode_now(vi, !datasync); err = sync_blockdev(vi->i_sb->s_bdev); if (unlikely(err && !ret)) ret = err; if (likely(!ret)) ntfs_debug("Done."); else ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " "%u.", datasync ? "data" : "", vi->i_ino, -ret); mutex_unlock(&vi->i_mutex); return ret; } #endif /* NTFS_RW */ const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. */ .read = generic_read_dir, /* Return -EISDIR. */ .readdir = ntfs_readdir, /* Read directory contents. */ #ifdef NTFS_RW .fsync = ntfs_dir_fsync, /* Sync a directory to disk. */ /*.aio_fsync = ,*/ /* Sync all outstanding async i/o operations on a kiocb. */ #endif /* NTFS_RW */ /*.ioctl = ,*/ /* Perform function on the mounted filesystem. */ .open = ntfs_dir_open, /* Open directory. */ };
gpl-2.0
AntaresOne/AntaresCore-Kernel-h815
drivers/media/pci/cx18/cx18-fileops.c
7841
25737
/* * cx18 file operation functions * * Derived from ivtv-fileops.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-fileops.h" #include "cx18-i2c.h" #include "cx18-queue.h" #include "cx18-vbi.h" #include "cx18-audio.h" #include "cx18-mailbox.h" #include "cx18-scb.h" #include "cx18-streams.h" #include "cx18-controls.h" #include "cx18-ioctl.h" #include "cx18-cards.h" /* This function tries to claim the stream for a specific file descriptor. If no one else is using this stream then the stream is claimed and associated VBI and IDX streams are also automatically claimed. Possible error returns: -EBUSY if someone else has claimed the stream or 0 on success. */ int cx18_claim_stream(struct cx18_open_id *id, int type) { struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[type]; struct cx18_stream *s_assoc; /* Nothing should ever try to directly claim the IDX stream */ if (type == CX18_ENC_STREAM_TYPE_IDX) { CX18_WARN("MPEG Index stream cannot be claimed " "directly, but something tried.\n"); return -EINVAL; } if (test_and_set_bit(CX18_F_S_CLAIMED, &s->s_flags)) { /* someone already claimed this stream */ if (s->id == id->open_id) { /* yes, this file descriptor did. 
So that's OK. */ return 0; } if (s->id == -1 && type == CX18_ENC_STREAM_TYPE_VBI) { /* VBI is handled already internally, now also assign the file descriptor to this stream for external reading of the stream. */ s->id = id->open_id; CX18_DEBUG_INFO("Start Read VBI\n"); return 0; } /* someone else is using this stream already */ CX18_DEBUG_INFO("Stream %d is busy\n", type); return -EBUSY; } s->id = id->open_id; /* * CX18_ENC_STREAM_TYPE_MPG needs to claim: * CX18_ENC_STREAM_TYPE_VBI, if VBI insertion is on for sliced VBI, or * CX18_ENC_STREAM_TYPE_IDX, if VBI insertion is off for sliced VBI * (We don't yet fix up MPEG Index entries for our inserted packets). * * For all other streams we're done. */ if (type != CX18_ENC_STREAM_TYPE_MPG) return 0; s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (cx->vbi.insert_mpeg && !cx18_raw_vbi(cx)) s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; else if (!cx18_stream_enabled(s_assoc)) return 0; set_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); /* mark that it is used internally */ set_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags); return 0; } EXPORT_SYMBOL(cx18_claim_stream); /* This function releases a previously claimed stream. It will take into account associated VBI streams. */ void cx18_release_stream(struct cx18_stream *s) { struct cx18 *cx = s->cx; struct cx18_stream *s_assoc; s->id = -1; if (s->type == CX18_ENC_STREAM_TYPE_IDX) { /* * The IDX stream is only used internally, and can * only be indirectly unclaimed by unclaiming the MPG stream. */ return; } if (s->type == CX18_ENC_STREAM_TYPE_VBI && test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags)) { /* this stream is still in use internally */ return; } if (!test_and_clear_bit(CX18_F_S_CLAIMED, &s->s_flags)) { CX18_DEBUG_WARN("Release stream %s not in use!\n", s->name); return; } cx18_flush_queues(s); /* * CX18_ENC_STREAM_TYPE_MPG needs to release the * CX18_ENC_STREAM_TYPE_VBI and/or CX18_ENC_STREAM_TYPE_IDX streams. * * For all other streams we're done. 
*/ if (s->type != CX18_ENC_STREAM_TYPE_MPG) return; /* Unclaim the associated MPEG Index stream */ s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags)) { clear_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); cx18_flush_queues(s_assoc); } /* Unclaim the associated VBI stream */ s_assoc = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; if (test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_assoc->s_flags)) { if (s_assoc->id == -1) { /* * The VBI stream is not still claimed by a file * descriptor, so completely unclaim it. */ clear_bit(CX18_F_S_CLAIMED, &s_assoc->s_flags); cx18_flush_queues(s_assoc); } } } EXPORT_SYMBOL(cx18_release_stream); static void cx18_dualwatch(struct cx18 *cx) { struct v4l2_tuner vt; u32 new_stereo_mode; const u32 dual = 0x0200; new_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode); memset(&vt, 0, sizeof(vt)); cx18_call_all(cx, tuner, g_tuner, &vt); if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2)) new_stereo_mode = dual; if (new_stereo_mode == cx->dualwatch_stereo_mode) return; CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x.\n", cx->dualwatch_stereo_mode, new_stereo_mode); if (v4l2_ctrl_s_ctrl(cx->cxhdl.audio_mode, new_stereo_mode)) CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n"); } static struct cx18_mdl *cx18_get_mdl(struct cx18_stream *s, int non_block, int *err) { struct cx18 *cx = s->cx; struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; struct cx18_mdl *mdl; DEFINE_WAIT(wait); *err = 0; while (1) { if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* Process pending program updates and VBI data */ if (time_after(jiffies, cx->dualwatch_jiffies + msecs_to_jiffies(1000))) { cx->dualwatch_jiffies = jiffies; cx18_dualwatch(cx); } if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) && !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { while ((mdl = cx18_dequeue(s_vbi, &s_vbi->q_full))) { /* byteswap and process VBI data 
*/ cx18_process_vbi_data(cx, mdl, s_vbi->type); cx18_stream_put_mdl_fw(s_vbi, mdl); } } mdl = &cx->vbi.sliced_mpeg_mdl; if (mdl->readpos != mdl->bytesused) return mdl; } /* do we have new data? */ mdl = cx18_dequeue(s, &s->q_full); if (mdl) { if (!test_and_clear_bit(CX18_F_M_NEED_SWAP, &mdl->m_flags)) return mdl; if (s->type == CX18_ENC_STREAM_TYPE_MPG) /* byteswap MPG data */ cx18_mdl_swap(mdl); else { /* byteswap and process VBI data */ cx18_process_vbi_data(cx, mdl, s->type); } return mdl; } /* return if end of stream */ if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) { CX18_DEBUG_INFO("EOS %s\n", s->name); return NULL; } /* return if file was opened with O_NONBLOCK */ if (non_block) { *err = -EAGAIN; return NULL; } /* wait for more data to arrive */ prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); /* New buffers might have become available before we were added to the waitqueue */ if (!atomic_read(&s->q_full.depth)) schedule(); finish_wait(&s->waitq, &wait); if (signal_pending(current)) { /* return if a signal was received */ CX18_DEBUG_INFO("User stopped %s\n", s->name); *err = -EINTR; return NULL; } } } static void cx18_setup_sliced_vbi_mdl(struct cx18 *cx) { struct cx18_mdl *mdl = &cx->vbi.sliced_mpeg_mdl; struct cx18_buffer *buf = &cx->vbi.sliced_mpeg_buf; int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; buf->buf = cx->vbi.sliced_mpeg_data[idx]; buf->bytesused = cx->vbi.sliced_mpeg_size[idx]; buf->readpos = 0; mdl->curr_buf = NULL; mdl->bytesused = cx->vbi.sliced_mpeg_size[idx]; mdl->readpos = 0; } static size_t cx18_copy_buf_to_user(struct cx18_stream *s, struct cx18_buffer *buf, char __user *ubuf, size_t ucount, bool *stop) { struct cx18 *cx = s->cx; size_t len = buf->bytesused - buf->readpos; *stop = false; if (len > ucount) len = ucount; if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG && !cx18_raw_vbi(cx) && buf != &cx->vbi.sliced_mpeg_buf) { /* * Try to find a good splice point in the PS, just before * an MPEG-2 Program Pack 
start code, and provide only * up to that point to the user, so it's easy to insert VBI data * the next time around. * * This will not work for an MPEG-2 TS and has only been * verified by analysis to work for an MPEG-2 PS. Helen Buus * pointed out this works for the CX23416 MPEG-2 DVD compatible * stream, and research indicates both the MPEG 2 SVCD and DVD * stream types use an MPEG-2 PS container. */ /* * An MPEG-2 Program Stream (PS) is a series of * MPEG-2 Program Packs terminated by an * MPEG Program End Code after the last Program Pack. * A Program Pack may hold a PS System Header packet and any * number of Program Elementary Stream (PES) Packets */ const char *start = buf->buf + buf->readpos; const char *p = start + 1; const u8 *q; u8 ch = cx->search_pack_header ? 0xba : 0xe0; int stuffing, i; while (start + len > p) { /* Scan for a 0 to find a potential MPEG-2 start code */ q = memchr(p, 0, start + len - p); if (q == NULL) break; p = q + 1; /* * Keep looking if not a * MPEG-2 Pack header start code: 0x00 0x00 0x01 0xba * or MPEG-2 video PES start code: 0x00 0x00 0x01 0xe0 */ if ((char *)q + 15 >= buf->buf + buf->bytesused || q[1] != 0 || q[2] != 1 || q[3] != ch) continue; /* If expecting the primary video PES */ if (!cx->search_pack_header) { /* Continue if it couldn't be a PES packet */ if ((q[6] & 0xc0) != 0x80) continue; /* Check if a PTS or PTS & DTS follow */ if (((q[7] & 0xc0) == 0x80 && /* PTS only */ (q[9] & 0xf0) == 0x20) || /* PTS only */ ((q[7] & 0xc0) == 0xc0 && /* PTS & DTS */ (q[9] & 0xf0) == 0x30)) { /* DTS follows */ /* Assume we found the video PES hdr */ ch = 0xba; /* next want a Program Pack*/ cx->search_pack_header = 1; p = q + 9; /* Skip this video PES hdr */ } continue; } /* We may have found a Program Pack start code */ /* Get the count of stuffing bytes & verify them */ stuffing = q[13] & 7; /* all stuffing bytes must be 0xff */ for (i = 0; i < stuffing; i++) if (q[14 + i] != 0xff) break; if (i == stuffing && /* right number of 
stuffing bytes*/ (q[4] & 0xc4) == 0x44 && /* marker check */ (q[12] & 3) == 3 && /* marker check */ q[14 + stuffing] == 0 && /* PES Pack or Sys Hdr */ q[15 + stuffing] == 0 && q[16 + stuffing] == 1) { /* We declare we actually found a Program Pack*/ cx->search_pack_header = 0; /* expect vid PES */ len = (char *)q - start; cx18_setup_sliced_vbi_mdl(cx); *stop = true; break; } } } if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) { CX18_DEBUG_WARN("copy %zd bytes to user failed for %s\n", len, s->name); return -EFAULT; } buf->readpos += len; if (s->type == CX18_ENC_STREAM_TYPE_MPG && buf != &cx->vbi.sliced_mpeg_buf) cx->mpg_data_received += len; return len; } static size_t cx18_copy_mdl_to_user(struct cx18_stream *s, struct cx18_mdl *mdl, char __user *ubuf, size_t ucount) { size_t tot_written = 0; int rc; bool stop = false; if (mdl->curr_buf == NULL) mdl->curr_buf = list_first_entry(&mdl->buf_list, struct cx18_buffer, list); if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) { /* * For some reason we've exhausted the buffers, but the MDL * object still said some data was unread. * Fix that and bail out. 
*/ mdl->readpos = mdl->bytesused; return 0; } list_for_each_entry_from(mdl->curr_buf, &mdl->buf_list, list) { if (mdl->curr_buf->readpos >= mdl->curr_buf->bytesused) continue; rc = cx18_copy_buf_to_user(s, mdl->curr_buf, ubuf + tot_written, ucount - tot_written, &stop); if (rc < 0) return rc; mdl->readpos += rc; tot_written += rc; if (stop || /* Forced stopping point for VBI insertion */ tot_written >= ucount || /* Reader request statisfied */ mdl->curr_buf->readpos < mdl->curr_buf->bytesused || mdl->readpos >= mdl->bytesused) /* MDL buffers drained */ break; } return tot_written; } static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf, size_t tot_count, int non_block) { struct cx18 *cx = s->cx; size_t tot_written = 0; int single_frame = 0; if (atomic_read(&cx->ana_capturing) == 0 && s->id == -1) { /* shouldn't happen */ CX18_DEBUG_WARN("Stream %s not initialized before read\n", s->name); return -EIO; } /* Each VBI buffer is one frame, the v4l2 API says that for VBI the frames should arrive one-by-one, so make sure we never output more than one VBI frame at a time */ if (s->type == CX18_ENC_STREAM_TYPE_VBI && !cx18_raw_vbi(cx)) single_frame = 1; for (;;) { struct cx18_mdl *mdl; int rc; mdl = cx18_get_mdl(s, non_block, &rc); /* if there is no data available... 
*/ if (mdl == NULL) { /* if we got data, then return that regardless */ if (tot_written) break; /* EOS condition */ if (rc == 0) { clear_bit(CX18_F_S_STREAMOFF, &s->s_flags); clear_bit(CX18_F_S_APPL_IO, &s->s_flags); cx18_release_stream(s); } /* set errno */ return rc; } rc = cx18_copy_mdl_to_user(s, mdl, ubuf + tot_written, tot_count - tot_written); if (mdl != &cx->vbi.sliced_mpeg_mdl) { if (mdl->readpos == mdl->bytesused) cx18_stream_put_mdl_fw(s, mdl); else cx18_push(s, mdl, &s->q_full); } else if (mdl->readpos == mdl->bytesused) { int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; cx->vbi.sliced_mpeg_size[idx] = 0; cx->vbi.inserted_frame++; cx->vbi_data_inserted += mdl->bytesused; } if (rc < 0) return rc; tot_written += rc; if (tot_written == tot_count || single_frame) break; } return tot_written; } static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf, size_t count, loff_t *pos, int non_block) { ssize_t rc = count ? cx18_read(s, ubuf, count, non_block) : 0; struct cx18 *cx = s->cx; CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc); if (rc > 0) pos += rc; return rc; } int cx18_start_capture(struct cx18_open_id *id) { struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; struct cx18_stream *s_vbi; struct cx18_stream *s_idx; if (s->type == CX18_ENC_STREAM_TYPE_RAD) { /* you cannot read from these stream types. */ return -EPERM; } /* Try to claim this stream. */ if (cx18_claim_stream(id, s->type)) return -EBUSY; /* If capture is already in progress, then we also have to do nothing extra. 
*/ if (test_bit(CX18_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(CX18_F_S_STREAMING, &s->s_flags)) { set_bit(CX18_F_S_APPL_IO, &s->s_flags); return 0; } /* Start associated VBI or IDX stream capture if required */ s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* * The VBI and IDX streams should have been claimed * automatically, if for internal use, when the MPG stream was * claimed. We only need to start these streams capturing. */ if (test_bit(CX18_F_S_INTERNAL_USE, &s_idx->s_flags) && !test_and_set_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) { if (cx18_start_v4l2_encode_stream(s_idx)) { CX18_DEBUG_WARN("IDX capture start failed\n"); clear_bit(CX18_F_S_STREAMING, &s_idx->s_flags); goto start_failed; } CX18_DEBUG_INFO("IDX capture started\n"); } if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) && !test_and_set_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) { if (cx18_start_v4l2_encode_stream(s_vbi)) { CX18_DEBUG_WARN("VBI capture start failed\n"); clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags); goto start_failed; } CX18_DEBUG_INFO("VBI insertion started\n"); } } /* Tell the card to start capturing */ if (!cx18_start_v4l2_encode_stream(s)) { /* We're done */ set_bit(CX18_F_S_APPL_IO, &s->s_flags); /* Resume a possibly paused encoder */ if (test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags)) cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, s->handle); return 0; } start_failed: CX18_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name); /* * The associated VBI and IDX streams for internal use are released * automatically when the MPG stream is released. We only need to stop * the associated stream. 
*/ if (s->type == CX18_ENC_STREAM_TYPE_MPG) { /* Stop the IDX stream which is always for internal use */ if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) { cx18_stop_v4l2_encode_stream(s_idx, 0); clear_bit(CX18_F_S_STREAMING, &s_idx->s_flags); } /* Stop the VBI stream, if only running for internal use */ if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) && !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { cx18_stop_v4l2_encode_stream(s_vbi, 0); clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags); } } clear_bit(CX18_F_S_STREAMING, &s->s_flags); cx18_release_stream(s); /* Also releases associated streams */ return -EIO; } ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct cx18_open_id *id = file2id(filp); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int rc; CX18_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name); mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) return rc; if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { return videobuf_read_stream(&s->vbuf_q, buf, count, pos, 0, filp->f_flags & O_NONBLOCK); } return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); } unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait) { struct cx18_open_id *id = file2id(filp); struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags); /* Start a capture if there is none */ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) { int rc; mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) { CX18_DEBUG_INFO("Could not start capture for %s (%d)\n", s->name, rc); return POLLERR; } CX18_DEBUG_FILE("Encoder poll started capture\n"); } if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { int videobuf_poll = 
videobuf_poll_stream(filp, &s->vbuf_q, wait); if (eof && videobuf_poll == POLLERR) return POLLHUP; else return videobuf_poll; } /* add stream's waitq to the poll list */ CX18_DEBUG_HI_FILE("Encoder poll\n"); poll_wait(filp, &s->waitq, wait); if (atomic_read(&s->q_full.depth)) return POLLIN | POLLRDNORM; if (eof) return POLLHUP; return 0; } int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma) { struct cx18_open_id *id = file->private_data; struct cx18 *cx = id->cx; struct cx18_stream *s = &cx->streams[id->type]; int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags); if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) && (id->type == CX18_ENC_STREAM_TYPE_YUV)) { /* Start a capture if there is none */ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) { int rc; mutex_lock(&cx->serialize_lock); rc = cx18_start_capture(id); mutex_unlock(&cx->serialize_lock); if (rc) { CX18_DEBUG_INFO( "Could not start capture for %s (%d)\n", s->name, rc); return -EINVAL; } CX18_DEBUG_FILE("Encoder mmap started capture\n"); } return videobuf_mmap_mapper(&s->vbuf_q, vma); } return -EINVAL; } void cx18_vb_timeout(unsigned long data) { struct cx18_stream *s = (struct cx18_stream *)data; struct cx18_videobuf_buffer *buf; unsigned long flags; /* Return all of the buffers in error state, so the vbi/vid inode * can return from blocking. 
 */
	/*
	 * NOTE(review): this is the tail of a function whose start lies above
	 * this chunk.  Visible behavior: under vb_lock, drain every queued
	 * videobuf capture buffer, mark it VIDEOBUF_ERROR and wake any waiter.
	 */
	spin_lock_irqsave(&s->vb_lock, flags);
	while (!list_empty(&s->vb_capture)) {
		buf = list_entry(s->vb_capture.next,
			struct cx18_videobuf_buffer, vb.queue);
		list_del(&buf->vb.queue);
		buf->vb.state = VIDEOBUF_ERROR;
		wake_up(&buf->vb.done);
	}
	spin_unlock_irqrestore(&s->vb_lock, flags);
}

/*
 * Stop capturing on the stream associated with @id.
 *
 * For an MPG stream this also stops the internally-used VBI stream (unless
 * an application is doing I/O on it) and the IDX stream.  A VBI stream that
 * is still in internal use is not stopped; its owner id is just cleared.
 * When @gop_end is 0 the stream claim is released as well.
 */
void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
{
	struct cx18 *cx = id->cx;
	struct cx18_stream *s = &cx->streams[id->type];
	struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
	struct cx18_stream *s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];

	CX18_DEBUG_IOCTL("close() of %s\n", s->name);

	/* 'Unclaim' this stream */

	/* Stop capturing */
	if (test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
		CX18_DEBUG_INFO("close stopping capture\n");
		if (id->type == CX18_ENC_STREAM_TYPE_MPG) {
			/* Stop internal use associated VBI and IDX streams */
			if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
			    !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
				CX18_DEBUG_INFO("close stopping embedded VBI "
						"capture\n");
				cx18_stop_v4l2_encode_stream(s_vbi, 0);
			}
			if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) {
				CX18_DEBUG_INFO("close stopping IDX capture\n");
				cx18_stop_v4l2_encode_stream(s_idx, 0);
			}
		}
		if (id->type == CX18_ENC_STREAM_TYPE_VBI &&
		    test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags))
			/* Also used internally, don't stop capturing */
			s->id = -1;
		else
			cx18_stop_v4l2_encode_stream(s, gop_end);
	}
	if (!gop_end) {
		/* Fully release the claim on the stream */
		clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
		clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
		cx18_release_stream(s);
	}
}

/*
 * V4L2 release() handler.
 *
 * If the last radio filehandle is going away, switch the hardware back to
 * TV mode (mute, retune, reselect audio input, unmute).  Then tear down the
 * v4l2_fh, stop capture if this open_id owns the stream, and free @id.
 * Serialized against other file operations by cx->serialize_lock.
 */
int cx18_v4l2_close(struct file *filp)
{
	struct v4l2_fh *fh = filp->private_data;
	struct cx18_open_id *id = fh2id(fh);
	struct cx18 *cx = id->cx;
	struct cx18_stream *s = &cx->streams[id->type];

	CX18_DEBUG_IOCTL("close() of %s\n", s->name);

	mutex_lock(&cx->serialize_lock);
	/* Stop radio */
	if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
			v4l2_fh_is_singular_file(filp)) {
		/* Closing radio device, return to TV mode */
		cx18_mute(cx);
		/* Mark that the radio is no longer in use */
		clear_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
		/* Switch tuner to TV */
		cx18_call_all(cx, core, s_std, cx->std);
		/* Select correct audio input (i.e. TV tuner or Line in) */
		cx18_audio_set_io(cx);
		if (atomic_read(&cx->ana_capturing) > 0) {
			/* Undo video mute */
			cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
			    (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute) |
			    (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8)));
		}
		/* Done! Unmute and continue. */
		cx18_unmute(cx);
	}

	v4l2_fh_del(fh);
	v4l2_fh_exit(fh);

	/* 'Unclaim' this stream */
	if (s->id == id->open_id)
		cx18_stop_capture(id, 0);
	kfree(id);
	mutex_unlock(&cx->serialize_lock);
	return 0;
}

/*
 * Open helper, called with cx->serialize_lock held.
 *
 * Allocates and registers a per-filehandle cx18_open_id.  Opening the radio
 * node for the first time fails with -EBUSY if an analog capture is already
 * running; otherwise the tuner is switched to radio mode.
 * Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */
static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
{
	struct cx18 *cx = s->cx;
	struct cx18_open_id *item;

	CX18_DEBUG_FILE("open %s\n", s->name);

	/* Allocate memory */
	item = kzalloc(sizeof(struct cx18_open_id), GFP_KERNEL);
	if (NULL == item) {
		CX18_DEBUG_WARN("nomem on v4l2 open\n");
		return -ENOMEM;
	}
	v4l2_fh_init(&item->fh, s->video_dev);

	item->cx = cx;
	item->type = s->type;

	item->open_id = cx->open_id++;
	filp->private_data = &item->fh;
	v4l2_fh_add(&item->fh);

	if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
			v4l2_fh_is_singular_file(filp)) {
		if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
			if (atomic_read(&cx->ana_capturing) > 0) {
				/* switching to radio while capture is
				   in progress is not polite */
				v4l2_fh_del(&item->fh);
				v4l2_fh_exit(&item->fh);
				kfree(item);
				return -EBUSY;
			}
		}

		/* Mark that the radio is being used. */
		set_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
		/* We have the radio */
		cx18_mute(cx);
		/* Switch tuner to radio */
		cx18_call_all(cx, tuner, s_radio);
		/* Select the correct audio input (i.e. radio tuner) */
		cx18_audio_set_io(cx);
		/* Done! Unmute and continue. */
		cx18_unmute(cx);
	}
	return 0;
}

/*
 * V4L2 open() handler.
 *
 * Performs first-open device initialization under cx->serialize_lock, then
 * delegates to cx18_serialized_open().  Returns -ENXIO if the device could
 * not be initialized.
 */
int cx18_v4l2_open(struct file *filp)
{
	int res;
	struct video_device *video_dev = video_devdata(filp);
	struct cx18_stream *s = video_get_drvdata(video_dev);
	struct cx18 *cx = s->cx;

	mutex_lock(&cx->serialize_lock);
	if (cx18_init_on_first_open(cx)) {
		CX18_ERR("Failed to initialize on %s\n",
			 video_device_node_name(video_dev));
		mutex_unlock(&cx->serialize_lock);
		return -ENXIO;
	}
	res = cx18_serialized_open(s, filp);
	mutex_unlock(&cx->serialize_lock);
	return res;
}

/*
 * Mute the firmware audio path, but only while an analog capture is active
 * and a valid firmware task handle can be found.
 */
void cx18_mute(struct cx18 *cx)
{
	u32 h;
	if (atomic_read(&cx->ana_capturing)) {
		h = cx18_find_handle(cx);
		if (h != CX18_INVALID_TASK_HANDLE)
			cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 1);
		else
			CX18_ERR("Can't find valid task handle for mute\n");
	}
	CX18_DEBUG_INFO("Mute\n");
}

/*
 * Unmute the firmware audio path.  Sleeps ~100ms first so the firmware has
 * settled before the misc-parameter and unmute commands are issued.
 */
void cx18_unmute(struct cx18 *cx)
{
	u32 h;
	if (atomic_read(&cx->ana_capturing)) {
		h = cx18_find_handle(cx);
		if (h != CX18_INVALID_TASK_HANDLE) {
			cx18_msleep_timeout(100, 0);
			cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2, h, 12);
			cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 0);
		} else
			CX18_ERR("Can't find valid task handle for unmute\n");
	}
	CX18_DEBUG_INFO("Unmute\n");
}
gpl-2.0
Scorpio92/LG_D618_kernel
arch/unicore32/mm/flush.c
9121
2471
/* * linux/arch/unicore32/mm/flush.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> void flush_cache_mm(struct mm_struct *mm) { } void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { } static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *kaddr, unsigned long len) { /* VIPT non-aliasing D-cache */ if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; __cpuc_coherent_kern_range(addr, addr + len); } } /* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. * * Note that this code needs to run on the current CPU. */ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len) { memcpy(dst, src, len); flush_ptrace_access(vma, page, uaddr, dst, len); } void __flush_dcache_page(struct address_space *mapping, struct page *page) { /* * Writeback any data associated with the kernel mapping of this * page. This ensures that data in the physical page is mutually * coherent with the kernels mapping. */ __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE); } /* * Ensure cache coherency between kernel mapping and userspace mapping * of this page. 
*/ void flush_dcache_page(struct page *page) { struct address_space *mapping; /* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */ if (page == ZERO_PAGE(0)) return; mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) clear_bit(PG_dcache_clean, &page->flags); else { __flush_dcache_page(mapping, page); if (mapping) __flush_icache_all(); set_bit(PG_dcache_clean, &page->flags); } } EXPORT_SYMBOL(flush_dcache_page);
gpl-2.0
nadavitay/linux-3.14.1
drivers/isdn/mISDN/dsp_blowfish.c
10401
23926
/* * Blowfish encryption/decryption for mISDN_dsp. * * Copyright Andreas Eversberg (jolly@eversberg.eu) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/mISDNif.h> #include <linux/mISDNdsp.h> #include "core.h" #include "dsp.h" /* * how to encode a sample stream to 64-bit blocks that will be encryped * * first of all, data is collected until a block of 9 samples are received. * of course, a packet may have much more than 9 sample, but is may have * not excacly the multiple of 9 samples. if there is a rest, the next * received data will complete the block. * * the block is then converted to 9 uLAW samples without the least sigificant * bit. the result is a 7-bit encoded sample. * * the samples will be reoganised to form 8 bytes of data: * (5(6) means: encoded sample no. 5, bit 6) * * 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) 0(0) 1(6) * 1(5) 1(4) 1(3) 1(2) 1(1) 1(0) 2(6) 2(5) * 2(4) 2(3) 2(2) 2(1) 2(0) 3(6) 3(5) 3(4) * 3(3) 3(2) 3(1) 3(0) 4(6) 4(5) 4(4) 4(3) * 4(2) 4(1) 4(0) 5(6) 5(5) 5(4) 5(3) 5(2) * 5(1) 5(0) 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) * 6(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0) * 8(6) 8(5) 8(4) 8(3) 8(2) 8(1) 8(0) * * the missing bit 0 of the last byte is filled with some * random noise, to fill all 8 bytes. * * the 8 bytes will be encrypted using blowfish. * * the result will be converted into 9 bytes. the bit 7 is used for * checksumme (CS) for sync (0, 1) and for the last bit: * (5(6) means: crypted byte 5, bit 6) * * 1 0(7) 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) * 0 0(0) 1(7) 1(6) 1(5) 1(4) 1(3) 1(2) * 0 1(1) 1(0) 2(7) 2(6) 2(5) 2(4) 2(3) * 0 2(2) 2(1) 2(0) 3(7) 3(6) 3(5) 3(4) * 0 3(3) 3(2) 3(1) 3(0) 4(7) 4(6) 4(5) * CS 4(4) 4(3) 4(2) 4(1) 4(0) 5(7) 5(6) * CS 5(5) 5(4) 5(3) 5(2) 5(1) 5(0) 6(7) * CS 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) 6(0) * 7(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0) * * the checksum is used to detect transmission errors and frame drops. 
* * synchronisation of received block is done by shifting the upper bit of each * byte (bit 7) to a shift register. if the rigister has the first five bits * (10000), this is used to find the sync. only if sync has been found, the * current block of 9 received bytes are decrypted. before that the check * sum is calculated. if it is incorrect the block is dropped. * this will avoid loud noise due to corrupt encrypted data. * * if the last block is corrupt, the current decoded block is repeated * until a valid block has been received. */ /* * some blowfish parts are taken from the * crypto-api for faster implementation */ struct bf_ctx { u32 p[18]; u32 s[1024]; }; static const u32 bf_pbox[16 + 2] = { 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, }; static const u32 bf_sbox[256 * 4] = { 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, 0x137a3be4, 
0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, 
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 
0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, }; /* * Round loop unrolling macros, S is a pointer to a S-Box array * organized in 4 unsigned longs at a row. 
 */

/* Extract byte 3..0 (big-endian order) of a 32-bit word */
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))

/* Blowfish F function over the four 256-entry S-box quadrants in S[] */
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^	\
		  S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])

/* One encryption / decryption round; P and S must be in scope */
#define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0)
#define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0)

/*
 * encrypt isdn data frame
 * every block with 9 samples is encrypted
 *
 * Samples are accumulated in dsp->bf_data_in; each full block of 9 is
 * packed into a 64-bit value (yl:yr), run through 16 Blowfish rounds, and
 * re-expanded into 9 output bytes carrying sync and a 3-bit checksum in
 * the top bits (see the frame layout comment at the top of this file).
 * While a block accumulates, previously produced cipher bytes from
 * dsp->bf_crypt_out are emitted in place of the input samples.
 */
void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len)
{
	int i = 0, j = dsp->bf_crypt_pos;
	u8 *bf_data_in = dsp->bf_data_in;
	u8 *bf_crypt_out = dsp->bf_crypt_out;
	u32 *P = dsp->bf_p;
	u32 *S = dsp->bf_s;
	u32 yl, yr;
	u32 cs;
	u8 nibble;

	while (i < len) {
		/* collect a block of 9 samples */
		if (j < 9) {
			bf_data_in[j] = *data;
			*data++ = bf_crypt_out[j++];
			i++;
			continue;
		}
		j = 0;
		/* transcode 9 samples xlaw to 8 bytes */
		yl = dsp_audio_law2seven[bf_data_in[0]];
		yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[1]];
		yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[2]];
		yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[3]];
		nibble = dsp_audio_law2seven[bf_data_in[4]];
		yr = nibble;
		yl = (yl << 4) | (nibble >> 3);
		yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[5]];
		yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[6]];
		yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[7]];
		yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[8]];
		yr = (yr << 1) | (bf_data_in[0] & 1);
		/* fill unused bit with random noise of audio input */
		/* encrypt */

		EROUND(yr, yl, 0);
		EROUND(yl, yr, 1);
		EROUND(yr, yl, 2);
		EROUND(yl, yr, 3);
		EROUND(yr, yl, 4);
		EROUND(yl, yr, 5);
		EROUND(yr, yl, 6);
		EROUND(yl, yr, 7);
		EROUND(yr, yl, 8);
		EROUND(yl, yr, 9);
		EROUND(yr, yl, 10);
		EROUND(yl, yr, 11);
		EROUND(yr, yl, 12);
		EROUND(yl, yr, 13);
		EROUND(yr, yl, 14);
		EROUND(yl, yr, 15);
		yl ^= P[16];
		yr ^= P[17];

		/* calculate 3-bit checksum (folded from all 64 cipher bits) */
		cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
			^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
			^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
			^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
			^ (yr >> 28) ^ (yr >> 31);

		/*
		 * transcode 8 crypted bytes to 9 data bytes with sync
		 * and checksum information
		 */
		bf_crypt_out[0] = (yl >> 25) | 0x80; /* bit7 = 1: sync marker */
		bf_crypt_out[1] = (yl >> 18) & 0x7f;
		bf_crypt_out[2] = (yl >> 11) & 0x7f;
		bf_crypt_out[3] = (yl >> 4) & 0x7f;
		bf_crypt_out[4] = ((yl << 3) & 0x78) | ((yr >> 29) & 0x07);
		bf_crypt_out[5] = ((yr >> 22) & 0x7f) | ((cs << 5) & 0x80);
		bf_crypt_out[6] = ((yr >> 15) & 0x7f) | ((cs << 6) & 0x80);
		bf_crypt_out[7] = ((yr >> 8) & 0x7f) | (cs << 7);
		bf_crypt_out[8] = yr;
	}

	/* write current count */
	dsp->bf_crypt_pos = j;
}

/*
 * decrypt isdn data frame
 * every block with 9 bytes is decrypted
 *
 * Incoming bytes go into a 16-byte ring; the top bit of each byte feeds a
 * shift register used to find frame sync (pattern 10000).  Once in sync, a
 * 9-byte frame is unpacked to 64 bits, its 3-bit checksum verified (bad
 * frames are dropped so the previous decoded block keeps playing), then
 * decrypted and expanded back to 9 audio samples in dsp->bf_data_out.
 */
void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len)
{
	int i = 0;
	u8 j = dsp->bf_decrypt_in_pos;
	u8 k = dsp->bf_decrypt_out_pos;
	u8 *bf_crypt_inring = dsp->bf_crypt_inring;
	u8 *bf_data_out = dsp->bf_data_out;
	u16 sync = dsp->bf_sync;
	u32 *P = dsp->bf_p;
	u32 *S = dsp->bf_s;
	u32 yl, yr;
	u8 nibble;
	u8 cs, cs0, cs1, cs2;

	while (i < len) {
		/*
		 * shift upper bit and rotate data to buffer ring
		 * send current decrypted data
		 */
		sync = (sync << 1) | ((*data) >> 7);
		bf_crypt_inring[j++ & 15] = *data;
		*data++ = bf_data_out[k++];
		i++;
		if (k == 9)
			k = 0; /* repeat if no sync has been found */
		/* check if not in sync */
		if ((sync & 0x1f0) != 0x100)
			continue;
		j -= 9;
		/* transcode receive data to 64 bit block of encrypted data */
		yl = bf_crypt_inring[j++ & 15];
		yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
		yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
		yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
		nibble = bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
		yr = nibble;
		yl = (yl << 4) | (nibble >> 3);
		cs2 = bf_crypt_inring[j++ & 15];
		yr = (yr << 7) | (cs2 & 0x7f);
		cs1 = bf_crypt_inring[j++ & 15];
		yr = (yr << 7) | (cs1 & 0x7f);
		cs0 = bf_crypt_inring[j++ & 15];
		yr = (yr << 7) | (cs0 & 0x7f);
		yr = (yr << 8) | bf_crypt_inring[j++ & 15];

		/* calculate 3-bit checksum (same fold as the encrypt side) */
		cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
			^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
			^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
			^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
			^ (yr >> 28) ^ (yr >> 31);

		/* check if frame is valid */
		if ((cs & 0x7) != (((cs2 >> 5) & 4) | ((cs1 >> 6) & 2) | (cs0 >> 7))) {
			if (dsp_debug & DEBUG_DSP_BLOWFISH)
				printk(KERN_DEBUG
				       "DSP BLOWFISH: received corrupt frame, "
				       "checksumme is not correct\n");
			continue;
		}

		/* decrypt */
		yr ^= P[17];
		yl ^= P[16];
		DROUND(yl, yr, 15);
		DROUND(yr, yl, 14);
		DROUND(yl, yr, 13);
		DROUND(yr, yl, 12);
		DROUND(yl, yr, 11);
		DROUND(yr, yl, 10);
		DROUND(yl, yr, 9);
		DROUND(yr, yl, 8);
		DROUND(yl, yr, 7);
		DROUND(yr, yl, 6);
		DROUND(yl, yr, 5);
		DROUND(yr, yl, 4);
		DROUND(yl, yr, 3);
		DROUND(yr, yl, 2);
		DROUND(yl, yr, 1);
		DROUND(yr, yl, 0);

		/* transcode 8 crypted bytes to 9 sample bytes */
		bf_data_out[0] = dsp_audio_seven2law[(yl >> 25) & 0x7f];
		bf_data_out[1] = dsp_audio_seven2law[(yl >> 18) & 0x7f];
		bf_data_out[2] = dsp_audio_seven2law[(yl >> 11) & 0x7f];
		bf_data_out[3] = dsp_audio_seven2law[(yl >> 4) & 0x7f];
		bf_data_out[4] = dsp_audio_seven2law[((yl << 3) & 0x78) |
						     ((yr >> 29) & 0x07)];

		bf_data_out[5] = dsp_audio_seven2law[(yr >> 22) & 0x7f];
		bf_data_out[6] = dsp_audio_seven2law[(yr >> 15) & 0x7f];
		bf_data_out[7] = dsp_audio_seven2law[(yr >> 8) & 0x7f];
		bf_data_out[8] = dsp_audio_seven2law[(yr >> 1) & 0x7f];
		k = 0; /* start with new decoded frame */
	}

	/* write current count and sync */
	dsp->bf_decrypt_in_pos = j;
	dsp->bf_decrypt_out_pos = k;
	dsp->bf_sync = sync;
}

/*
 * used to encrypt S and P boxes
 *
 * Standard 16-round Blowfish block encryption of src[0]:src[1] into
 * dst[0]:dst[1] (halves swapped on output, as the algorithm requires).
 */
static inline void
encrypt_block(const u32 *P, const u32 *S, u32 *dst, u32 *src)
{
	u32 yl = src[0];
	u32 yr = src[1];

	EROUND(yr, yl, 0);
	EROUND(yl, yr, 1);
	EROUND(yr, yl, 2);
	EROUND(yl, yr, 3);
	EROUND(yr, yl, 4);
	EROUND(yl, yr, 5);
	EROUND(yr, yl, 6);
	EROUND(yl, yr, 7);
	EROUND(yr, yl, 8);
	EROUND(yl, yr, 9);
	EROUND(yr, yl, 10);
	EROUND(yl, yr, 11);
	EROUND(yr, yl, 12);
	EROUND(yl, yr, 13);
	EROUND(yr, yl, 14);
	EROUND(yl, yr, 15);

	yl ^= P[16];
	yr ^= P[17];

	dst[0] = yr;
	dst[1] = yl;
}

/*
 * initialize the dsp for encryption and decryption using the same key
 * Calculates the blowfish S and P boxes for encryption and decryption.
 * The margin of keylen must be 4-56 bytes.
 * returns 0 if ok, 1 if the key length is out of range.
 */
int
dsp_bf_init(struct dsp *dsp, const u8 *key, uint keylen)
{
	short i, j, count;
	u32 data[2], temp;
	u32 *P = (u32 *)dsp->bf_p;
	u32 *S = (u32 *)dsp->bf_s;

	if (keylen < 4 || keylen > 56)
		return 1;

	/* Set dsp states: prime output buffers and reset stream positions */
	i = 0;
	while (i < 9) {
		dsp->bf_crypt_out[i] = 0xff;
		dsp->bf_data_out[i] = dsp_silence;
		i++;
	}
	dsp->bf_crypt_pos = 0;
	dsp->bf_decrypt_in_pos = 0;
	dsp->bf_decrypt_out_pos = 0;
	dsp->bf_sync = 0x1ff;
	dsp->bf_enable = 1;

	/* Copy the initialization s-boxes */
	for (i = 0, count = 0; i < 256; i++)
		for (j = 0; j < 4; j++, count++)
			S[count] = bf_sbox[count];

	/* Set the p-boxes */
	for (i = 0; i < 16 + 2; i++)
		P[i] = bf_pbox[i];

	/* Actual subkey generation: XOR the cyclically repeated key into P */
	for (j = 0, i = 0; i < 16 + 2; i++) {
		temp = (((u32)key[j] << 24) |
			((u32)key[(j + 1) % keylen] << 16) |
			((u32)key[(j + 2) % keylen] << 8) |
			((u32)key[(j + 3) % keylen]));
		P[i] = P[i] ^ temp;
		j = (j + 4) % keylen;
	}

	data[0] = 0x00000000;
	data[1] = 0x00000000;

	/* Replace P, then S, with successive encryptions of the zero block */
	for (i = 0; i < 16 + 2; i += 2) {
		encrypt_block(P, S, data, data);
		P[i] = data[0];
		P[i + 1] = data[1];
	}

	for (i = 0; i < 4; i++) {
		for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
			encrypt_block(P, S, data, data);
			S[count] = data[0];
			S[count + 1] = data[1];
		}
	}

	return 0;
}

/*
 * turn encryption off
 */
void
dsp_bf_cleanup(struct dsp *dsp)
{
	dsp->bf_enable = 0;
}
gpl-2.0
estiko/android_kernel_cyanogen_msm8916
arch/x86/kernel/bootflag.c
12705
1674
/* * Implement 'Simple Boot Flag Specification 2.0' */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/acpi.h> #include <asm/io.h> #include <linux/mc146818rtc.h> #define SBF_RESERVED (0x78) #define SBF_PNPOS (1<<0) #define SBF_BOOTING (1<<1) #define SBF_DIAG (1<<2) #define SBF_PARITY (1<<7) int sbf_port __initdata = -1; /* set via acpi_boot_init() */ static int __init parity(u8 v) { int x = 0; int i; for (i = 0; i < 8; i++) { x ^= (v & 1); v >>= 1; } return x; } static void __init sbf_write(u8 v) { unsigned long flags; if (sbf_port != -1) { v &= ~SBF_PARITY; if (!parity(v)) v |= SBF_PARITY; printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n", sbf_port, v); spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(v, sbf_port); spin_unlock_irqrestore(&rtc_lock, flags); } } static u8 __init sbf_read(void) { unsigned long flags; u8 v; if (sbf_port == -1) return 0; spin_lock_irqsave(&rtc_lock, flags); v = CMOS_READ(sbf_port); spin_unlock_irqrestore(&rtc_lock, flags); return v; } static int __init sbf_value_valid(u8 v) { if (v & SBF_RESERVED) /* Reserved bits */ return 0; if (!parity(v)) return 0; return 1; } static int __init sbf_init(void) { u8 v; if (sbf_port == -1) return 0; v = sbf_read(); if (!sbf_value_valid(v)) { printk(KERN_WARNING "Simple Boot Flag value 0x%x read from " "CMOS RAM was invalid\n", v); } v &= ~SBF_RESERVED; v &= ~SBF_BOOTING; v &= ~SBF_DIAG; #if defined(CONFIG_ISAPNP) v |= SBF_PNPOS; #endif sbf_write(v); return 0; } module_init(sbf_init);
gpl-2.0
JudsonWilson/CS244_RC3_Kernel
drivers/atm/he.c
162
78071
/* he.c ForeRunnerHE ATM Adapter driver for ATM on Linux Copyright (C) 1999-2001 Naval Research Laboratory This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* he.c ForeRunnerHE ATM Adapter driver for ATM on Linux Copyright (C) 1999-2001 Naval Research Laboratory Permission to use, copy, modify and distribute this software and its documentation is hereby granted, provided that both the copyright notice and this permission notice appear in all copies of the software, derivative works or modified versions, and any portions thereof, and that both notices appear in supporting documentation. NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. This driver was written using the "Programmer's Reference Manual for ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98. 
AUTHORS: chas williams <chas@cmf.nrl.navy.mil> eric kinzie <ekinzie@cmf.nrl.navy.mil> NOTES: 4096 supported 'connections' group 0 is used for all traffic interrupt queue 0 is used for all interrupts aal0 support (based on work from ulrich.u.muller@nokia.com) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/bitmap.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/sonet.h> #undef USE_SCATTERGATHER #undef USE_CHECKSUM_HW /* still confused about this */ /* #undef HE_DEBUG */ #include "he.h" #include "suni.h" #include <linux/atm_he.h> #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args) #ifdef HE_DEBUG #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args) #else /* !HE_DEBUG */ #define HPRINTK(fmt,args...) 
do { } while (0) #endif /* HE_DEBUG */ /* declarations */ static int he_open(struct atm_vcc *vcc); static void he_close(struct atm_vcc *vcc); static int he_send(struct atm_vcc *vcc, struct sk_buff *skb); static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); static irqreturn_t he_irq_handler(int irq, void *dev_id); static void he_tasklet(unsigned long data); static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page); static int he_start(struct atm_dev *dev); static void he_stop(struct he_dev *dev); static void he_phy_put(struct atm_dev *, unsigned char, unsigned long); static unsigned char he_phy_get(struct atm_dev *, unsigned long); static u8 read_prom_byte(struct he_dev *he_dev, int addr); /* globals */ static struct he_dev *he_devs; static bool disable64; static short nvpibits = -1; static short nvcibits = -1; static short rx_skb_reserve = 16; static bool irq_coalesce = 1; static bool sdh = 0; /* Read from EEPROM = 0000 0011b */ static unsigned int readtab[] = { CS_HIGH | CLK_HIGH, CS_LOW | CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW, CLK_HIGH, /* 0 */ CLK_LOW | SI_HIGH, CLK_HIGH | SI_HIGH, /* 1 */ CLK_LOW | SI_HIGH, CLK_HIGH | SI_HIGH /* 1 */ }; /* Clock to read from/write to the EEPROM */ static unsigned int clocktab[] = { CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW, CLK_HIGH, CLK_LOW }; static struct atmdev_ops he_ops = { .open = he_open, .close = he_close, .ioctl = he_ioctl, .send = he_send, .phy_put = he_phy_put, .phy_get = he_phy_get, .proc_read = he_proc_read, .owner = THIS_MODULE }; #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0) #define he_readl(dev, reg) readl((dev)->membase + (reg)) /* section 2.12 connection memory access */ static __inline__ void he_writel_internal(struct he_dev 
*he_dev, unsigned val, unsigned addr, unsigned flags) { he_writel(he_dev, val, CON_DAT); (void) he_readl(he_dev, CON_DAT); /* flush posted writes */ he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL); while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); } #define he_writel_rcm(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_RCM) #define he_writel_tcm(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_TCM) #define he_writel_mbox(dev, val, reg) \ he_writel_internal(dev, val, reg, CON_CTL_MBOX) static unsigned he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags) { he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL); while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); return he_readl(he_dev, CON_DAT); } #define he_readl_rcm(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_RCM) #define he_readl_tcm(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_TCM) #define he_readl_mbox(dev, reg) \ he_readl_internal(dev, reg, CON_CTL_MBOX) /* figure 2.2 connection id */ #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff) /* 2.5.1 per connection transmit state registers */ #define he_writel_tsr0(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0) #define he_readl_tsr0(dev, cid) \ he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0) #define he_writel_tsr1(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1) #define he_writel_tsr2(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2) #define he_writel_tsr3(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3) #define he_writel_tsr4(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4) /* from page 2-20 * * NOTE While the transmit connection is active, bits 23 through 0 * of this register must not be written by the host. Byte * enables should be used during normal operation when writing * the most significant byte. 
*/ #define he_writel_tsr4_upper(dev, val, cid) \ he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \ CON_CTL_TCM \ | CON_BYTE_DISABLE_2 \ | CON_BYTE_DISABLE_1 \ | CON_BYTE_DISABLE_0) #define he_readl_tsr4(dev, cid) \ he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4) #define he_writel_tsr5(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5) #define he_writel_tsr6(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6) #define he_writel_tsr7(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7) #define he_writel_tsr8(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0) #define he_writel_tsr9(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1) #define he_writel_tsr10(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2) #define he_writel_tsr11(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3) #define he_writel_tsr12(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0) #define he_writel_tsr13(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1) #define he_writel_tsr14(dev, val, cid) \ he_writel_tcm(dev, val, CONFIG_TSRD | cid) #define he_writel_tsr14_upper(dev, val, cid) \ he_writel_internal(dev, val, CONFIG_TSRD | cid, \ CON_CTL_TCM \ | CON_BYTE_DISABLE_2 \ | CON_BYTE_DISABLE_1 \ | CON_BYTE_DISABLE_0) /* 2.7.1 per connection receive state registers */ #define he_writel_rsr0(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0) #define he_readl_rsr0(dev, cid) \ he_readl_rcm(dev, 0x00000 | (cid << 3) | 0) #define he_writel_rsr1(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1) #define he_writel_rsr2(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2) #define he_writel_rsr3(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3) #define he_writel_rsr4(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4) #define he_writel_rsr5(dev, val, 
cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5) #define he_writel_rsr6(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6) #define he_writel_rsr7(dev, val, cid) \ he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7) static __inline__ struct atm_vcc* __find_vcc(struct he_dev *he_dev, unsigned cid) { struct hlist_head *head; struct atm_vcc *vcc; struct sock *s; short vpi; int vci; vpi = cid >> he_dev->vcibits; vci = cid & ((1 << he_dev->vcibits) - 1); head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, head) { vcc = atm_sk(s); if (vcc->dev == he_dev->atm_dev && vcc->vci == vci && vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) { return vcc; } } return NULL; } static int he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { struct atm_dev *atm_dev = NULL; struct he_dev *he_dev = NULL; int err = 0; printk(KERN_INFO "ATM he driver\n"); if (pci_enable_device(pci_dev)) return -EIO; if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) { printk(KERN_WARNING "he: no suitable dma available\n"); err = -EIO; goto init_one_failure; } atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL); if (!atm_dev) { err = -ENODEV; goto init_one_failure; } pci_set_drvdata(pci_dev, atm_dev); he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL); if (!he_dev) { err = -ENOMEM; goto init_one_failure; } he_dev->pci_dev = pci_dev; he_dev->atm_dev = atm_dev; he_dev->atm_dev->dev_data = he_dev; atm_dev->dev_data = he_dev; he_dev->number = atm_dev->number; tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); spin_lock_init(&he_dev->global_lock); if (he_start(atm_dev)) { he_stop(he_dev); err = -ENODEV; goto init_one_failure; } he_dev->next = NULL; if (he_devs) he_dev->next = he_devs; he_devs = he_dev; return 0; init_one_failure: if (atm_dev) atm_dev_deregister(atm_dev); kfree(he_dev); pci_disable_device(pci_dev); return err; } static void he_remove_one(struct pci_dev *pci_dev) { struct atm_dev *atm_dev; 
struct he_dev *he_dev; atm_dev = pci_get_drvdata(pci_dev); he_dev = HE_DEV(atm_dev); /* need to remove from he_devs */ he_stop(he_dev); atm_dev_deregister(atm_dev); kfree(he_dev); pci_set_drvdata(pci_dev, NULL); pci_disable_device(pci_dev); } static unsigned rate_to_atmf(unsigned rate) /* cps to atm forum format */ { #define NONZERO (1 << 14) unsigned exp = 0; if (rate == 0) return 0; rate <<= 9; while (rate > 0x3ff) { ++exp; rate >>= 1; } return (NONZERO | (exp << 9) | (rate & 0x1ff)); } static void he_init_rx_lbfp0(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row; lbufd_index = 0; lbm_offset = he_readl(he_dev, RCMLBM_BA); he_writel(he_dev, lbufd_index, RLBF0_H); for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) { lbufd_index += 2; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 4; } he_writel(he_dev, lbufd_index - 2, RLBF0_T); he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C); } static void he_init_rx_lbfp1(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row; lbufd_index = 1; lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); he_writel(he_dev, lbufd_index, RLBF1_H); for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) { lbufd_index += 2; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); 
he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 4; } he_writel(he_dev, lbufd_index - 2, RLBF1_T); he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C); } static void he_init_tx_lbfp(struct he_dev *he_dev) { unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row; lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs; lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); he_writel(he_dev, lbufd_index, TLBF_H); for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) { lbufd_index += 1; lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; he_writel_rcm(he_dev, lbuf_addr, lbm_offset); he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); if (++lbuf_count == lbufs_per_row) { lbuf_count = 0; row_offset += he_dev->bytes_per_row; } lbm_offset += 2; } he_writel(he_dev, lbufd_index - 1, TLBF_T); } static int he_init_tpdrq(struct he_dev *he_dev) { he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys); if (he_dev->tpdrq_base == NULL) { hprintk("failed to alloc tpdrq\n"); return -ENOMEM; } memset(he_dev->tpdrq_base, 0, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq)); he_dev->tpdrq_tail = he_dev->tpdrq_base; he_dev->tpdrq_head = he_dev->tpdrq_base; he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H); he_writel(he_dev, 0, TPDRQ_T); he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S); return 0; } static void he_init_cs_block(struct he_dev *he_dev) { unsigned clock, rate, delta; int reg; /* 5.1.7 cs block initialization */ for (reg = 0; reg < 0x20; ++reg) he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg); /* rate grid timer reload values */ clock = he_is622(he_dev) ? 
66667000 : 50000000; rate = he_dev->atm_dev->link_rate; delta = rate / 16 / 2; for (reg = 0; reg < 0x10; ++reg) { /* 2.4 internal transmit function * * we initialize the first row in the rate grid. * values are period (in clock cycles) of timer */ unsigned period = clock / rate; he_writel_mbox(he_dev, period, CS_TGRLD0 + reg); rate -= delta; } if (he_is622(he_dev)) { /* table 5.2 (4 cells per lbuf) */ he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0); he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1); he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2); he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3); he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4); /* table 5.3, 5.4, 5.5, 5.6, 5.7 */ he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0); he_writel_mbox(he_dev, 0x1801, CS_ERCTL1); he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2); he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1); he_writel_mbox(he_dev, 0x14585, CS_RTFWR); he_writel_mbox(he_dev, 0x4680, CS_RTATR); /* table 5.8 */ he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET); he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX); he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN); he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC); he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC); he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL); /* table 5.9 */ he_writel_mbox(he_dev, 0x5, CS_OTPPER); he_writel_mbox(he_dev, 0x14, CS_OTWPER); } else { /* table 5.1 (4 cells per lbuf) */ he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0); he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1); he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2); he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3); he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4); /* table 5.3, 5.4, 5.5, 5.6, 5.7 */ he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0); he_writel_mbox(he_dev, 0x4701, CS_ERCTL1); he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2); he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0); he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1); he_writel_mbox(he_dev, 0xf424, CS_RTFWR); he_writel_mbox(he_dev, 0x4680, CS_RTATR); /* table 5.8 */ 
he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET); he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX); he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN); he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC); he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC); he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL); /* table 5.9 */ he_writel_mbox(he_dev, 0x6, CS_OTPPER); he_writel_mbox(he_dev, 0x1e, CS_OTWPER); } he_writel_mbox(he_dev, 0x8, CS_OTTLIM); for (reg = 0; reg < 0x8; ++reg) he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg); } static int he_init_cs_block_rcm(struct he_dev *he_dev) { unsigned (*rategrid)[16][16]; unsigned rate, delta; int i, j, reg; unsigned rate_atmf, exp, man; unsigned long long rate_cps; int mult, buf, buf_limit = 4; rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL); if (!rategrid) return -ENOMEM; /* initialize rate grid group table */ for (reg = 0x0; reg < 0xff; ++reg) he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); /* initialize rate controller groups */ for (reg = 0x100; reg < 0x1ff; ++reg) he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); /* initialize tNrm lookup table */ /* the manual makes reference to a routine in a sample driver for proper configuration; fortunately, we only need this in order to support abr connection */ /* initialize rate to group table */ rate = he_dev->atm_dev->link_rate; delta = rate / 32; /* * 2.4 transmit internal functions * * we construct a copy of the rate grid used by the scheduler * in order to construct the rate to group table below */ for (j = 0; j < 16; j++) { (*rategrid)[0][j] = rate; rate -= delta; } for (i = 1; i < 16; i++) for (j = 0; j < 16; j++) if (i > 14) (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4; else (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2; /* * 2.4 transmit internal function * * this table maps the upper 5 bits of exponent and mantissa * of the atm forum representation of the rate into an index * on rate grid */ rate_atmf = 0; while (rate_atmf < 0x400) { man = (rate_atmf & 0x1f) << 4; exp = rate_atmf >> 5; /* instead of '/ 
512', use '>> 9' to prevent a call to divdu3 on x86 platforms */ rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; if (rate_cps < 10) rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ for (i = 255; i > 0; i--) if ((*rategrid)[i/16][i%16] >= rate_cps) break; /* pick nearest rate instead? */ /* * each table entry is 16 bits: (rate grid index (8 bits) * and a buffer limit (8 bits) * there are two table entries in each 32-bit register */ #ifdef notdef buf = rate_cps * he_dev->tx_numbuffs / (he_dev->atm_dev->link_rate * 2); #else /* this is pretty, but avoids _divdu3 and is mostly correct */ mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; if (rate_cps > (272 * mult)) buf = 4; else if (rate_cps > (204 * mult)) buf = 3; else if (rate_cps > (136 * mult)) buf = 2; else if (rate_cps > (68 * mult)) buf = 1; else buf = 0; #endif if (buf > buf_limit) buf = buf_limit; reg = (reg << 16) | ((i << 8) | buf); #define RTGTBL_OFFSET 0x400 if (rate_atmf & 0x1) he_writel_rcm(he_dev, reg, CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1)); ++rate_atmf; } kfree(rategrid); return 0; } static int he_init_group(struct he_dev *he_dev, int group) { struct he_buff *heb, *next; dma_addr_t mapping; int i; he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), G0_RBPS_BS + (group * 32)); /* bitmap table */ he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE) * sizeof(unsigned long), GFP_KERNEL); if (!he_dev->rbpl_table) { hprintk("unable to allocate rbpl bitmap table\n"); return -ENOMEM; } bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE); /* rbpl_virt 64-bit pointers */ he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE * sizeof(struct he_buff *), GFP_KERNEL); if (!he_dev->rbpl_virt) { hprintk("unable to allocate rbpl virt table\n"); goto out_free_rbpl_table; } /* large buffer pool */ he_dev->rbpl_pool = pci_pool_create("rbpl", 
he_dev->pci_dev, CONFIG_RBPL_BUFSIZE, 64, 0); if (he_dev->rbpl_pool == NULL) { hprintk("unable to create rbpl pool\n"); goto out_free_rbpl_virt; } he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys); if (he_dev->rbpl_base == NULL) { hprintk("failed to alloc rbpl_base\n"); goto out_destroy_rbpl_pool; } memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); INIT_LIST_HEAD(&he_dev->rbpl_outstanding); for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping); if (!heb) goto out_free_rbpl; heb->mapping = mapping; list_add(&heb->entry, &he_dev->rbpl_outstanding); set_bit(i, he_dev->rbpl_table); he_dev->rbpl_virt[i] = heb; he_dev->rbpl_hint = i + 1; he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET; he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data); } he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T + (group * 32)); he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4, G0_RBPL_BS + (group * 32)); he_writel(he_dev, RBP_THRESH(CONFIG_RBPL_THRESH) | RBP_QSIZE(CONFIG_RBPL_SIZE - 1) | RBP_INT_ENB, G0_RBPL_QI + (group * 32)); /* rx buffer ready queue */ he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); if (he_dev->rbrq_base == NULL) { hprintk("failed to allocate rbrq\n"); goto out_free_rbpl; } memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); he_dev->rbrq_head = he_dev->rbrq_base; he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16)); he_writel(he_dev, 0, G0_RBRQ_H + (group * 16)); he_writel(he_dev, RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1), G0_RBRQ_Q + (group * 16)); if (irq_coalesce) { hprintk("coalescing interrupts\n"); he_writel(he_dev, 
RBRQ_TIME(768) | RBRQ_COUNT(7), G0_RBRQ_I + (group * 16)); } else he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1), G0_RBRQ_I + (group * 16)); /* tx buffer ready queue */ he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys); if (he_dev->tbrq_base == NULL) { hprintk("failed to allocate tbrq\n"); goto out_free_rbpq_base; } memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq)); he_dev->tbrq_head = he_dev->tbrq_base; he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16)); he_writel(he_dev, 0, G0_TBRQ_H + (group * 16)); he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16)); he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16)); return 0; out_free_rbpq_base: pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), he_dev->rbrq_base, he_dev->rbrq_phys); out_free_rbpl: list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); out_destroy_rbpl_pool: pci_pool_destroy(he_dev->rbpl_pool); out_free_rbpl_virt: kfree(he_dev->rbpl_virt); out_free_rbpl_table: kfree(he_dev->rbpl_table); return -ENOMEM; } static int he_init_irq(struct he_dev *he_dev) { int i; /* 2.9.3.5 tail offset for each interrupt queue is located after the end of the interrupt queue */ he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys); if (he_dev->irq_base == NULL) { hprintk("failed to allocate irq\n"); return -ENOMEM; } he_dev->irq_tailoffset = (unsigned *) &he_dev->irq_base[CONFIG_IRQ_SIZE]; *he_dev->irq_tailoffset = 0; he_dev->irq_head = he_dev->irq_base; he_dev->irq_tail = he_dev->irq_base; for (i = 0; i < CONFIG_IRQ_SIZE; ++i) he_dev->irq_base[i].isw = ITYPE_INVALID; he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE); 
he_writel(he_dev, IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH), IRQ0_HEAD); he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL); he_writel(he_dev, 0x0, IRQ0_DATA); he_writel(he_dev, 0x0, IRQ1_BASE); he_writel(he_dev, 0x0, IRQ1_HEAD); he_writel(he_dev, 0x0, IRQ1_CNTL); he_writel(he_dev, 0x0, IRQ1_DATA); he_writel(he_dev, 0x0, IRQ2_BASE); he_writel(he_dev, 0x0, IRQ2_HEAD); he_writel(he_dev, 0x0, IRQ2_CNTL); he_writel(he_dev, 0x0, IRQ2_DATA); he_writel(he_dev, 0x0, IRQ3_BASE); he_writel(he_dev, 0x0, IRQ3_HEAD); he_writel(he_dev, 0x0, IRQ3_CNTL); he_writel(he_dev, 0x0, IRQ3_DATA); /* 2.9.3.2 interrupt queue mapping registers */ he_writel(he_dev, 0x0, GRP_10_MAP); he_writel(he_dev, 0x0, GRP_32_MAP); he_writel(he_dev, 0x0, GRP_54_MAP); he_writel(he_dev, 0x0, GRP_76_MAP); if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) { hprintk("irq %d already in use\n", he_dev->pci_dev->irq); return -EINVAL; } he_dev->irq = he_dev->pci_dev->irq; return 0; } static int he_start(struct atm_dev *dev) { struct he_dev *he_dev; struct pci_dev *pci_dev; unsigned long membase; u16 command; u32 gen_cntl_0, host_cntl, lb_swap; u8 cache_size, timer; unsigned err; unsigned int status, reg; int i, group; he_dev = HE_DEV(dev); pci_dev = he_dev->pci_dev; membase = pci_resource_start(pci_dev, 0); HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq); /* * pci bus controller initialization */ /* 4.3 pci bus controller-specific initialization */ if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) { hprintk("can't read GEN_CNTL_0\n"); return -EINVAL; } gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT); if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) { hprintk("can't write GEN_CNTL_0.\n"); return -EINVAL; } if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) { hprintk("can't read PCI_COMMAND.\n"); return -EINVAL; } command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE); if 
(pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) { hprintk("can't enable memory.\n"); return -EINVAL; } if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) { hprintk("can't read cache line size?\n"); return -EINVAL; } if (cache_size < 16) { cache_size = 16; if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size)) hprintk("can't set cache line size to %d\n", cache_size); } if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) { hprintk("can't read latency timer?\n"); return -EINVAL; } /* from table 3.9 * * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE * * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles] * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles] * */ #define LAT_TIMER 209 if (timer < LAT_TIMER) { HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER); timer = LAT_TIMER; if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer)) hprintk("can't set latency timer to %d\n", timer); } if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) { hprintk("can't set up page mapping\n"); return -EINVAL; } /* 4.4 card reset */ he_writel(he_dev, 0x0, RESET_CNTL); he_writel(he_dev, 0xff, RESET_CNTL); msleep(16); /* 16 ms */ status = he_readl(he_dev, RESET_CNTL); if ((status & BOARD_RST_STATUS) == 0) { hprintk("reset failed\n"); return -EINVAL; } /* 4.5 set bus width */ host_cntl = he_readl(he_dev, HOST_CNTL); if (host_cntl & PCI_BUS_SIZE64) gen_cntl_0 |= ENBL_64; else gen_cntl_0 &= ~ENBL_64; if (disable64 == 1) { hprintk("disabling 64-bit pci bus transfers\n"); gen_cntl_0 &= ~ENBL_64; } if (gen_cntl_0 & ENBL_64) hprintk("64-bit transfers enabled\n"); pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); /* 4.7 read prom contents */ for (i = 0; i < PROD_ID_LEN; ++i) he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i); he_dev->media = read_prom_byte(he_dev, MEDIA); for (i = 0; i < 6; ++i) dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i); 
hprintk("%s%s, %pM\n", he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM", dev->esi); he_dev->atm_dev->link_rate = he_is622(he_dev) ? ATM_OC12_PCR : ATM_OC3_PCR; /* 4.6 set host endianess */ lb_swap = he_readl(he_dev, LB_SWAP); if (he_is622(he_dev)) lb_swap &= ~XFER_SIZE; /* 4 cells */ else lb_swap |= XFER_SIZE; /* 8 cells */ #ifdef __BIG_ENDIAN lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST; #else lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST | DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP); #endif /* __BIG_ENDIAN */ he_writel(he_dev, lb_swap, LB_SWAP); /* 4.8 sdram controller initialization */ he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL); /* 4.9 initialize rnum value */ lb_swap |= SWAP_RNUM_MAX(0xf); he_writel(he_dev, lb_swap, LB_SWAP); /* 4.10 initialize the interrupt queues */ if ((err = he_init_irq(he_dev)) != 0) return err; /* 4.11 enable pci bus controller state machines */ host_cntl |= (OUTFF_ENB | CMDFF_ENB | QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB); he_writel(he_dev, host_cntl, HOST_CNTL); gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB; pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); /* * atm network controller initialization */ /* 5.1.1 generic configuration state */ /* * local (cell) buffer memory map * * HE155 HE622 * * 0 ____________1023 bytes 0 _______________________2047 bytes * | | | | | * | utility | | rx0 | | * 5|____________| 255|___________________| u | * 6| | 256| | t | * | | | | i | * | rx0 | row | tx | l | * | | | | i | * | | 767|___________________| t | * 517|____________| 768| | y | * row 518| | | rx1 | | * | | 1023|___________________|___| * | | * | tx | * | | * | | * 1535|____________| * 1536| | * | rx1 | * 2047|____________| * */ /* total 4096 connections */ he_dev->vcibits = CONFIG_DEFAULT_VCIBITS; he_dev->vpibits = CONFIG_DEFAULT_VPIBITS; if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) { hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS); return -ENODEV; } if 
(nvpibits != -1) { he_dev->vpibits = nvpibits; he_dev->vcibits = HE_MAXCIDBITS - nvpibits; } if (nvcibits != -1) { he_dev->vcibits = nvcibits; he_dev->vpibits = HE_MAXCIDBITS - nvcibits; } if (he_is622(he_dev)) { he_dev->cells_per_row = 40; he_dev->bytes_per_row = 2048; he_dev->r0_numrows = 256; he_dev->tx_numrows = 512; he_dev->r1_numrows = 256; he_dev->r0_startrow = 0; he_dev->tx_startrow = 256; he_dev->r1_startrow = 768; } else { he_dev->cells_per_row = 20; he_dev->bytes_per_row = 1024; he_dev->r0_numrows = 512; he_dev->tx_numrows = 1018; he_dev->r1_numrows = 512; he_dev->r0_startrow = 6; he_dev->tx_startrow = 518; he_dev->r1_startrow = 1536; } he_dev->cells_per_lbuf = 4; he_dev->buffer_limit = 4; he_dev->r0_numbuffs = he_dev->r0_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf; if (he_dev->r0_numbuffs > 2560) he_dev->r0_numbuffs = 2560; he_dev->r1_numbuffs = he_dev->r1_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf; if (he_dev->r1_numbuffs > 2560) he_dev->r1_numbuffs = 2560; he_dev->tx_numbuffs = he_dev->tx_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf; if (he_dev->tx_numbuffs > 5120) he_dev->tx_numbuffs = 5120; /* 5.1.2 configure hardware dependent registers */ he_writel(he_dev, SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) | RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) | (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) | (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)), LBARB); he_writel(he_dev, BANK_ON | (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)), SDRAMCON); he_writel(he_dev, (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) | RM_RW_WAIT(1), RCMCONFIG); he_writel(he_dev, (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) | TM_RW_WAIT(1), TCMCONFIG); he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG); he_writel(he_dev, (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) | (he_is622(he_dev) ? 
RC_UT_MODE(0) : RC_UT_MODE(1)) | RX_VALVP(he_dev->vpibits) | RX_VALVC(he_dev->vcibits), RC_CONFIG); he_writel(he_dev, DRF_THRESH(0x20) | (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) | TX_VCI_MASK(he_dev->vcibits) | LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG); he_writel(he_dev, 0x0, TXAAL5_PROTO); he_writel(he_dev, PHY_INT_ENB | (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)), RH_CONFIG); /* 5.1.3 initialize connection memory */ for (i = 0; i < TCM_MEM_SIZE; ++i) he_writel_tcm(he_dev, 0, i); for (i = 0; i < RCM_MEM_SIZE; ++i) he_writel_rcm(he_dev, 0, i); /* * transmit connection memory map * * tx memory * 0x0 ___________________ * | | * | | * | TSRa | * | | * | | * 0x8000|___________________| * | | * | TSRb | * 0xc000|___________________| * | | * | TSRc | * 0xe000|___________________| * | TSRd | * 0xf000|___________________| * | tmABR | * 0x10000|___________________| * | | * | tmTPD | * |___________________| * | | * .... * 0x1ffff|___________________| * * */ he_writel(he_dev, CONFIG_TSRB, TSRB_BA); he_writel(he_dev, CONFIG_TSRC, TSRC_BA); he_writel(he_dev, CONFIG_TSRD, TSRD_BA); he_writel(he_dev, CONFIG_TMABR, TMABR_BA); he_writel(he_dev, CONFIG_TPDBA, TPD_BA); /* * receive connection memory map * * 0x0 ___________________ * | | * | | * | RSRa | * | | * | | * 0x8000|___________________| * | | * | rx0/1 | * | LBM | link lists of local * | tx | buffer memory * | | * 0xd000|___________________| * | | * | rmABR | * 0xe000|___________________| * | | * | RSRb | * |___________________| * | | * .... 
* 0xffff|___________________| */ he_writel(he_dev, 0x08000, RCMLBM_BA); he_writel(he_dev, 0x0e000, RCMRSRB_BA); he_writel(he_dev, 0x0d800, RCMABR_BA); /* 5.1.4 initialize local buffer free pools linked lists */ he_init_rx_lbfp0(he_dev); he_init_rx_lbfp1(he_dev); he_writel(he_dev, 0x0, RLBC_H); he_writel(he_dev, 0x0, RLBC_T); he_writel(he_dev, 0x0, RLBC_H2); he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */ he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */ he_init_tx_lbfp(he_dev); he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA); /* 5.1.5 initialize intermediate receive queues */ if (he_is622(he_dev)) { he_writel(he_dev, 0x000f, G0_INMQ_S); he_writel(he_dev, 0x200f, G0_INMQ_L); he_writel(he_dev, 0x001f, G1_INMQ_S); he_writel(he_dev, 0x201f, G1_INMQ_L); he_writel(he_dev, 0x002f, G2_INMQ_S); he_writel(he_dev, 0x202f, G2_INMQ_L); he_writel(he_dev, 0x003f, G3_INMQ_S); he_writel(he_dev, 0x203f, G3_INMQ_L); he_writel(he_dev, 0x004f, G4_INMQ_S); he_writel(he_dev, 0x204f, G4_INMQ_L); he_writel(he_dev, 0x005f, G5_INMQ_S); he_writel(he_dev, 0x205f, G5_INMQ_L); he_writel(he_dev, 0x006f, G6_INMQ_S); he_writel(he_dev, 0x206f, G6_INMQ_L); he_writel(he_dev, 0x007f, G7_INMQ_S); he_writel(he_dev, 0x207f, G7_INMQ_L); } else { he_writel(he_dev, 0x0000, G0_INMQ_S); he_writel(he_dev, 0x0008, G0_INMQ_L); he_writel(he_dev, 0x0001, G1_INMQ_S); he_writel(he_dev, 0x0009, G1_INMQ_L); he_writel(he_dev, 0x0002, G2_INMQ_S); he_writel(he_dev, 0x000a, G2_INMQ_L); he_writel(he_dev, 0x0003, G3_INMQ_S); he_writel(he_dev, 0x000b, G3_INMQ_L); he_writel(he_dev, 0x0004, G4_INMQ_S); he_writel(he_dev, 0x000c, G4_INMQ_L); he_writel(he_dev, 0x0005, G5_INMQ_S); he_writel(he_dev, 0x000d, G5_INMQ_L); he_writel(he_dev, 0x0006, G6_INMQ_S); he_writel(he_dev, 0x000e, G6_INMQ_L); he_writel(he_dev, 0x0007, G7_INMQ_S); he_writel(he_dev, 0x000f, G7_INMQ_L); } /* 5.1.6 application tunable parameters */ he_writel(he_dev, 0x0, MCC); he_writel(he_dev, 0x0, OEC); he_writel(he_dev, 0x0, 
DCC); he_writel(he_dev, 0x0, CEC); /* 5.1.7 cs block initialization */ he_init_cs_block(he_dev); /* 5.1.8 cs block connection memory initialization */ if (he_init_cs_block_rcm(he_dev) < 0) return -ENOMEM; /* 5.1.10 initialize host structures */ he_init_tpdrq(he_dev); he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev, sizeof(struct he_tpd), TPD_ALIGNMENT, 0); if (he_dev->tpd_pool == NULL) { hprintk("unable to create tpd pci_pool\n"); return -ENOMEM; } INIT_LIST_HEAD(&he_dev->outstanding_tpds); if (he_init_group(he_dev, 0) != 0) return -ENOMEM; for (group = 1; group < HE_NUM_GROUPS; ++group) { he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), G0_RBPS_BS + (group * 32)); he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32)); he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32)); he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), G0_RBPL_QI + (group * 32)); he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32)); he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16)); he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16)); he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0), G0_RBRQ_Q + (group * 16)); he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16)); he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16)); he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16)); he_writel(he_dev, TBRQ_THRESH(0x1), G0_TBRQ_THRESH + (group * 16)); he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16)); } /* host status page */ he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev, sizeof(struct he_hsp), &he_dev->hsp_phys); if (he_dev->hsp == NULL) { hprintk("failed to allocate host status page\n"); return -ENOMEM; } memset(he_dev->hsp, 0, sizeof(struct he_hsp)); he_writel(he_dev, he_dev->hsp_phys, HSP_BA); /* initialize framer */ #ifdef CONFIG_ATM_HE_USE_SUNI if (he_isMM(he_dev)) suni_init(he_dev->atm_dev); if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start) 
he_dev->atm_dev->phy->start(he_dev->atm_dev); #endif /* CONFIG_ATM_HE_USE_SUNI */ if (sdh) { /* this really should be in suni.c but for now... */ int val; val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM); val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT); he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM); he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP); } /* 5.1.12 enable transmit and receive */ reg = he_readl_mbox(he_dev, CS_ERCTL0); reg |= TX_ENABLE|ER_ENABLE; he_writel_mbox(he_dev, reg, CS_ERCTL0); reg = he_readl(he_dev, RC_CONFIG); reg |= RX_ENABLE; he_writel(he_dev, reg, RC_CONFIG); for (i = 0; i < HE_NUM_CS_STPER; ++i) { he_dev->cs_stper[i].inuse = 0; he_dev->cs_stper[i].pcr = -1; } he_dev->total_bw = 0; /* atm linux initialization */ he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits; he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits; he_dev->irq_peak = 0; he_dev->rbrq_peak = 0; he_dev->rbpl_peak = 0; he_dev->tbrq_peak = 0; HPRINTK("hell bent for leather!\n"); return 0; } static void he_stop(struct he_dev *he_dev) { struct he_buff *heb, *next; struct pci_dev *pci_dev; u32 gen_cntl_0, reg; u16 command; pci_dev = he_dev->pci_dev; /* disable interrupts */ if (he_dev->membase) { pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0); gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB); pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); tasklet_disable(&he_dev->tasklet); /* disable recv and transmit */ reg = he_readl_mbox(he_dev, CS_ERCTL0); reg &= ~(TX_ENABLE|ER_ENABLE); he_writel_mbox(he_dev, reg, CS_ERCTL0); reg = he_readl(he_dev, RC_CONFIG); reg &= ~(RX_ENABLE); he_writel(he_dev, reg, RC_CONFIG); } #ifdef CONFIG_ATM_HE_USE_SUNI if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop) he_dev->atm_dev->phy->stop(he_dev->atm_dev); #endif /* CONFIG_ATM_HE_USE_SUNI */ if (he_dev->irq) free_irq(he_dev->irq, he_dev); if (he_dev->irq_base) pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), 
he_dev->irq_base, he_dev->irq_phys); if (he_dev->hsp) pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp), he_dev->hsp, he_dev->hsp_phys); if (he_dev->rbpl_base) { list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); } kfree(he_dev->rbpl_virt); kfree(he_dev->rbpl_table); if (he_dev->rbpl_pool) pci_pool_destroy(he_dev->rbpl_pool); if (he_dev->rbrq_base) pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), he_dev->rbrq_base, he_dev->rbrq_phys); if (he_dev->tbrq_base) pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), he_dev->tbrq_base, he_dev->tbrq_phys); if (he_dev->tpdrq_base) pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), he_dev->tpdrq_base, he_dev->tpdrq_phys); if (he_dev->tpd_pool) pci_pool_destroy(he_dev->tpd_pool); if (he_dev->pci_dev) { pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command); } if (he_dev->membase) iounmap(he_dev->membase); } static struct he_tpd * __alloc_tpd(struct he_dev *he_dev) { struct he_tpd *tpd; dma_addr_t mapping; tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping); if (tpd == NULL) return NULL; tpd->status = TPD_ADDR(mapping); tpd->reserved = 0; tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0; return tpd; } #define AAL5_LEN(buf,len) \ ((((unsigned char *)(buf))[(len)-6] << 8) | \ (((unsigned char *)(buf))[(len)-5])) /* 2.10.1.2 receive * * aal5 packets can optionally return the tcp checksum in the lower * 16 bits of the crc (RSR0_TCP_CKSUM) */ #define TCP_CKSUM(buf,len) \ ((((unsigned char *)(buf))[(len)-2] << 8) | \ 
(((unsigned char *)(buf))[(len-1)])) static int he_service_rbrq(struct he_dev *he_dev, int group) { struct he_rbrq *rbrq_tail = (struct he_rbrq *) ((unsigned long)he_dev->rbrq_base | he_dev->hsp->group[group].rbrq_tail); unsigned cid, lastcid = -1; struct sk_buff *skb; struct atm_vcc *vcc = NULL; struct he_vcc *he_vcc; struct he_buff *heb, *next; int i; int pdus_assembled = 0; int updated = 0; read_lock(&vcc_sklist_lock); while (he_dev->rbrq_head != rbrq_tail) { ++updated; HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n", he_dev->rbrq_head, group, RBRQ_ADDR(he_dev->rbrq_head), RBRQ_BUFLEN(he_dev->rbrq_head), RBRQ_CID(he_dev->rbrq_head), RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "", RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "", RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "", RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "", RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : ""); i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET; heb = he_dev->rbpl_virt[i]; cid = RBRQ_CID(he_dev->rbrq_head); if (cid != lastcid) vcc = __find_vcc(he_dev, cid); lastcid = cid; if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) { hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid); if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) { clear_bit(i, he_dev->rbpl_table); list_del(&heb->entry); pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); } goto next_rbrq_entry; } if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! 
(cid 0x%x)\n", cid); atomic_inc(&vcc->stats->rx_drop); goto return_host_buffers; } heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; clear_bit(i, he_dev->rbpl_table); list_move_tail(&heb->entry, &he_vcc->buffers); he_vcc->pdu_len += heb->len; if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { lastcid = -1; HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid); wake_up(&he_vcc->rx_waitq); goto return_host_buffers; } if (!RBRQ_END_PDU(he_dev->rbrq_head)) goto next_rbrq_entry; if (RBRQ_LEN_ERR(he_dev->rbrq_head) || RBRQ_CRC_ERR(he_dev->rbrq_head)) { HPRINTK("%s%s (%d.%d)\n", RBRQ_CRC_ERR(he_dev->rbrq_head) ? "CRC_ERR " : "", RBRQ_LEN_ERR(he_dev->rbrq_head) ? "LEN_ERR" : "", vcc->vpi, vcc->vci); atomic_inc(&vcc->stats->rx_err); goto return_host_buffers; } skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve, GFP_ATOMIC); if (!skb) { HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci); goto return_host_buffers; } if (rx_skb_reserve > 0) skb_reserve(skb, rx_skb_reserve); __net_timestamp(skb); list_for_each_entry(heb, &he_vcc->buffers, entry) memcpy(skb_put(skb, heb->len), &heb->data, heb->len); switch (vcc->qos.aal) { case ATM_AAL0: /* 2.10.1.5 raw cell receive */ skb->len = ATM_AAL0_SDU; skb_set_tail_pointer(skb, skb->len); break; case ATM_AAL5: /* 2.10.1.2 aal5 receive */ skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len); skb_set_tail_pointer(skb, skb->len); #ifdef USE_CHECKSUM_HW if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = TCP_CKSUM(skb->data, he_vcc->pdu_len); } #endif break; } #ifdef should_never_happen if (skb->len > vcc->qos.rxtp.max_sdu) hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! 
cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid); #endif #ifdef notdef ATM_SKB(skb)->vcc = vcc; #endif spin_unlock(&he_dev->global_lock); vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); atomic_inc(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry) pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); INIT_LIST_HEAD(&he_vcc->buffers); he_vcc->pdu_len = 0; next_rbrq_entry: he_dev->rbrq_head = (struct he_rbrq *) ((unsigned long) he_dev->rbrq_base | RBRQ_MASK(he_dev->rbrq_head + 1)); } read_unlock(&vcc_sklist_lock); if (updated) { if (updated > he_dev->rbrq_peak) he_dev->rbrq_peak = updated; he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head), G0_RBRQ_H + (group * 16)); } return pdus_assembled; } static void he_service_tbrq(struct he_dev *he_dev, int group) { struct he_tbrq *tbrq_tail = (struct he_tbrq *) ((unsigned long)he_dev->tbrq_base | he_dev->hsp->group[group].tbrq_tail); struct he_tpd *tpd; int slot, updated = 0; struct he_tpd *__tpd; /* 2.1.6 transmit buffer return queue */ while (he_dev->tbrq_head != tbrq_tail) { ++updated; HPRINTK("tbrq%d 0x%x%s%s\n", group, TBRQ_TPD(he_dev->tbrq_head), TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "", TBRQ_MULTIPLE(he_dev->tbrq_head) ? 
" MULTIPLE" : ""); tpd = NULL; list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) { if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) { tpd = __tpd; list_del(&__tpd->entry); break; } } if (tpd == NULL) { hprintk("unable to locate tpd for dma buffer %x\n", TBRQ_TPD(he_dev->tbrq_head)); goto next_tbrq_entry; } if (TBRQ_EOS(he_dev->tbrq_head)) { HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); if (tpd->vcc) wake_up(&HE_VCC(tpd->vcc)->tx_waitq); goto next_tbrq_entry; } for (slot = 0; slot < TPD_MAXIOV; ++slot) { if (tpd->iovec[slot].addr) pci_unmap_single(he_dev->pci_dev, tpd->iovec[slot].addr, tpd->iovec[slot].len & TPD_LEN_MASK, PCI_DMA_TODEVICE); if (tpd->iovec[slot].len & TPD_LST) break; } if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */ if (tpd->vcc && tpd->vcc->pop) tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); } next_tbrq_entry: if (tpd) pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); he_dev->tbrq_head = (struct he_tbrq *) ((unsigned long) he_dev->tbrq_base | TBRQ_MASK(he_dev->tbrq_head + 1)); } if (updated) { if (updated > he_dev->tbrq_peak) he_dev->tbrq_peak = updated; he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head), G0_TBRQ_H + (group * 16)); } } static void he_service_rbpl(struct he_dev *he_dev, int group) { struct he_rbp *new_tail; struct he_rbp *rbpl_head; struct he_buff *heb; dma_addr_t mapping; int i; int moved = 0; rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); for (;;) { new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_dev->rbpl_tail+1)); /* table 3.42 -- rbpl_tail should never be set to rbpl_head */ if (new_tail == rbpl_head) break; i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint); if (i > (RBPL_TABLE_SIZE - 1)) { i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE); if (i > (RBPL_TABLE_SIZE - 1)) break; } he_dev->rbpl_hint 
= i + 1; heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping); if (!heb) break; heb->mapping = mapping; list_add(&heb->entry, &he_dev->rbpl_outstanding); he_dev->rbpl_virt[i] = heb; set_bit(i, he_dev->rbpl_table); new_tail->idx = i << RBP_IDX_OFFSET; new_tail->phys = mapping + offsetof(struct he_buff, data); he_dev->rbpl_tail = new_tail; ++moved; } if (moved) he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); } static void he_tasklet(unsigned long data) { unsigned long flags; struct he_dev *he_dev = (struct he_dev *) data; int group, type; int updated = 0; HPRINTK("tasklet (0x%lx)\n", data); spin_lock_irqsave(&he_dev->global_lock, flags); while (he_dev->irq_head != he_dev->irq_tail) { ++updated; type = ITYPE_TYPE(he_dev->irq_head->isw); group = ITYPE_GROUP(he_dev->irq_head->isw); switch (type) { case ITYPE_RBRQ_THRESH: HPRINTK("rbrq%d threshold\n", group); /* fall through */ case ITYPE_RBRQ_TIMER: if (he_service_rbrq(he_dev, group)) he_service_rbpl(he_dev, group); break; case ITYPE_TBRQ_THRESH: HPRINTK("tbrq%d threshold\n", group); /* fall through */ case ITYPE_TPD_COMPLETE: he_service_tbrq(he_dev, group); break; case ITYPE_RBPL_THRESH: he_service_rbpl(he_dev, group); break; case ITYPE_RBPS_THRESH: /* shouldn't happen unless small buffers enabled */ break; case ITYPE_PHY: HPRINTK("phy interrupt\n"); #ifdef CONFIG_ATM_HE_USE_SUNI spin_unlock_irqrestore(&he_dev->global_lock, flags); if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt) he_dev->atm_dev->phy->interrupt(he_dev->atm_dev); spin_lock_irqsave(&he_dev->global_lock, flags); #endif break; case ITYPE_OTHER: switch (type|group) { case ITYPE_PARITY: hprintk("parity error\n"); break; case ITYPE_ABORT: hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR)); break; } break; case ITYPE_TYPE(ITYPE_INVALID): /* see 8.1.1 -- check all queues */ HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw); he_service_rbrq(he_dev, 0); he_service_rbpl(he_dev, 0); he_service_tbrq(he_dev, 0); 
break; default: hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw); } he_dev->irq_head->isw = ITYPE_INVALID; he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK); } if (updated) { if (updated > he_dev->irq_peak) he_dev->irq_peak = updated; he_writel(he_dev, IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH) | IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD); (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */ } spin_unlock_irqrestore(&he_dev->global_lock, flags); } static irqreturn_t he_irq_handler(int irq, void *dev_id) { unsigned long flags; struct he_dev *he_dev = (struct he_dev * )dev_id; int handled = 0; if (he_dev == NULL) return IRQ_NONE; spin_lock_irqsave(&he_dev->global_lock, flags); he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) | (*he_dev->irq_tailoffset << 2)); if (he_dev->irq_tail == he_dev->irq_head) { HPRINTK("tailoffset not updated?\n"); he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base | ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2)); (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */ } #ifdef DEBUG if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */) hprintk("spurious (or shared) interrupt?\n"); #endif if (he_dev->irq_head != he_dev->irq_tail) { handled = 1; tasklet_schedule(&he_dev->tasklet); he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */ (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */ } spin_unlock_irqrestore(&he_dev->global_lock, flags); return IRQ_RETVAL(handled); } static __inline__ void __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) { struct he_tpdrq *new_tail; HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n", tpd, cid, he_dev->tpdrq_tail); /* new_tail = he_dev->tpdrq_tail; */ new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base | TPDRQ_MASK(he_dev->tpdrq_tail+1)); /* * check to see if we are about to set the tail == head * if true, 
update the head pointer from the adapter * to see if this is really the case (reading the queue * head for every enqueue would be unnecessarily slow) */ if (new_tail == he_dev->tpdrq_head) { he_dev->tpdrq_head = (struct he_tpdrq *) (((unsigned long)he_dev->tpdrq_base) | TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H))); if (new_tail == he_dev->tpdrq_head) { int slot; hprintk("tpdrq full (cid 0x%x)\n", cid); /* * FIXME * push tpd onto a transmit backlog queue * after service_tbrq, service the backlog * for now, we just drop the pdu */ for (slot = 0; slot < TPD_MAXIOV; ++slot) { if (tpd->iovec[slot].addr) pci_unmap_single(he_dev->pci_dev, tpd->iovec[slot].addr, tpd->iovec[slot].len & TPD_LEN_MASK, PCI_DMA_TODEVICE); } if (tpd->skb) { if (tpd->vcc->pop) tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); atomic_inc(&tpd->vcc->stats->tx_err); } pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); return; } } /* 2.1.5 transmit packet descriptor ready queue */ list_add_tail(&tpd->entry, &he_dev->outstanding_tpds); he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status); he_dev->tpdrq_tail->cid = cid; wmb(); he_dev->tpdrq_tail = new_tail; he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T); (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */ } static int he_open(struct atm_vcc *vcc) { unsigned long flags; struct he_dev *he_dev = HE_DEV(vcc->dev); struct he_vcc *he_vcc; int err = 0; unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock; short vpi = vcc->vpi; int vci = vcc->vci; if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC) return 0; HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci); set_bit(ATM_VF_ADDR, &vcc->flags); cid = he_mkcid(he_dev, vpi, vci); he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC); if (he_vcc == NULL) { hprintk("unable to allocate he_vcc during open\n"); return -ENOMEM; } INIT_LIST_HEAD(&he_vcc->buffers); he_vcc->pdu_len = 0; he_vcc->rc_index = -1; init_waitqueue_head(&he_vcc->rx_waitq); 
init_waitqueue_head(&he_vcc->tx_waitq); vcc->dev_data = he_vcc; if (vcc->qos.txtp.traffic_class != ATM_NONE) { int pcr_goal; pcr_goal = atm_pcr_goal(&vcc->qos.txtp); if (pcr_goal == 0) pcr_goal = he_dev->atm_dev->link_rate; if (pcr_goal < 0) /* means round down, technically */ pcr_goal = -pcr_goal; HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal); switch (vcc->qos.aal) { case ATM_AAL5: tsr0_aal = TSR0_AAL5; tsr4 = TSR4_AAL5; break; case ATM_AAL0: tsr0_aal = TSR0_AAL0_SDU; tsr4 = TSR4_AAL0_SDU; break; default: err = -EINVAL; goto open_failed; } spin_lock_irqsave(&he_dev->global_lock, flags); tsr0 = he_readl_tsr0(he_dev, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); if (TSR0_CONN_STATE(tsr0) != 0) { hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0); err = -EBUSY; goto open_failed; } switch (vcc->qos.txtp.traffic_class) { case ATM_UBR: /* 2.3.3.1 open connection ubr */ tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal | TSR0_USE_WMIN | TSR0_UPDATE_GER; break; case ATM_CBR: /* 2.3.3.2 open connection cbr */ /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */ if ((he_dev->total_bw + pcr_goal) > (he_dev->atm_dev->link_rate * 9 / 10)) { err = -EBUSY; goto open_failed; } spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */ /* find an unused cs_stper register */ for (reg = 0; reg < HE_NUM_CS_STPER; ++reg) if (he_dev->cs_stper[reg].inuse == 0 || he_dev->cs_stper[reg].pcr == pcr_goal) break; if (reg == HE_NUM_CS_STPER) { err = -EBUSY; spin_unlock_irqrestore(&he_dev->global_lock, flags); goto open_failed; } he_dev->total_bw += pcr_goal; he_vcc->rc_index = reg; ++he_dev->cs_stper[reg].inuse; he_dev->cs_stper[reg].pcr = pcr_goal; clock = he_is622(he_dev) ? 
66667000 : 50000000; period = clock / pcr_goal; HPRINTK("rc_index = %d period = %d\n", reg, period); he_writel_mbox(he_dev, rate_to_atmf(period/2), CS_STPER0 + reg); spin_unlock_irqrestore(&he_dev->global_lock, flags); tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal | TSR0_RC_INDEX(reg); break; default: err = -EINVAL; goto open_failed; } spin_lock_irqsave(&he_dev->global_lock, flags); he_writel_tsr0(he_dev, tsr0, cid); he_writel_tsr4(he_dev, tsr4 | 1, cid); he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) | TSR1_PCR(rate_to_atmf(pcr_goal)), cid); he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid); he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid); he_writel_tsr3(he_dev, 0x0, cid); he_writel_tsr5(he_dev, 0x0, cid); he_writel_tsr6(he_dev, 0x0, cid); he_writel_tsr7(he_dev, 0x0, cid); he_writel_tsr8(he_dev, 0x0, cid); he_writel_tsr10(he_dev, 0x0, cid); he_writel_tsr11(he_dev, 0x0, cid); he_writel_tsr12(he_dev, 0x0, cid); he_writel_tsr13(he_dev, 0x0, cid); he_writel_tsr14(he_dev, 0x0, cid); (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */ spin_unlock_irqrestore(&he_dev->global_lock, flags); } if (vcc->qos.rxtp.traffic_class != ATM_NONE) { unsigned aal; HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid, &HE_VCC(vcc)->rx_waitq); switch (vcc->qos.aal) { case ATM_AAL5: aal = RSR0_AAL5; break; case ATM_AAL0: aal = RSR0_RAWCELL; break; default: err = -EINVAL; goto open_failed; } spin_lock_irqsave(&he_dev->global_lock, flags); rsr0 = he_readl_rsr0(he_dev, cid); if (rsr0 & RSR0_OPEN_CONN) { spin_unlock_irqrestore(&he_dev->global_lock, flags); hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0); err = -EBUSY; goto open_failed; } rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY; rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY; rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0; #ifdef USE_CHECKSUM_HW if (vpi == 0 && vci >= ATM_NOT_RSV_VCI) rsr0 |= RSR0_TCP_CKSUM; #endif he_writel_rsr4(he_dev, rsr4, cid); he_writel_rsr1(he_dev, rsr1, cid); /* 5.1.11 last parameter initialized should be the open/closed indication in rsr0 */ he_writel_rsr0(he_dev, rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid); (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ spin_unlock_irqrestore(&he_dev->global_lock, flags); } open_failed: if (err) { kfree(he_vcc); clear_bit(ATM_VF_ADDR, &vcc->flags); } else set_bit(ATM_VF_READY, &vcc->flags); return err; } static void he_close(struct atm_vcc *vcc) { unsigned long flags; DECLARE_WAITQUEUE(wait, current); struct he_dev *he_dev = HE_DEV(vcc->dev); struct he_tpd *tpd; unsigned cid; struct he_vcc *he_vcc = HE_VCC(vcc); #define MAX_RETRY 30 int retry = 0, sleep = 1, tx_inuse; HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci); clear_bit(ATM_VF_READY, &vcc->flags); cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); if (vcc->qos.rxtp.traffic_class != ATM_NONE) { int timeout; HPRINTK("close rx cid 0x%x\n", cid); /* 2.7.2.2 close receive operation */ /* wait for previous close (if any) to finish */ spin_lock_irqsave(&he_dev->global_lock, flags); while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) { HPRINTK("close cid 0x%x RCC_BUSY\n", cid); udelay(250); } set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&he_vcc->rx_waitq, &wait); he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid); (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */ he_writel_mbox(he_dev, cid, RXCON_CLOSE); spin_unlock_irqrestore(&he_dev->global_lock, flags); timeout = schedule_timeout(30*HZ); remove_wait_queue(&he_vcc->rx_waitq, &wait); set_current_state(TASK_RUNNING); if (timeout == 0) hprintk("close rx timeout cid 0x%x\n", cid); HPRINTK("close rx cid 0x%x complete\n", cid); } if (vcc->qos.txtp.traffic_class != ATM_NONE) { volatile unsigned tsr4, tsr0; int timeout; HPRINTK("close tx cid 0x%x\n", cid); /* 
2.1.2 * * ... the host must first stop queueing packets to the TPDRQ * on the connection to be closed, then wait for all outstanding * packets to be transmitted and their buffers returned to the * TBRQ. When the last packet on the connection arrives in the * TBRQ, the host issues the close command to the adapter. */ while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) && (retry < MAX_RETRY)) { msleep(sleep); if (sleep < 250) sleep = sleep * 2; ++retry; } if (tx_inuse > 1) hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse); /* 2.3.1.1 generic close operations with flush */ spin_lock_irqsave(&he_dev->global_lock, flags); he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid); /* also clears TSR4_SESSION_ENDED */ switch (vcc->qos.txtp.traffic_class) { case ATM_UBR: he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(200000)) | TSR1_PCR(0), cid); break; case ATM_CBR: he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid); break; } (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */ tpd = __alloc_tpd(he_dev); if (tpd == NULL) { hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid); goto close_tx_incomplete; } tpd->status |= TPD_EOS | TPD_INT; tpd->skb = NULL; tpd->vcc = vcc; wmb(); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&he_vcc->tx_waitq, &wait); __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); timeout = schedule_timeout(30*HZ); remove_wait_queue(&he_vcc->tx_waitq, &wait); set_current_state(TASK_RUNNING); spin_lock_irqsave(&he_dev->global_lock, flags); if (timeout == 0) { hprintk("close tx timeout cid 0x%x\n", cid); goto close_tx_incomplete; } while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) { HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4); udelay(250); } while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) { HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0); udelay(250); } close_tx_incomplete: if 
(vcc->qos.txtp.traffic_class == ATM_CBR) { int reg = he_vcc->rc_index; HPRINTK("cs_stper reg = %d\n", reg); if (he_dev->cs_stper[reg].inuse == 0) hprintk("cs_stper[%d].inuse = 0!\n", reg); else --he_dev->cs_stper[reg].inuse; he_dev->total_bw -= he_dev->cs_stper[reg].pcr; } spin_unlock_irqrestore(&he_dev->global_lock, flags); HPRINTK("close tx cid 0x%x complete\n", cid); } kfree(he_vcc); clear_bit(ATM_VF_ADDR, &vcc->flags); } static int he_send(struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct he_dev *he_dev = HE_DEV(vcc->dev); unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci); struct he_tpd *tpd; #ifdef USE_SCATTERGATHER int i, slot = 0; #endif #define HE_TPD_BUFSIZE 0xffff HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci); if ((skb->len > HE_TPD_BUFSIZE) || ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) { hprintk("buffer too large (or small) -- %d bytes\n", skb->len ); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); return -EINVAL; } #ifndef USE_SCATTERGATHER if (skb_shinfo(skb)->nr_frags) { hprintk("no scatter/gather support\n"); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); return -EINVAL; } #endif spin_lock_irqsave(&he_dev->global_lock, flags); tpd = __alloc_tpd(he_dev); if (tpd == NULL) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } if (vcc->qos.aal == ATM_AAL5) tpd->status |= TPD_CELLTYPE(TPD_USERCELL); else { char *pti_clp = (void *) (skb->data + 3); int clp, pti; pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; clp = (*pti_clp & ATM_HDR_CLP); tpd->status |= TPD_CELLTYPE(pti); if (clp) tpd->status |= TPD_CLP; skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD); } #ifdef USE_SCATTERGATHER tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data, skb_headlen(skb), PCI_DMA_TODEVICE); 
tpd->iovec[slot].len = skb_headlen(skb); ++slot; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */ tpd->vcc = vcc; tpd->skb = NULL; /* not the last fragment so dont ->push() yet */ wmb(); __enqueue_tpd(he_dev, tpd, cid); tpd = __alloc_tpd(he_dev); if (tpd == NULL) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } tpd->status |= TPD_USERCELL; slot = 0; } tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, (void *) page_address(frag->page) + frag->page_offset, frag->size, PCI_DMA_TODEVICE); tpd->iovec[slot].len = frag->size; ++slot; } tpd->iovec[slot - 1].len |= TPD_LST; #else tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); tpd->length0 = skb->len | TPD_LST; #endif tpd->status |= TPD_INT; tpd->vcc = vcc; tpd->skb = skb; wmb(); ATM_SKB(skb)->vcc = vcc; __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); atomic_inc(&vcc->stats->tx); return 0; } static int he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); struct he_ioctl_reg reg; int err = 0; switch (cmd) { case HE_GET_REG: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&reg, arg, sizeof(struct he_ioctl_reg))) return -EFAULT; spin_lock_irqsave(&he_dev->global_lock, flags); switch (reg.type) { case HE_REGTYPE_PCI: if (reg.addr >= HE_REGMAP_SIZE) { err = -EINVAL; break; } reg.val = he_readl(he_dev, reg.addr); break; case HE_REGTYPE_RCM: reg.val = he_readl_rcm(he_dev, reg.addr); break; case HE_REGTYPE_TCM: reg.val = he_readl_tcm(he_dev, reg.addr); break; case HE_REGTYPE_MBOX: reg.val = he_readl_mbox(he_dev, reg.addr); break; default: err = -EINVAL; break; } spin_unlock_irqrestore(&he_dev->global_lock, flags); if (err == 0) if 
(copy_to_user(arg, &reg, sizeof(struct he_ioctl_reg))) return -EFAULT; break; default: #ifdef CONFIG_ATM_HE_USE_SUNI if (atm_dev->phy && atm_dev->phy->ioctl) err = atm_dev->phy->ioctl(atm_dev, cmd, arg); #else /* CONFIG_ATM_HE_USE_SUNI */ err = -EINVAL; #endif /* CONFIG_ATM_HE_USE_SUNI */ break; } return err; } static void he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr); spin_lock_irqsave(&he_dev->global_lock, flags); he_writel(he_dev, val, FRAMER + (addr*4)); (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */ spin_unlock_irqrestore(&he_dev->global_lock, flags); } static unsigned char he_phy_get(struct atm_dev *atm_dev, unsigned long addr) { unsigned long flags; struct he_dev *he_dev = HE_DEV(atm_dev); unsigned reg; spin_lock_irqsave(&he_dev->global_lock, flags); reg = he_readl(he_dev, FRAMER + (addr*4)); spin_unlock_irqrestore(&he_dev->global_lock, flags); HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg); return reg; } static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page) { unsigned long flags; struct he_dev *he_dev = HE_DEV(dev); int left, i; #ifdef notdef struct he_rbrq *rbrq_tail; struct he_tpdrq *tpdrq_head; int rbpl_head, rbpl_tail; #endif static long mcc = 0, oec = 0, dcc = 0, cec = 0; left = *pos; if (!left--) return sprintf(page, "ATM he driver\n"); if (!left--) return sprintf(page, "%s%s\n\n", he_dev->prod_id, he_dev->media & 0x40 ? 
"SM" : "MM"); if (!left--) return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n"); spin_lock_irqsave(&he_dev->global_lock, flags); mcc += he_readl(he_dev, MCC); oec += he_readl(he_dev, OEC); dcc += he_readl(he_dev, DCC); cec += he_readl(he_dev, CEC); spin_unlock_irqrestore(&he_dev->global_lock, flags); if (!left--) return sprintf(page, "%16ld %16ld %13ld %17ld\n\n", mcc, oec, dcc, cec); if (!left--) return sprintf(page, "irq_size = %d inuse = ? peak = %d\n", CONFIG_IRQ_SIZE, he_dev->irq_peak); if (!left--) return sprintf(page, "tpdrq_size = %d inuse = ?\n", CONFIG_TPDRQ_SIZE); if (!left--) return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n", CONFIG_RBRQ_SIZE, he_dev->rbrq_peak); if (!left--) return sprintf(page, "tbrq_size = %d peak = %d\n", CONFIG_TBRQ_SIZE, he_dev->tbrq_peak); #ifdef notdef rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S)); rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T)); inuse = rbpl_head - rbpl_tail; if (inuse < 0) inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp); inuse /= sizeof(struct he_rbp); if (!left--) return sprintf(page, "rbpl_size = %d inuse = %d\n\n", CONFIG_RBPL_SIZE, inuse); #endif if (!left--) return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n"); for (i = 0; i < HE_NUM_CS_STPER; ++i) if (!left--) return sprintf(page, "cs_stper%-2d %8ld %3d\n", i, he_dev->cs_stper[i].pcr, he_dev->cs_stper[i].inuse); if (!left--) return sprintf(page, "total bw (cbr): %d (limit %d)\n", he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9); return 0; } /* eeprom routines -- see 4.7 */ static u8 read_prom_byte(struct he_dev *he_dev, int addr) { u32 val = 0, tmp_read = 0; int i, j = 0; u8 byte_read = 0; val = readl(he_dev->membase + HOST_CNTL); val &= 0xFFFFE0FF; /* Turn on write enable */ val |= 0x800; he_writel(he_dev, val, HOST_CNTL); /* Send READ instruction */ for (i = 0; i < ARRAY_SIZE(readtab); i++) { he_writel(he_dev, val | readtab[i], HOST_CNTL); udelay(EEPROM_DELAY); } /* Next, we 
need to send the byte address to read from */ for (i = 7; i >= 0; i--) { he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); udelay(EEPROM_DELAY); he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL); udelay(EEPROM_DELAY); } j = 0; val &= 0xFFFFF7FF; /* Turn off write enable */ he_writel(he_dev, val, HOST_CNTL); /* Now, we can read data from the EEPROM by clocking it in */ for (i = 7; i >= 0; i--) { he_writel(he_dev, val | clocktab[j++], HOST_CNTL); udelay(EEPROM_DELAY); tmp_read = he_readl(he_dev, HOST_CNTL); byte_read |= (unsigned char) ((tmp_read & ID_DOUT) >> ID_DOFFSET << i); he_writel(he_dev, val | clocktab[j++], HOST_CNTL); udelay(EEPROM_DELAY); } he_writel(he_dev, val | ID_CS, HOST_CNTL); udelay(EEPROM_DELAY); return byte_read; } MODULE_LICENSE("GPL"); MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>"); MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver"); module_param(disable64, bool, 0); MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers"); module_param(nvpibits, short, 0); MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)"); module_param(nvcibits, short, 0); MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)"); module_param(rx_skb_reserve, short, 0); MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)"); module_param(irq_coalesce, bool, 0); MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)"); module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); static struct pci_device_id he_pci_tbl[] = { { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, he_pci_tbl); static struct pci_driver he_driver = { .name = "he", .probe = he_init_one, .remove = he_remove_one, .id_table = he_pci_tbl, }; module_pci_driver(he_driver);
gpl-2.0
mathieudevos/linux_kernel_3.2.48
drivers/media/dvb/dvb-usb/technisat-usb2.c
162
20020
/* * Linux driver for Technisat DVB-S/S2 USB 2.0 device * * Copyright (C) 2010 Patrick Boettcher, * Kernel Labs Inc. PO Box 745, St James, NY 11780 * * Development was sponsored by Technisat Digital UK Limited, whose * registered office is Witan Gate House 500 - 600 Witan Gate West, * Milton Keynes, MK9 1SH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND * TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO * THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR * FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER * NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL, * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the * GNU General Public License for more details. */ #define DVB_USB_LOG_PREFIX "technisat-usb2" #include "dvb-usb.h" #include "stv6110x.h" #include "stv090x.h" /* module parameters */ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (bit-mask: 1=info,2=eeprom,4=i2c,8=rc)." 
\ DVB_USB_DEBUG_STATUS); /* disables all LED control command and * also does not start the signal polling thread */ static int disable_led_control; module_param(disable_led_control, int, 0444); MODULE_PARM_DESC(disable_led_control, "disable LED control of the device " "(default: 0 - LED control is active)."); /* device private data */ struct technisat_usb2_state { struct dvb_usb_device *dev; struct delayed_work green_led_work; u8 power_state; u16 last_scan_code; }; /* debug print helpers */ #define deb_info(args...) dprintk(debug, 0x01, args) #define deb_eeprom(args...) dprintk(debug, 0x02, args) #define deb_i2c(args...) dprintk(debug, 0x04, args) #define deb_rc(args...) dprintk(debug, 0x08, args) /* vendor requests */ #define SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST 0xB3 #define SET_FRONT_END_RESET_VENDOR_REQUEST 0xB4 #define GET_VERSION_INFO_VENDOR_REQUEST 0xB5 #define SET_GREEN_LED_VENDOR_REQUEST 0xB6 #define SET_RED_LED_VENDOR_REQUEST 0xB7 #define GET_IR_DATA_VENDOR_REQUEST 0xB8 #define SET_LED_TIMER_DIVIDER_VENDOR_REQUEST 0xB9 #define SET_USB_REENUMERATION 0xBA /* i2c-access methods */ #define I2C_SPEED_100KHZ_BIT 0x40 #define I2C_STATUS_NAK 7 #define I2C_STATUS_OK 8 static int technisat_usb2_i2c_access(struct usb_device *udev, u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) { u8 b[64]; int ret, actual_length; deb_i2c("i2c-access: %02x, tx: ", device_addr); debug_dump(tx, txlen, deb_i2c); deb_i2c(" "); if (txlen > 62) { err("i2c TX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); txlen = 62; } if (rxlen > 62) { err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); txlen = 62; } b[0] = I2C_SPEED_100KHZ_BIT; b[1] = device_addr << 1; if (rx != NULL) { b[0] |= rxlen; b[1] |= 1; } memcpy(&b[2], tx, txlen); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), b, 2 + txlen, NULL, 1000); if (ret < 0) { err("i2c-error: out failed %02x = %d", device_addr, ret); return -ENODEV; } ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x01), b, 64, 
&actual_length, 1000); if (ret < 0) { err("i2c-error: in failed %02x = %d", device_addr, ret); return -ENODEV; } if (b[0] != I2C_STATUS_OK) { err("i2c-error: %02x = %d", device_addr, b[0]); /* handle tuner-i2c-nak */ if (!(b[0] == I2C_STATUS_NAK && device_addr == 0x60 /* && device_is_technisat_usb2 */)) return -ENODEV; } deb_i2c("status: %d, ", b[0]); if (rx != NULL) { memcpy(rx, &b[2], rxlen); deb_i2c("rx (%d): ", rxlen); debug_dump(rx, rxlen, deb_i2c); } deb_i2c("\n"); return 0; } static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { int ret = 0, i; struct dvb_usb_device *d = i2c_get_adapdata(adap); /* Ensure nobody else hits the i2c bus while we're sending our sequence of messages, (such as the remote control thread) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (i+1 < num && msg[i+1].flags & I2C_M_RD) { ret = technisat_usb2_i2c_access(d->udev, msg[i+1].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len); if (ret != 0) break; i++; } else { ret = technisat_usb2_i2c_access(d->udev, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0); if (ret != 0) break; } } if (ret == 0) ret = i; mutex_unlock(&d->i2c_mutex); return ret; } static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm technisat_usb2_i2c_algo = { .master_xfer = technisat_usb2_i2c_xfer, .functionality = technisat_usb2_i2c_func, }; #if 0 static void technisat_usb2_frontend_reset(struct usb_device *udev) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_FRONT_END_RESET_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 10, 0, NULL, 0, 500); } #endif /* LED control */ enum technisat_usb2_led_state { LED_OFF, LED_BLINK, LED_ON, LED_UNDEFINED }; static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state state) { int ret; u8 led[8] = { red ? 
SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, 0 }; if (disable_led_control && state != LED_OFF) return 0; switch (state) { case LED_ON: led[1] = 0x82; break; case LED_BLINK: led[1] = 0x82; if (red) { led[2] = 0x02; led[3] = 10; led[4] = 10; } else { led[2] = 0xff; led[3] = 50; led[4] = 50; } led[5] = 1; break; default: case LED_OFF: led[1] = 0x80; break; } if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, led, sizeof(led), 500); mutex_unlock(&d->i2c_mutex); return ret; } static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green) { int ret; u8 b = 0; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), SET_LED_TIMER_DIVIDER_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, (red << 8) | green, 0, &b, 1, 500); mutex_unlock(&d->i2c_mutex); return ret; } static void technisat_usb2_green_led_control(struct work_struct *work) { struct technisat_usb2_state *state = container_of(work, struct technisat_usb2_state, green_led_work.work); struct dvb_frontend *fe = state->dev->adapter[0].fe_adap[0].fe; if (state->power_state == 0) goto schedule; if (fe != NULL) { enum fe_status status; if (fe->ops.read_status(fe, &status) != 0) goto schedule; if (status & FE_HAS_LOCK) { u32 ber; if (fe->ops.read_ber(fe, &ber) != 0) goto schedule; if (ber > 1000) technisat_usb2_set_led(state->dev, 0, LED_BLINK); else technisat_usb2_set_led(state->dev, 0, LED_ON); } else technisat_usb2_set_led(state->dev, 0, LED_OFF); } schedule: schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } /* method to find out whether the firmware has to be downloaded or not */ static int technisat_usb2_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, 
int *cold) { int ret; u8 version[3]; /* first select the interface */ if (usb_set_interface(udev, 0, 1) != 0) err("could not set alternate setting to 0"); else info("set alternate setting"); *cold = 0; /* by default do not download a firmware - just in case something is wrong */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GET_VERSION_INFO_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(version), 500); if (ret < 0) *cold = 1; else { info("firmware version: %d.%d", version[1], version[2]); *cold = 0; } return 0; } /* power control */ static int technisat_usb2_power_ctrl(struct dvb_usb_device *d, int level) { struct technisat_usb2_state *state = d->priv; state->power_state = level; if (disable_led_control) return 0; /* green led is turned off in any case - will be turned on when tuning */ technisat_usb2_set_led(d, 0, LED_OFF); /* red led is turned on all the time */ technisat_usb2_set_led(d, 1, LED_ON); return 0; } /* mac address reading - from the eeprom */ #if 0 static void technisat_usb2_eeprom_dump(struct dvb_usb_device *d) { u8 reg; u8 b[16]; int i, j; /* full EEPROM dump */ for (j = 0; j < 256 * 4; j += 16) { reg = j; if (technisat_usb2_i2c_access(d->udev, 0x50 + j / 256, &reg, 1, b, 16) != 0) break; deb_eeprom("EEPROM: %01x%02x: ", j / 256, reg); for (i = 0; i < 16; i++) deb_eeprom("%02x ", b[i]); deb_eeprom("\n"); } } #endif static u8 technisat_usb2_calc_lrc(const u8 *b, u16 length) { u8 lrc = 0; while (--length) lrc ^= *b++; return lrc; } static int technisat_usb2_eeprom_lrc_read(struct dvb_usb_device *d, u16 offset, u8 *b, u16 length, u8 tries) { u8 bo = offset & 0xff; struct i2c_msg msg[] = { { .addr = 0x50 | ((offset >> 8) & 0x3), .buf = &bo, .len = 1 }, { .addr = 0x50 | ((offset >> 8) & 0x3), .flags = I2C_M_RD, .buf = b, .len = length } }; while (tries--) { int status; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; status = technisat_usb2_calc_lrc(b, length - 1) == b[length - 1]; if (status) return 0; } return 
-EREMOTEIO; } #define EEPROM_MAC_START 0x3f8 #define EEPROM_MAC_TOTAL 8 static int technisat_usb2_read_mac_address(struct dvb_usb_device *d, u8 mac[]) { u8 buf[EEPROM_MAC_TOTAL]; if (technisat_usb2_eeprom_lrc_read(d, EEPROM_MAC_START, buf, EEPROM_MAC_TOTAL, 4) != 0) return -ENODEV; memcpy(mac, buf, 6); return 0; } /* frontend attach */ static int technisat_usb2_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { int i; u8 gpio[3] = { 0 }; /* 0 = 2, 1 = 3, 2 = 4 */ gpio[2] = 1; /* high - voltage ? */ switch (voltage) { case SEC_VOLTAGE_13: gpio[0] = 1; break; case SEC_VOLTAGE_18: gpio[0] = 1; gpio[1] = 1; break; default: case SEC_VOLTAGE_OFF: break; } for (i = 0; i < 3; i++) if (stv090x_set_gpio(fe, i+2, 0, gpio[i], 0) != 0) return -EREMOTEIO; return 0; } static struct stv090x_config technisat_usb2_stv090x_config = { .device = STV0903, .demod_mode = STV090x_SINGLE, .clk_mode = STV090x_CLK_EXT, .xtal = 8000000, .address = 0x68, .ts1_mode = STV090x_TSMODE_DVBCI, .ts1_clk = 13400000, .ts1_tei = 1, .repeater_level = STV090x_RPTLEVEL_64, .tuner_bbgain = 6, }; static struct stv6110x_config technisat_usb2_stv6110x_config = { .addr = 0x60, .refclk = 16000000, .clk_div = 2, }; static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) { struct usb_device *udev = a->dev->udev; int ret; a->fe_adap[0].fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config, &a->dev->i2c_adap, STV090x_DEMODULATOR_0); if (a->fe_adap[0].fe) { struct stv6110x_devctl *ctl; ctl = dvb_attach(stv6110x_attach, a->fe_adap[0].fe, &technisat_usb2_stv6110x_config, &a->dev->i2c_adap); if (ctl) { technisat_usb2_stv090x_config.tuner_init = ctl->tuner_init; technisat_usb2_stv090x_config.tuner_sleep = ctl->tuner_sleep; technisat_usb2_stv090x_config.tuner_set_mode = ctl->tuner_set_mode; technisat_usb2_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency; technisat_usb2_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency; 
technisat_usb2_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth; technisat_usb2_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth; technisat_usb2_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain; technisat_usb2_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain; technisat_usb2_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk; technisat_usb2_stv090x_config.tuner_get_status = ctl->tuner_get_status; /* call the init function once to initialize tuner's clock output divider and demod's master clock */ if (a->fe_adap[0].fe->ops.init) a->fe_adap[0].fe->ops.init(a->fe_adap[0].fe); if (mutex_lock_interruptible(&a->dev->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, NULL, 0, 500); mutex_unlock(&a->dev->i2c_mutex); if (ret != 0) err("could not set IF_CLK to external"); a->fe_adap[0].fe->ops.set_voltage = technisat_usb2_set_voltage; /* if everything was successful assign a nice name to the frontend */ strlcpy(a->fe_adap[0].fe->ops.info.name, a->dev->desc->name, sizeof(a->fe_adap[0].fe->ops.info.name)); } else { dvb_frontend_detach(a->fe_adap[0].fe); a->fe_adap[0].fe = NULL; } } technisat_usb2_set_led_timer(a->dev, 1, 1); return a->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } /* Remote control */ /* the device is giving providing raw IR-signals to the host mapping * it only to one remote control is just the default implementation */ #define NOMINAL_IR_BIT_TRANSITION_TIME_US 889 #define NOMINAL_IR_BIT_TIME_US (2 * NOMINAL_IR_BIT_TRANSITION_TIME_US) #define FIRMWARE_CLOCK_TICK 83333 #define FIRMWARE_CLOCK_DIVISOR 256 #define IR_PERCENT_TOLERANCE 15 #define NOMINAL_IR_BIT_TRANSITION_TICKS ((NOMINAL_IR_BIT_TRANSITION_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICKS / FIRMWARE_CLOCK_DIVISOR) #define NOMINAL_IR_BIT_TIME_TICKS ((NOMINAL_IR_BIT_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICKS / FIRMWARE_CLOCK_DIVISOR) #define MINIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT - ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT + ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MINIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT - ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT + ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) static int technisat_usb2_get_ir(struct dvb_usb_device *d) { u8 buf[62], *b; int ret; struct ir_raw_event ev; buf[0] = GET_IR_DATA_VENDOR_REQUEST; buf[1] = 0x08; buf[2] = 0x8f; buf[3] = MINIMUM_IR_BIT_TRANSITION_TICK_COUNT; buf[4] = MAXIMUM_IR_BIT_TIME_TICK_COUNT; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, buf, 5, 500); if (ret < 0) goto unlock; buf[1] = 0; buf[2] = 0; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, 
USB_TYPE_VENDOR | USB_DIR_IN, 0x8080, 0, buf, sizeof(buf), 500); unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) return ret; if (ret == 1) return 0; /* no key pressed */ /* decoding */ b = buf+1; #if 0 deb_rc("RC: %d ", ret); debug_dump(b, ret, deb_rc); #endif ev.pulse = 0; while (1) { ev.pulse = !ev.pulse; ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; ir_raw_event_store(d->rc_dev, &ev); b++; if (*b == 0xff) { ev.pulse = 0; ev.duration = 888888*2; ir_raw_event_store(d->rc_dev, &ev); break; } } ir_raw_event_handle(d->rc_dev); return 1; } static int technisat_usb2_rc_query(struct dvb_usb_device *d) { int ret = technisat_usb2_get_ir(d); if (ret < 0) return ret; if (ret == 0) return 0; if (!disable_led_control) technisat_usb2_set_led(d, 1, LED_BLINK); return 0; } /* DVB-USB and USB stuff follows */ static struct usb_device_id technisat_usb2_id_table[] = { { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_DVB_S2) }, { 0 } /* Terminating entry */ }; /* device description */ static struct dvb_usb_device_properties technisat_usb2_devices = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .identify_state = technisat_usb2_identify_state, .firmware = "dvb-usb-SkyStar_USB_HD_FW_v17_63.HEX.fw", .size_of_priv = sizeof(struct technisat_usb2_state), .i2c_algo = &technisat_usb2_i2c_algo, .power_ctrl = technisat_usb2_power_ctrl, .read_mac_address = technisat_usb2_read_mac_address, .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = technisat_usb2_frontend_attach, .stream = { .type = USB_ISOC, .count = 8, .endpoint = 0x2, .u = { .isoc = { .framesperurb = 32, .framesize = 2048, .interval = 3, } } }, }}, .size_of_priv = 0, }, }, .num_device_descs = 1, .devices = { { "Technisat SkyStar USB HD (DVB-S/S2)", { &technisat_usb2_id_table[0], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_TECHNISAT_USB2, .module_name = "technisat-usb2", .rc_query = technisat_usb2_rc_query, .allowed_protos 
= RC_TYPE_ALL, .driver_type = RC_DRIVER_IR_RAW, } }; static int technisat_usb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *dev; if (dvb_usb_device_init(intf, &technisat_usb2_devices, THIS_MODULE, &dev, adapter_nr) != 0) return -ENODEV; if (dev) { struct technisat_usb2_state *state = dev->priv; state->dev = dev; if (!disable_led_control) { INIT_DELAYED_WORK(&state->green_led_work, technisat_usb2_green_led_control); schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } } return 0; } static void technisat_usb2_disconnect(struct usb_interface *intf) { struct dvb_usb_device *dev = usb_get_intfdata(intf); /* work and stuff was only created when the device is is hot-state */ if (dev != NULL) { struct technisat_usb2_state *state = dev->priv; if (state != NULL) cancel_delayed_work_sync(&state->green_led_work); } dvb_usb_device_exit(intf); } static struct usb_driver technisat_usb2_driver = { .name = "dvb_usb_technisat_usb2", .probe = technisat_usb2_probe, .disconnect = technisat_usb2_disconnect, .id_table = technisat_usb2_id_table, }; /* module stuff */ static int __init technisat_usb2_module_init(void) { int result = usb_register(&technisat_usb2_driver); if (result) { err("usb_register failed. Code %d", result); return result; } return 0; } static void __exit technisat_usb2_module_exit(void) { usb_deregister(&technisat_usb2_driver); } module_init(technisat_usb2_module_init); module_exit(technisat_usb2_module_exit); MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>"); MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
tmerrifi/conversion_linux
drivers/staging/go7007/go7007-usb.c
162
34352
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/usb.h> #include <linux/i2c.h> #include <asm/byteorder.h> #include <media/tvaudio.h> #include "go7007-priv.h" #include "wis-i2c.h" static unsigned int assume_endura; module_param(assume_endura, int, 0644); MODULE_PARM_DESC(assume_endura, "when probing fails, " "hardware is a Pelco Endura"); /* #define GO7007_USB_DEBUG */ /* #define GO7007_I2C_DEBUG */ /* for debugging the EZ-USB I2C adapter */ #define HPI_STATUS_ADDR 0xFFF4 #define INT_PARAM_ADDR 0xFFF6 #define INT_INDEX_ADDR 0xFFF8 /* * Pipes on EZ-USB interface: * 0 snd - Control * 0 rcv - Control * 2 snd - Download firmware (control) * 4 rcv - Read Interrupt (interrupt) * 6 rcv - Read Video (bulk) * 8 rcv - Read Audio (bulk) */ #define GO7007_USB_EZUSB (1<<0) #define GO7007_USB_EZUSB_I2C (1<<1) struct go7007_usb_board { unsigned int flags; struct go7007_board_info main_info; }; struct go7007_usb { struct go7007_usb_board *board; struct mutex i2c_lock; struct usb_device *usbdev; struct urb *video_urbs[8]; struct urb *audio_urbs[8]; struct urb *intr_urb; }; /*********************** Product specification data ***********************/ 
static struct go7007_usb_board board_matrix_ii = { .flags = GO7007_USB_EZUSB, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_HAS_AUDIO | GO7007_BOARD_USE_ONBOARD_I2C, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_VALID_ENABLE | GO7007_SENSOR_TV | GO7007_SENSOR_VBI | GO7007_SENSOR_SCALING, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_saa7115", .id = I2C_DRIVERID_WIS_SAA7115, .addr = 0x20, }, }, .num_inputs = 2, .inputs = { { .video_input = 0, .name = "Composite", }, { .video_input = 9, .name = "S-Video", }, }, }, }; static struct go7007_usb_board board_matrix_reload = { .flags = GO7007_USB_EZUSB, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_HAS_AUDIO | GO7007_BOARD_USE_ONBOARD_I2C, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_TV, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_saa7113", .id = I2C_DRIVERID_WIS_SAA7113, .addr = 0x25, }, }, .num_inputs = 2, .inputs = { { .video_input = 0, .name = "Composite", }, { .video_input = 9, .name = "S-Video", }, }, }, }; static struct go7007_usb_board board_star_trek = { .flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_HAS_AUDIO, /* | GO7007_BOARD_HAS_TUNER, */ .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_VALID_ENABLE | GO7007_SENSOR_TV | GO7007_SENSOR_VBI | GO7007_SENSOR_SCALING, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_WORD_16, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_saa7115", .id = I2C_DRIVERID_WIS_SAA7115, .addr = 0x20, }, }, .num_inputs = 2, .inputs = { { .video_input = 1, /* .audio_input = AUDIO_EXTERN, */ 
.name = "Composite", }, { .video_input = 8, /* .audio_input = AUDIO_EXTERN, */ .name = "S-Video", }, /* { * .video_input = 3, * .audio_input = AUDIO_TUNER, * .name = "Tuner", * }, */ }, }, }; static struct go7007_usb_board board_px_tv402u = { .flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_HAS_AUDIO | GO7007_BOARD_HAS_TUNER, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_VALID_ENABLE | GO7007_SENSOR_TV | GO7007_SENSOR_VBI | GO7007_SENSOR_SCALING, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_WORD_16, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .num_i2c_devs = 3, .i2c_devs = { { .type = "wis_saa7115", .id = I2C_DRIVERID_WIS_SAA7115, .addr = 0x20, }, { .type = "wis_uda1342", .id = I2C_DRIVERID_WIS_UDA1342, .addr = 0x1a, }, { .type = "wis_sony_tuner", .id = I2C_DRIVERID_WIS_SONY_TUNER, .addr = 0x60, }, }, .num_inputs = 3, .inputs = { { .video_input = 1, .audio_input = TVAUDIO_INPUT_EXTERN, .name = "Composite", }, { .video_input = 8, .audio_input = TVAUDIO_INPUT_EXTERN, .name = "S-Video", }, { .video_input = 3, .audio_input = TVAUDIO_INPUT_TUNER, .name = "Tuner", }, }, }, }; static struct go7007_usb_board board_xmen = { .flags = 0, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_USE_ONBOARD_I2C, .hpi_buffer_cap = 0, .sensor_flags = GO7007_SENSOR_VREF_POLAR, .sensor_width = 320, .sensor_height = 240, .sensor_framerate = 30030, .audio_flags = GO7007_AUDIO_ONE_CHANNEL | GO7007_AUDIO_I2S_MODE_3 | GO7007_AUDIO_WORD_14 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_BCLK_POLAR | GO7007_AUDIO_OKI_MODE, .audio_rate = 8000, .audio_bclk_div = 48, .audio_main_div = 1, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_ov7640", .id = I2C_DRIVERID_WIS_OV7640, .addr = 0x21, }, }, .num_inputs = 1, .inputs = { { .name = "Camera", }, }, }, }; static struct go7007_usb_board board_matrix_revolution = { .flags = GO7007_USB_EZUSB, .main_info = { .firmware = "go7007tv.bin", .flags = 
GO7007_BOARD_HAS_AUDIO | GO7007_BOARD_USE_ONBOARD_I2C, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_TV | GO7007_SENSOR_VBI, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_tw9903", .id = I2C_DRIVERID_WIS_TW9903, .addr = 0x44, }, }, .num_inputs = 2, .inputs = { { .video_input = 2, .name = "Composite", }, { .video_input = 8, .name = "S-Video", }, }, }, }; static struct go7007_usb_board board_lifeview_lr192 = { .flags = GO7007_USB_EZUSB, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_HAS_AUDIO | GO7007_BOARD_USE_ONBOARD_I2C, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_VALID_ENABLE | GO7007_SENSOR_TV | GO7007_SENSOR_VBI | GO7007_SENSOR_SCALING, .num_i2c_devs = 0, .num_inputs = 1, .inputs = { { .video_input = 0, .name = "Composite", }, }, }, }; static struct go7007_usb_board board_endura = { .flags = 0, .main_info = { .firmware = "go7007tv.bin", .flags = 0, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_WORD_16, .audio_rate = 8000, .audio_bclk_div = 48, .audio_main_div = 8, .hpi_buffer_cap = 0, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_TV, .sensor_h_offset = 8, .num_i2c_devs = 0, .num_inputs = 1, .inputs = { { .name = "Camera", }, }, }, }; static struct go7007_usb_board board_adlink_mpg24 = { .flags = 0, .main_info = { .firmware = "go7007tv.bin", .flags = GO7007_BOARD_USE_ONBOARD_I2C, .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_WORD_16, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 0, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_TV | GO7007_SENSOR_VBI, .num_i2c_devs = 1, .i2c_devs = { { .type = "wis_tw2804", .id 
= I2C_DRIVERID_WIS_TW2804, .addr = 0x00, /* yes, really */ }, }, .num_inputs = 1, .inputs = { { .name = "Composite", }, }, }, }; static struct go7007_usb_board board_sensoray_2250 = { .flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C, .main_info = { .firmware = "go7007tv.bin", .audio_flags = GO7007_AUDIO_I2S_MODE_1 | GO7007_AUDIO_I2S_MASTER | GO7007_AUDIO_WORD_16, .flags = GO7007_BOARD_HAS_AUDIO, .audio_rate = 48000, .audio_bclk_div = 8, .audio_main_div = 2, .hpi_buffer_cap = 7, .sensor_flags = GO7007_SENSOR_656 | GO7007_SENSOR_TV, .num_i2c_devs = 1, .i2c_devs = { { .type = "s2250", .id = I2C_DRIVERID_S2250, .addr = 0x43, }, }, .num_inputs = 2, .inputs = { { .video_input = 0, .name = "Composite", }, { .video_input = 1, .name = "S-Video", }, }, }, }; MODULE_FIRMWARE("go7007tv.bin"); static const struct usb_device_id go7007_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x200, /* Revision number of XMen */ .bcdDevice_hi = 0x200, .bInterfaceClass = 255, .bInterfaceSubClass = 0, .bInterfaceProtocol = 255, .driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x202, /* Revision number of Matrix II */ .bcdDevice_hi = 0x202, .driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_II, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x204, /* Revision number of Matrix */ .bcdDevice_hi = 0x204, /* Reloaded */ .driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_RELOAD, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0eb1, 
/* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x205, /* Revision number of XMen-II */ .bcdDevice_hi = 0x205, .bInterfaceClass = 255, .bInterfaceSubClass = 0, .bInterfaceProtocol = 255, .driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_II, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x208, /* Revision number of Star Trek */ .bcdDevice_hi = 0x208, .driver_info = (kernel_ulong_t)GO7007_BOARDID_STAR_TREK, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x209, /* Revision number of XMen-III */ .bcdDevice_hi = 0x209, .bInterfaceClass = 255, .bInterfaceSubClass = 0, .bInterfaceProtocol = 255, .driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_III, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */ .idProduct = 0x7007, /* Product ID of GO7007SB chip */ .bcdDevice_lo = 0x210, /* Revision number of Matrix */ .bcdDevice_hi = 0x210, /* Revolution */ .driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_REV, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x093b, /* Vendor ID of Plextor */ .idProduct = 0xa102, /* Product ID of M402U */ .bcdDevice_lo = 0x1, /* revision number of Blueberry */ .bcdDevice_hi = 0x1, .driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_M402U, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x093b, /* Vendor ID of Plextor */ .idProduct = 0xa104, /* Product ID of TV402U */ .bcdDevice_lo = 0x1, .bcdDevice_hi = 0x1, .driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_TV402U_ANY, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x10fd, /* Vendor ID of Anubis 
Electronics */ .idProduct = 0xde00, /* Product ID of Lifeview LR192 */ .bcdDevice_lo = 0x1, .bcdDevice_hi = 0x1, .driver_info = (kernel_ulong_t)GO7007_BOARDID_LIFEVIEW_LR192, }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, .idVendor = 0x1943, /* Vendor ID Sensoray */ .idProduct = 0x2250, /* Product ID of 2250/2251 */ .bcdDevice_lo = 0x1, .bcdDevice_hi = 0x1, .driver_info = (kernel_ulong_t)GO7007_BOARDID_SENSORAY_2250, }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, go7007_usb_id_table); /********************* Driver for EZ-USB HPI interface *********************/ static int go7007_usb_vendor_request(struct go7007 *go, int request, int value, int index, void *transfer_buffer, int length, int in) { struct go7007_usb *usb = go->hpi_context; int timeout = 5000; if (in) { return usb_control_msg(usb->usbdev, usb_rcvctrlpipe(usb->usbdev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, transfer_buffer, length, timeout); } else { return usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, transfer_buffer, length, timeout); } } static int go7007_usb_interface_reset(struct go7007 *go) { struct go7007_usb *usb = go->hpi_context; u16 intr_val, intr_data; /* Reset encoder */ if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0) return -1; msleep(100); if (usb->board->flags & GO7007_USB_EZUSB) { /* Reset buffer in EZ-USB */ #ifdef GO7007_USB_DEBUG printk(KERN_DEBUG "go7007-usb: resetting EZ-USB buffers\n"); #endif if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 || go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0) return -1; /* Reset encoder again */ if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0) return -1; msleep(100); } /* Wait for an interrupt to indicate successful hardware reset */ if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x55aa) { printk(KERN_ERR "go7007-usb: unable to reset the USB 
interface\n"); return -1; } return 0; } static int go7007_usb_ezusb_write_interrupt(struct go7007 *go, int addr, int data) { struct go7007_usb *usb = go->hpi_context; int i, r; u16 status_reg; int timeout = 500; #ifdef GO7007_USB_DEBUG printk(KERN_DEBUG "go7007-usb: WriteInterrupt: %04x %04x\n", addr, data); #endif for (i = 0; i < 100; ++i) { r = usb_control_msg(usb->usbdev, usb_rcvctrlpipe(usb->usbdev, 0), 0x14, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, HPI_STATUS_ADDR, &status_reg, sizeof(status_reg), timeout); if (r < 0) goto write_int_error; __le16_to_cpus(&status_reg); if (!(status_reg & 0x0010)) break; msleep(10); } if (i == 100) { printk(KERN_ERR "go7007-usb: device is hung, status reg = 0x%04x\n", status_reg); return -1; } r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), 0x12, USB_TYPE_VENDOR | USB_RECIP_DEVICE, data, INT_PARAM_ADDR, NULL, 0, timeout); if (r < 0) goto write_int_error; r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), 0x12, USB_TYPE_VENDOR | USB_RECIP_DEVICE, addr, INT_INDEX_ADDR, NULL, 0, timeout); if (r < 0) goto write_int_error; return 0; write_int_error: printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r); return r; } static int go7007_usb_onboard_write_interrupt(struct go7007 *go, int addr, int data) { struct go7007_usb *usb = go->hpi_context; u8 *tbuf; int r; int timeout = 500; #ifdef GO7007_USB_DEBUG printk(KERN_DEBUG "go7007-usb: WriteInterrupt: %04x %04x\n", addr, data); #endif tbuf = kzalloc(8, GFP_KERNEL); if (tbuf == NULL) return -ENOMEM; tbuf[0] = data & 0xff; tbuf[1] = data >> 8; tbuf[2] = addr & 0xff; tbuf[3] = addr >> 8; r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 2), 0x00, USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x55aa, 0xf0f0, tbuf, 8, timeout); kfree(tbuf); if (r < 0) { printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r); return r; } return 0; } static void go7007_usb_readinterrupt_complete(struct urb *urb) { struct go7007 *go = (struct 
go7007 *)urb->context; u16 *regs = (u16 *)urb->transfer_buffer; int status = urb->status; if (status) { if (status != -ESHUTDOWN && go->status != STATUS_SHUTDOWN) { printk(KERN_ERR "go7007-usb: error in read interrupt: %d\n", urb->status); } else { wake_up(&go->interrupt_waitq); return; } } else if (urb->actual_length != urb->transfer_buffer_length) { printk(KERN_ERR "go7007-usb: short read in interrupt pipe!\n"); } else { go->interrupt_available = 1; go->interrupt_data = __le16_to_cpu(regs[0]); go->interrupt_value = __le16_to_cpu(regs[1]); #ifdef GO7007_USB_DEBUG printk(KERN_DEBUG "go7007-usb: ReadInterrupt: %04x %04x\n", go->interrupt_value, go->interrupt_data); #endif } wake_up(&go->interrupt_waitq); } static int go7007_usb_read_interrupt(struct go7007 *go) { struct go7007_usb *usb = go->hpi_context; int r; r = usb_submit_urb(usb->intr_urb, GFP_KERNEL); if (r < 0) { printk(KERN_ERR "go7007-usb: unable to submit interrupt urb: %d\n", r); return r; } return 0; } static void go7007_usb_read_video_pipe_complete(struct urb *urb) { struct go7007 *go = (struct go7007 *)urb->context; int r, status = urb->status; if (!go->streaming) { wake_up_interruptible(&go->frame_waitq); return; } if (status) { printk(KERN_ERR "go7007-usb: error in video pipe: %d\n", status); return; } if (urb->actual_length != urb->transfer_buffer_length) { printk(KERN_ERR "go7007-usb: short read in video pipe!\n"); return; } go7007_parse_video_stream(go, urb->transfer_buffer, urb->actual_length); r = usb_submit_urb(urb, GFP_ATOMIC); if (r < 0) printk(KERN_ERR "go7007-usb: error in video pipe: %d\n", r); } static void go7007_usb_read_audio_pipe_complete(struct urb *urb) { struct go7007 *go = (struct go7007 *)urb->context; int r, status = urb->status; if (!go->streaming) return; if (status) { printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n", status); return; } if (urb->actual_length != urb->transfer_buffer_length) { printk(KERN_ERR "go7007-usb: short read in audio pipe!\n"); return; } if 
(go->audio_deliver != NULL) go->audio_deliver(go, urb->transfer_buffer, urb->actual_length); r = usb_submit_urb(urb, GFP_ATOMIC); if (r < 0) printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n", r); } static int go7007_usb_stream_start(struct go7007 *go) { struct go7007_usb *usb = go->hpi_context; int i, r; for (i = 0; i < 8; ++i) { r = usb_submit_urb(usb->video_urbs[i], GFP_KERNEL); if (r < 0) { printk(KERN_ERR "go7007-usb: error submitting video " "urb %d: %d\n", i, r); goto video_submit_failed; } } if (!go->audio_enabled) return 0; for (i = 0; i < 8; ++i) { r = usb_submit_urb(usb->audio_urbs[i], GFP_KERNEL); if (r < 0) { printk(KERN_ERR "go7007-usb: error submitting audio " "urb %d: %d\n", i, r); goto audio_submit_failed; } } return 0; audio_submit_failed: for (i = 0; i < 7; ++i) usb_kill_urb(usb->audio_urbs[i]); video_submit_failed: for (i = 0; i < 8; ++i) usb_kill_urb(usb->video_urbs[i]); return -1; } static int go7007_usb_stream_stop(struct go7007 *go) { struct go7007_usb *usb = go->hpi_context; int i; if (go->status == STATUS_SHUTDOWN) return 0; for (i = 0; i < 8; ++i) usb_kill_urb(usb->video_urbs[i]); if (go->audio_enabled) for (i = 0; i < 8; ++i) usb_kill_urb(usb->audio_urbs[i]); return 0; } static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len) { struct go7007_usb *usb = go->hpi_context; int transferred, pipe; int timeout = 500; #ifdef GO7007_USB_DEBUG printk(KERN_DEBUG "go7007-usb: DownloadBuffer sending %d bytes\n", len); #endif if (usb->board->flags & GO7007_USB_EZUSB) pipe = usb_sndbulkpipe(usb->usbdev, 2); else pipe = usb_sndbulkpipe(usb->usbdev, 3); return usb_bulk_msg(usb->usbdev, pipe, data, len, &transferred, timeout); } static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = { .interface_reset = go7007_usb_interface_reset, .write_interrupt = go7007_usb_ezusb_write_interrupt, .read_interrupt = go7007_usb_read_interrupt, .stream_start = go7007_usb_stream_start, .stream_stop = go7007_usb_stream_stop, .send_firmware = 
go7007_usb_send_firmware, }; static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = { .interface_reset = go7007_usb_interface_reset, .write_interrupt = go7007_usb_onboard_write_interrupt, .read_interrupt = go7007_usb_read_interrupt, .stream_start = go7007_usb_stream_start, .stream_stop = go7007_usb_stream_stop, .send_firmware = go7007_usb_send_firmware, }; /********************* Driver for EZ-USB I2C adapter *********************/ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { struct go7007 *go = i2c_get_adapdata(adapter); struct go7007_usb *usb = go->hpi_context; u8 buf[16]; int buf_len, i; int ret = -1; if (go->status == STATUS_SHUTDOWN) return -1; mutex_lock(&usb->i2c_lock); for (i = 0; i < num; ++i) { /* The hardware command is "write some bytes then read some * bytes", so we try to coalesce a write followed by a read * into a single USB transaction */ if (i + 1 < num && msgs[i].addr == msgs[i + 1].addr && !(msgs[i].flags & I2C_M_RD) && (msgs[i + 1].flags & I2C_M_RD)) { #ifdef GO7007_I2C_DEBUG printk(KERN_DEBUG "go7007-usb: i2c write/read %d/%d " "bytes on %02x\n", msgs[i].len, msgs[i + 1].len, msgs[i].addr); #endif buf[0] = 0x01; buf[1] = msgs[i].len + 1; buf[2] = msgs[i].addr << 1; memcpy(&buf[3], msgs[i].buf, msgs[i].len); buf_len = msgs[i].len + 3; buf[buf_len++] = msgs[++i].len; } else if (msgs[i].flags & I2C_M_RD) { #ifdef GO7007_I2C_DEBUG printk(KERN_DEBUG "go7007-usb: i2c read %d " "bytes on %02x\n", msgs[i].len, msgs[i].addr); #endif buf[0] = 0x01; buf[1] = 1; buf[2] = msgs[i].addr << 1; buf[3] = msgs[i].len; buf_len = 4; } else { #ifdef GO7007_I2C_DEBUG printk(KERN_DEBUG "go7007-usb: i2c write %d " "bytes on %02x\n", msgs[i].len, msgs[i].addr); #endif buf[0] = 0x00; buf[1] = msgs[i].len + 1; buf[2] = msgs[i].addr << 1; memcpy(&buf[3], msgs[i].buf, msgs[i].len); buf_len = msgs[i].len + 3; buf[buf_len++] = 0; } if (go7007_usb_vendor_request(go, 0x24, 0, 0, buf, buf_len, 0) < 0) goto i2c_done; if 
(msgs[i].flags & I2C_M_RD) { memset(buf, 0, sizeof(buf)); if (go7007_usb_vendor_request(go, 0x25, 0, 0, buf, msgs[i].len + 1, 1) < 0) goto i2c_done; memcpy(msgs[i].buf, buf + 1, msgs[i].len); } } ret = 0; i2c_done: mutex_unlock(&usb->i2c_lock); return ret; } static u32 go7007_usb_functionality(struct i2c_adapter *adapter) { /* No errors are reported by the hardware, so we don't bother * supporting quick writes to avoid confusing probing */ return (I2C_FUNC_SMBUS_EMUL) & ~I2C_FUNC_SMBUS_QUICK; } static struct i2c_algorithm go7007_usb_algo = { .master_xfer = go7007_usb_i2c_master_xfer, .functionality = go7007_usb_functionality, }; static struct i2c_adapter go7007_usb_adap_templ = { .owner = THIS_MODULE, .name = "WIS GO7007SB EZ-USB", .algo = &go7007_usb_algo, }; /********************* USB add/remove functions *********************/ static int go7007_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct go7007 *go; struct go7007_usb *usb; struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); char *name; int video_pipe, i, v_urb_len; printk(KERN_DEBUG "go7007-usb: probing new GO7007 USB board\n"); switch (id->driver_info) { case GO7007_BOARDID_MATRIX_II: name = "WIS Matrix II or compatible"; board = &board_matrix_ii; break; case GO7007_BOARDID_MATRIX_RELOAD: name = "WIS Matrix Reloaded or compatible"; board = &board_matrix_reload; break; case GO7007_BOARDID_MATRIX_REV: name = "WIS Matrix Revolution or compatible"; board = &board_matrix_revolution; break; case GO7007_BOARDID_STAR_TREK: name = "WIS Star Trek or compatible"; board = &board_star_trek; break; case GO7007_BOARDID_XMEN: name = "WIS XMen or compatible"; board = &board_xmen; break; case GO7007_BOARDID_XMEN_II: name = "WIS XMen II or compatible"; board = &board_xmen; break; case GO7007_BOARDID_XMEN_III: name = "WIS XMen III or compatible"; board = &board_xmen; break; case GO7007_BOARDID_PX_M402U: name = "Plextor PX-M402U"; board = &board_matrix_ii; break; 
case GO7007_BOARDID_PX_TV402U_ANY: name = "Plextor PX-TV402U (unknown tuner)"; board = &board_px_tv402u; break; case GO7007_BOARDID_LIFEVIEW_LR192: printk(KERN_ERR "go7007-usb: The Lifeview TV Walker Ultra " "is not supported. Sorry!\n"); return 0; name = "Lifeview TV Walker Ultra"; board = &board_lifeview_lr192; break; case GO7007_BOARDID_SENSORAY_2250: printk(KERN_INFO "Sensoray 2250 found\n"); name = "Sensoray 2250/2251"; board = &board_sensoray_2250; break; default: printk(KERN_ERR "go7007-usb: unknown board ID %d!\n", (unsigned int)id->driver_info); return 0; } usb = kzalloc(sizeof(struct go7007_usb), GFP_KERNEL); if (usb == NULL) return -ENOMEM; /* Allocate the URB and buffer for receiving incoming interrupts */ usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (usb->intr_urb == NULL) goto allocfail; usb->intr_urb->transfer_buffer = kmalloc(2*sizeof(u16), GFP_KERNEL); if (usb->intr_urb->transfer_buffer == NULL) goto allocfail; go = go7007_alloc(&board->main_info, &intf->dev); if (go == NULL) goto allocfail; usb->board = board; usb->usbdev = usbdev; go->board_id = id->driver_info; strncpy(go->name, name, sizeof(go->name)); if (board->flags & GO7007_USB_EZUSB) go->hpi_ops = &go7007_usb_ezusb_hpi_ops; else go->hpi_ops = &go7007_usb_onboard_hpi_ops; go->hpi_context = usb; usb_fill_int_urb(usb->intr_urb, usb->usbdev, usb_rcvintpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16), go7007_usb_readinterrupt_complete, go, 8); usb_set_intfdata(intf, &go->v4l2_dev); /* Boot the GO7007 */ if (go7007_boot_encoder(go, go->board_info->flags & GO7007_BOARD_USE_ONBOARD_I2C) < 0) goto initfail; /* Register the EZ-USB I2C adapter, if we're using it */ if (board->flags & GO7007_USB_EZUSB_I2C) { memcpy(&go->i2c_adapter, &go7007_usb_adap_templ, sizeof(go7007_usb_adap_templ)); mutex_init(&usb->i2c_lock); go->i2c_adapter.dev.parent = go->dev; i2c_set_adapdata(&go->i2c_adapter, go); if (i2c_add_adapter(&go->i2c_adapter) < 0) { printk(KERN_ERR "go7007-usb: error: 
i2c_add_adapter failed\n"); goto initfail; } go->i2c_adapter_online = 1; } /* Pelco and Adlink reused the XMen and XMen-III vendor and product * IDs for their own incompatible designs. We can detect XMen boards * by probing the sensor, but there is no way to probe the sensors on * the Pelco and Adlink designs so we default to the Adlink. If it * is actually a Pelco, the user must set the assume_endura module * parameter. */ if ((go->board_id == GO7007_BOARDID_XMEN || go->board_id == GO7007_BOARDID_XMEN_III) && go->i2c_adapter_online) { union i2c_smbus_data data; /* Check to see if register 0x0A is 0x76 */ i2c_smbus_xfer(&go->i2c_adapter, 0x21, I2C_CLIENT_SCCB, I2C_SMBUS_READ, 0x0A, I2C_SMBUS_BYTE_DATA, &data); if (data.byte != 0x76) { if (assume_endura) { go->board_id = GO7007_BOARDID_ENDURA; usb->board = board = &board_endura; go->board_info = &board->main_info; strncpy(go->name, "Pelco Endura", sizeof(go->name)); } else { u16 channel; /* set GPIO5 to be an output, currently low */ go7007_write_addr(go, 0x3c82, 0x0000); go7007_write_addr(go, 0x3c80, 0x00df); /* read channel number from GPIO[1:0] */ go7007_read_addr(go, 0x3c81, &channel); channel &= 0x3; go->board_id = GO7007_BOARDID_ADLINK_MPG24; usb->board = board = &board_adlink_mpg24; go->board_info = &board->main_info; go->channel_number = channel; snprintf(go->name, sizeof(go->name), "Adlink PCI-MPG24, channel #%d", channel); } } } /* Probe the tuner model on the TV402U */ if (go->board_id == GO7007_BOARDID_PX_TV402U_ANY) { u8 data[3]; /* Board strapping indicates tuner model */ if (go7007_usb_vendor_request(go, 0x41, 0, 0, data, 3, 1) < 0) { printk(KERN_ERR "go7007-usb: GPIO read failed!\n"); goto initfail; } switch (data[0] >> 6) { case 1: go->board_id = GO7007_BOARDID_PX_TV402U_EU; go->tuner_type = TUNER_SONY_BTF_PG472Z; strncpy(go->name, "Plextor PX-TV402U-EU", sizeof(go->name)); break; case 2: go->board_id = GO7007_BOARDID_PX_TV402U_JP; go->tuner_type = TUNER_SONY_BTF_PK467Z; strncpy(go->name, "Plextor 
PX-TV402U-JP", sizeof(go->name)); break; case 3: go->board_id = GO7007_BOARDID_PX_TV402U_NA; go->tuner_type = TUNER_SONY_BTF_PB463Z; strncpy(go->name, "Plextor PX-TV402U-NA", sizeof(go->name)); break; default: printk(KERN_DEBUG "go7007-usb: unable to detect " "tuner type!\n"); break; } /* Configure tuner mode selection inputs connected * to the EZ-USB GPIO output pins */ if (go7007_usb_vendor_request(go, 0x40, 0x7f02, 0, NULL, 0, 0) < 0) { printk(KERN_ERR "go7007-usb: GPIO write failed!\n"); goto initfail; } } /* Print a nasty message if the user attempts to use a USB2.0 device in * a USB1.1 port. There will be silent corruption of the stream. */ if ((board->flags & GO7007_USB_EZUSB) && usbdev->speed != USB_SPEED_HIGH) printk(KERN_ERR "go7007-usb: *** WARNING *** This device " "must be connected to a USB 2.0 port! " "Attempting to capture video through a USB 1.1 " "port will result in stream corruption, even " "at low bitrates!\n"); /* Do any final GO7007 initialization, then register the * V4L2 and ALSA interfaces */ if (go7007_register_encoder(go) < 0) goto initfail; /* Allocate the URBs and buffers for receiving the video stream */ if (board->flags & GO7007_USB_EZUSB) { v_urb_len = 1024; video_pipe = usb_rcvbulkpipe(usb->usbdev, 6); } else { v_urb_len = 512; video_pipe = usb_rcvbulkpipe(usb->usbdev, 1); } for (i = 0; i < 8; ++i) { usb->video_urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (usb->video_urbs[i] == NULL) goto initfail; usb->video_urbs[i]->transfer_buffer = kmalloc(v_urb_len, GFP_KERNEL); if (usb->video_urbs[i]->transfer_buffer == NULL) goto initfail; usb_fill_bulk_urb(usb->video_urbs[i], usb->usbdev, video_pipe, usb->video_urbs[i]->transfer_buffer, v_urb_len, go7007_usb_read_video_pipe_complete, go); } /* Allocate the URBs and buffers for receiving the audio stream */ if ((board->flags & GO7007_USB_EZUSB) && go->audio_enabled) for (i = 0; i < 8; ++i) { usb->audio_urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (usb->audio_urbs[i] == NULL) goto initfail; 
usb->audio_urbs[i]->transfer_buffer = kmalloc(4096, GFP_KERNEL); if (usb->audio_urbs[i]->transfer_buffer == NULL) goto initfail; usb_fill_bulk_urb(usb->audio_urbs[i], usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 8), usb->audio_urbs[i]->transfer_buffer, 4096, go7007_usb_read_audio_pipe_complete, go); } go->status = STATUS_ONLINE; return 0; initfail: go->status = STATUS_SHUTDOWN; return 0; allocfail: if (usb->intr_urb) { kfree(usb->intr_urb->transfer_buffer); usb_free_urb(usb->intr_urb); } kfree(usb); return -ENOMEM; } static void go7007_usb_disconnect(struct usb_interface *intf) { struct go7007 *go = to_go7007(usb_get_intfdata(intf)); struct go7007_usb *usb = go->hpi_context; struct urb *vurb, *aurb; int i; go->status = STATUS_SHUTDOWN; usb_kill_urb(usb->intr_urb); /* Free USB-related structs */ for (i = 0; i < 8; ++i) { vurb = usb->video_urbs[i]; if (vurb) { usb_kill_urb(vurb); if (vurb->transfer_buffer) kfree(vurb->transfer_buffer); usb_free_urb(vurb); } aurb = usb->audio_urbs[i]; if (aurb) { usb_kill_urb(aurb); if (aurb->transfer_buffer) kfree(aurb->transfer_buffer); usb_free_urb(aurb); } } kfree(usb->intr_urb->transfer_buffer); usb_free_urb(usb->intr_urb); kfree(go->hpi_context); go7007_remove(go); } static struct usb_driver go7007_usb_driver = { .name = "go7007", .probe = go7007_usb_probe, .disconnect = go7007_usb_disconnect, .id_table = go7007_usb_id_table, }; static int __init go7007_usb_init(void) { return usb_register(&go7007_usb_driver); } static void __exit go7007_usb_cleanup(void) { usb_deregister(&go7007_usb_driver); } module_init(go7007_usb_init); module_exit(go7007_usb_cleanup); MODULE_LICENSE("GPL v2");
gpl-2.0
boyan3010/ShooterU_Kernel_3.2.X
drivers/media/video/gspca/finepix.c
162
8070
/* * Fujifilm Finepix subdriver * * Copyright (C) 2008 Frank Zago * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "finepix" #include "gspca.h" MODULE_AUTHOR("Frank Zago <frank@zago.net>"); MODULE_DESCRIPTION("Fujifilm FinePix USB V4L2 driver"); MODULE_LICENSE("GPL"); /* Default timeout, in ms */ #define FPIX_TIMEOUT 250 /* Maximum transfer size to use. The windows driver reads by chunks of * 0x2000 bytes, so do the same. Note: reading more seems to work * too. */ #define FPIX_MAX_TRANSFER 0x2000 /* Structure to hold all of our device specific stuff */ struct usb_fpix { struct gspca_dev gspca_dev; /* !! must be the first item */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; /* Delay after which claim the next frame. If the delay is too small, * the camera will return old frames. On the 4800Z, 20ms is bad, 25ms * will fail every 4 or 5 frames, but 30ms is perfect. On the A210, * 30ms is bad while 35ms is perfect. */ #define NEXT_FRAME_DELAY 35 /* These cameras only support 320x200. 
*/ static const struct v4l2_pix_format fpix_mode[1] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* send a command to the webcam */ static int command(struct gspca_dev *gspca_dev, int order) /* 0: reset, 1: frame request */ { static u8 order_values[2][12] = { {0xc6, 0, 0, 0, 0, 0, 0, 0, 0x20, 0, 0, 0}, /* reset */ {0xd3, 0, 0, 0, 0, 0, 0, 0x01, 0, 0, 0, 0}, /* fr req */ }; memcpy(gspca_dev->usb_buf, order_values[order], 12); return usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_GET_STATUS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 0, gspca_dev->usb_buf, 12, FPIX_TIMEOUT); } /* workqueue */ static void dostream(struct work_struct *work) { struct usb_fpix *dev = container_of(work, struct usb_fpix, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; struct urb *urb = gspca_dev->urb[0]; u8 *data = urb->transfer_buffer; int ret = 0; int len; /* synchronize with the main driver */ mutex_lock(&gspca_dev->usb_lock); mutex_unlock(&gspca_dev->usb_lock); PDEBUG(D_STREAM, "dostream started"); /* loop reading a frame */ again: while (gspca_dev->present && gspca_dev->streaming) { /* request a frame */ mutex_lock(&gspca_dev->usb_lock); ret = command(gspca_dev, 1); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) break; if (!gspca_dev->present || !gspca_dev->streaming) break; /* the frame comes in parts */ for (;;) { ret = usb_bulk_msg(gspca_dev->dev, urb->pipe, data, FPIX_MAX_TRANSFER, &len, FPIX_TIMEOUT); if (ret < 0) { /* Most of the time we get a timeout * error. Just restart. */ goto again; } if (!gspca_dev->present || !gspca_dev->streaming) goto out; if (len < FPIX_MAX_TRANSFER || (data[len - 2] == 0xff && data[len - 1] == 0xd9)) { /* If the result is less than what was asked * for, then it's the end of the * frame. Sometimes the jpeg is not complete, * but there's nothing we can do. 
We also end * here if the the jpeg ends right at the end * of the frame. */ gspca_frame_add(gspca_dev, LAST_PACKET, data, len); break; } /* got a partial image */ gspca_frame_add(gspca_dev, gspca_dev->last_packet_type == LAST_PACKET ? FIRST_PACKET : INTER_PACKET, data, len); } /* We must wait before trying reading the next * frame. If we don't, or if the delay is too short, * the camera will disconnect. */ msleep(NEXT_FRAME_DELAY); } out: PDEBUG(D_STREAM, "dostream stopped"); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; struct cam *cam = &gspca_dev->cam; cam->cam_mode = fpix_mode; cam->nmodes = 1; cam->bulk = 1; cam->bulk_size = FPIX_MAX_TRANSFER; INIT_WORK(&dev->work_struct, dostream); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } /* start the camera */ static int sd_start(struct gspca_dev *gspca_dev) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; int ret, len; /* Init the device */ ret = command(gspca_dev, 0); if (ret < 0) { pr_err("init failed %d\n", ret); return ret; } /* Read the result of the command. Ignore the result, for it * varies with the device. 
*/ ret = usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, FPIX_MAX_TRANSFER, &len, FPIX_TIMEOUT); if (ret < 0) { pr_err("usb_bulk_msg failed %d\n", ret); return ret; } /* Request a frame, but don't read it */ ret = command(gspca_dev, 1); if (ret < 0) { pr_err("frame request failed %d\n", ret); return ret; } /* Again, reset bulk in endpoint */ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe); /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); destroy_workqueue(dev->work_thread); mutex_lock(&gspca_dev->usb_lock); dev->work_thread = NULL; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x04cb, 0x0104)}, {USB_DEVICE(0x04cb, 0x0109)}, {USB_DEVICE(0x04cb, 0x010b)}, {USB_DEVICE(0x04cb, 0x010f)}, {USB_DEVICE(0x04cb, 0x0111)}, {USB_DEVICE(0x04cb, 0x0113)}, {USB_DEVICE(0x04cb, 0x0115)}, {USB_DEVICE(0x04cb, 0x0117)}, {USB_DEVICE(0x04cb, 0x0119)}, {USB_DEVICE(0x04cb, 0x011b)}, {USB_DEVICE(0x04cb, 0x011d)}, {USB_DEVICE(0x04cb, 0x0121)}, {USB_DEVICE(0x04cb, 0x0123)}, {USB_DEVICE(0x04cb, 0x0125)}, {USB_DEVICE(0x04cb, 0x0127)}, {USB_DEVICE(0x04cb, 0x0129)}, {USB_DEVICE(0x04cb, 0x012b)}, {USB_DEVICE(0x04cb, 0x012d)}, {USB_DEVICE(0x04cb, 0x012f)}, {USB_DEVICE(0x04cb, 0x0131)}, {USB_DEVICE(0x04cb, 0x013b)}, {USB_DEVICE(0x04cb, 0x013d)}, {USB_DEVICE(0x04cb, 0x013f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = 
sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct usb_fpix), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { return usb_register(&sd_driver); } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
JoshWu/linux-at91
drivers/staging/cptm1217/clearpad_tm1217.c
418
17463
/* * clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad * TM1217 controller * * Copyright (C) 2008 Intel Corp * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; ifnot, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com) * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/timer.h> #include <linux/gpio.h> #include <linux/hrtimer.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> #include "cp_tm1217.h" #define CPTM1217_DEVICE_NAME "cptm1217" #define CPTM1217_DRIVER_NAME CPTM1217_DEVICE_NAME #define MAX_TOUCH_SUPPORTED 2 #define TOUCH_SUPPORTED 1 #define SAMPLING_FREQ 80 /* Frequency in HZ */ #define DELAY_BTWIN_SAMPLE (1000 / SAMPLING_FREQ) #define WAIT_FOR_RESPONSE 5 /* 5msec just works */ #define MAX_RETRIES 5 /* As above */ #define INCREMENTAL_DELAY 5 /* As above */ /* Regster Definitions */ #define TMA1217_DEV_STATUS 0x13 /* Device Status */ #define TMA1217_INT_STATUS 0x14 /* Interrupt Status */ /* Controller can detect up to 2 possible finger touches. 
* Each finger touch provides 12 bit X Y co-ordinates, the values are split * across 2 registers, and an 8 bit Z value */ #define TMA1217_FINGER_STATE 0x18 /* Finger State */ #define TMA1217_FINGER1_X_HIGHER8 0x19 /* Higher 8 bit of X coordinate */ #define TMA1217_FINGER1_Y_HIGHER8 0x1A /* Higher 8 bit of Y coordinate */ #define TMA1217_FINGER1_XY_LOWER4 0x1B /* Lower 4 bits of X and Y */ #define TMA1217_FINGER1_Z_VALUE 0x1D /* 8 bit Z value for finger 1 */ #define TMA1217_FINGER2_X_HIGHER8 0x1E /* Higher 8 bit of X coordinate */ #define TMA1217_FINGER2_Y_HIGHER8 0x1F /* Higher 8 bit of Y coordinate */ #define TMA1217_FINGER2_XY_LOWER4 0x20 /* Lower 4 bits of X and Y */ #define TMA1217_FINGER2_Z_VALUE 0x22 /* 8 bit Z value for finger 2 */ #define TMA1217_DEVICE_CTRL 0x23 /* Device Control */ #define TMA1217_INTERRUPT_ENABLE 0x24 /* Interrupt Enable */ #define TMA1217_REPORT_MODE 0x2B /* Reporting Mode */ #define TMA1217_MAX_X_LOWER8 0x31 /* Bit 0-7 for Max X */ #define TMA1217_MAX_X_HIGHER4 0x32 /* Bit 8-11 for Max X */ #define TMA1217_MAX_Y_LOWER8 0x33 /* Bit 0-7 for Max Y */ #define TMA1217_MAX_Y_HIGHER4 0x34 /* Bit 8-11 for Max Y */ #define TMA1217_DEVICE_CMD_RESET 0x67 /* Device CMD reg for reset */ #define TMA1217_DEVICE_CMD_REZERO 0x69 /* Device CMD reg for rezero */ #define TMA1217_MANUFACTURER_ID 0x73 /* Manufacturer Id */ #define TMA1217_PRODUCT_FAMILY 0x75 /* Product Family */ #define TMA1217_FIRMWARE_REVISION 0x76 /* Firmware Revision */ #define TMA1217_SERIAL_NO_HIGH 0x7C /* Bit 8-15 of device serial no. */ #define TMA1217_SERIAL_NO_LOW 0x7D /* Bit 0-7 of device serial no. */ #define TMA1217_PRODUCT_ID_START 0x7E /* Start address for 10 byte ID */ #define TMA1217_DEVICE_CAPABILITY 0x8B /* Reporting capability */ /* * The touch position structure. 
*/ struct touch_state { int x; int y; bool button; }; /* Device Specific info given by the controller */ struct cp_dev_info { u16 maxX; u16 maxY; }; /* Vendor related info given by the controller */ struct cp_vendor_info { u8 vendor_id; u8 product_family; u8 firmware_rev; u16 serial_no; }; /* * Private structure to store the device details */ struct cp_tm1217_device { struct i2c_client *client; struct device *dev; struct cp_vendor_info vinfo; struct cp_dev_info dinfo; struct input_dev_info { char phys[32]; char name[128]; struct input_dev *input; struct touch_state touch; } cp_input_info[MAX_TOUCH_SUPPORTED]; int thread_running; struct mutex thread_mutex; int gpio; }; /* The following functions are used to read/write registers on the device * as per the RMI prorocol. Technically, a page select should be written * before doing read/write but since the register offsets are below 0xFF * we can use the default value of page which is 0x00 */ static int cp_tm1217_read(struct cp_tm1217_device *ts, u8 *req, int size) { int i, retval; /* Send the address */ retval = i2c_master_send(ts->client, &req[0], 1); if (retval != 1) { dev_err(ts->dev, "cp_tm1217: I2C send failed\n"); return retval; } msleep(WAIT_FOR_RESPONSE); for (i = 0; i < MAX_RETRIES; i++) { retval = i2c_master_recv(ts->client, &req[1], size); if (retval == size) break; msleep(INCREMENTAL_DELAY); dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i); } if (retval != size) dev_err(ts->dev, "cp_tm1217: Read from device failed\n"); return retval; } static int cp_tm1217_write(struct cp_tm1217_device *ts, u8 *req, int size) { int retval; /* Send the address and the data to be written */ retval = i2c_master_send(ts->client, &req[0], size + 1); if (retval != size + 1) { dev_err(ts->dev, "cp_tm1217: I2C write failed: %d\n", retval); return retval; } /* Wait for the write to complete. 
TBD why this is required */ msleep(WAIT_FOR_RESPONSE); return size; } static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts) { u8 req[2]; int retval; req[0] = TMA1217_INTERRUPT_ENABLE; req[1] = 0x0; retval = cp_tm1217_write(ts, req, 1); if (retval != 1) return -EIO; return 0; } static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts) { u8 req[2]; int retval; req[0] = TMA1217_INTERRUPT_ENABLE; req[1] = 0xa; retval = cp_tm1217_write(ts, req, 1); if (retval != 1) return -EIO; return 0; } static void process_touch(struct cp_tm1217_device *ts, int index) { int retval; struct input_dev_info *input_info = (struct input_dev_info *)&ts->cp_input_info[index]; u8 xy_data[6]; if (index == 0) xy_data[0] = TMA1217_FINGER1_X_HIGHER8; else xy_data[0] = TMA1217_FINGER2_X_HIGHER8; retval = cp_tm1217_read(ts, xy_data, 5); if (retval < 5) { dev_err(ts->dev, "cp_tm1217: XY read from device failed\n"); return; } /* Note: Currently not using the Z values but may be requried in the future. */ input_info->touch.x = (xy_data[1] << 4) | (xy_data[3] & 0x0F); input_info->touch.y = (xy_data[2] << 4) | ((xy_data[3] & 0xF0) >> 4); input_report_abs(input_info->input, ABS_X, input_info->touch.x); input_report_abs(input_info->input, ABS_Y, input_info->touch.y); input_sync(input_info->input); } static void cp_tm1217_get_data(struct cp_tm1217_device *ts) { u8 req[2]; int retval, i, finger_touched = 0; do { req[0] = TMA1217_FINGER_STATE; retval = cp_tm1217_read(ts, req, 1); if (retval != 1) { dev_err(ts->dev, "cp_tm1217: Read from device failed\n"); continue; } finger_touched = 0; /* Start sampling until the pressure is below threshold */ for (i = 0; i < TOUCH_SUPPORTED; i++) { if (req[1] & 0x3) { finger_touched++; if (ts->cp_input_info[i].touch.button == 0) { /* send the button touch event */ input_report_key( ts->cp_input_info[i].input, BTN_TOUCH, 1); ts->cp_input_info[i].touch.button = 1; } process_touch(ts, i); } else { if (ts->cp_input_info[i].touch.button == 1) { /* send the 
button release event */ input_report_key( ts->cp_input_info[i].input, BTN_TOUCH, 0); input_sync(ts->cp_input_info[i].input); ts->cp_input_info[i].touch.button = 0; } } req[1] = req[1] >> 2; } msleep(DELAY_BTWIN_SAMPLE); } while (finger_touched > 0); } static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle) { struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle; u8 req[2]; int retval; /* Chedk if another thread is already running */ mutex_lock(&ts->thread_mutex); if (ts->thread_running == 1) { mutex_unlock(&ts->thread_mutex); return IRQ_HANDLED; } ts->thread_running = 1; mutex_unlock(&ts->thread_mutex); /* Mask the interrupts */ retval = cp_tm1217_mask_interrupt(ts); /* Read the Interrupt Status register to find the cause of the Interrupt */ req[0] = TMA1217_INT_STATUS; retval = cp_tm1217_read(ts, req, 1); if (retval != 1) goto exit_thread; if (!(req[1] & 0x8)) goto exit_thread; cp_tm1217_get_data(ts); exit_thread: /* Unmask the interrupts before going to sleep */ retval = cp_tm1217_unmask_interrupt(ts); mutex_lock(&ts->thread_mutex); ts->thread_running = 0; mutex_unlock(&ts->thread_mutex); return IRQ_HANDLED; } static int cp_tm1217_init_data(struct cp_tm1217_device *ts) { int retval; u8 req[2]; /* Read the vendor id/ fw revision etc. 
Ignoring return check as this is non critical info */ req[0] = TMA1217_MANUFACTURER_ID; retval = cp_tm1217_read(ts, req, 1); ts->vinfo.vendor_id = req[1]; req[0] = TMA1217_PRODUCT_FAMILY; retval = cp_tm1217_read(ts, req, 1); ts->vinfo.product_family = req[1]; req[0] = TMA1217_FIRMWARE_REVISION; retval = cp_tm1217_read(ts, req, 1); ts->vinfo.firmware_rev = req[1]; req[0] = TMA1217_SERIAL_NO_HIGH; retval = cp_tm1217_read(ts, req, 1); ts->vinfo.serial_no = (req[1] << 8); req[0] = TMA1217_SERIAL_NO_LOW; retval = cp_tm1217_read(ts, req, 1); ts->vinfo.serial_no = ts->vinfo.serial_no | req[1]; req[0] = TMA1217_MAX_X_HIGHER4; retval = cp_tm1217_read(ts, req, 1); ts->dinfo.maxX = (req[1] & 0xF) << 8; req[0] = TMA1217_MAX_X_LOWER8; retval = cp_tm1217_read(ts, req, 1); ts->dinfo.maxX = ts->dinfo.maxX | req[1]; req[0] = TMA1217_MAX_Y_HIGHER4; retval = cp_tm1217_read(ts, req, 1); ts->dinfo.maxY = (req[1] & 0xF) << 8; req[0] = TMA1217_MAX_Y_LOWER8; retval = cp_tm1217_read(ts, req, 1); ts->dinfo.maxY = ts->dinfo.maxY | req[1]; return 0; } /* * Set up a GPIO for use as the interrupt. We can't simply do this at * boot time because the GPIO drivers themselves may not be around at * boot/firmware set up time to do the work. Instead defer it to driver * detection. 
*/ static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts) { int retval; /* Hook up the irq handler */ retval = gpio_request(ts->gpio, "cp_tm1217_touch"); if (retval < 0) { dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n", retval); return retval; } retval = gpio_direction_input(ts->gpio); if (retval < 0) { dev_err(ts->dev, "cp_tm1217: GPIO direction configuration failed, error %d\n", retval); gpio_free(ts->gpio); return retval; } retval = gpio_to_irq(ts->gpio); if (retval < 0) { dev_err(ts->dev, "cp_tm1217: GPIO to IRQ failed, error %d\n", retval); gpio_free(ts->gpio); } dev_dbg(ts->dev, "cp_tm1217: Got IRQ number is %d for GPIO %d\n", retval, ts->gpio); return retval; } static int cp_tm1217_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cp_tm1217_device *ts; struct input_dev *input_dev; struct input_dev_info *input_info; struct cp_tm1217_platform_data *pdata; u8 req[2]; int i, retval; /* No pdata is fine - we then use "normal" IRQ mode */ pdata = client->dev.platform_data; ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL); if (!ts) return -ENOMEM; ts->client = client; ts->dev = &client->dev; i2c_set_clientdata(client, ts); ts->thread_running = 0; mutex_init(&ts->thread_mutex); /* Reset the Controller */ req[0] = TMA1217_DEVICE_CMD_RESET; req[1] = 0x1; retval = cp_tm1217_write(ts, req, 1); if (retval != 1) { dev_err(ts->dev, "cp_tm1217: Controller reset failed\n"); kfree(ts); return -EIO; } /* Clear up the interrupt status from reset. 
*/ req[0] = TMA1217_INT_STATUS; retval = cp_tm1217_read(ts, req, 1); /* Mask all the interrupts */ retval = cp_tm1217_mask_interrupt(ts); /* Read the controller information */ cp_tm1217_init_data(ts); /* The following code will register multiple event devices when multi-pointer is enabled, the code has not been tested with MPX */ for (i = 0; i < TOUCH_SUPPORTED; i++) { input_dev = input_allocate_device(); if (input_dev == NULL) { retval = -ENOMEM; goto fail; } input_info = &ts->cp_input_info[i]; snprintf(input_info->name, sizeof(input_info->name), "cp_tm1217_touchscreen_%d", i); input_dev->name = input_info->name; snprintf(input_info->phys, sizeof(input_info->phys), "%s/input%d", dev_name(&client->dev), i); input_dev->phys = input_info->phys; input_dev->id.bustype = BUS_I2C; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0); retval = input_register_device(input_dev); if (retval) { dev_err(ts->dev, "Input dev registration failed for %s\n", input_dev->name); input_free_device(input_dev); goto fail; } input_info->input = input_dev; } /* Setup the reporting mode to send an interrupt only when finger arrives or departs. 
*/ req[0] = TMA1217_REPORT_MODE; req[1] = 0x02; retval = cp_tm1217_write(ts, req, 1); /* Setup the device to no sleep mode for now and make it configured */ req[0] = TMA1217_DEVICE_CTRL; req[1] = 0x84; retval = cp_tm1217_write(ts, req, 1); /* Check for the status of the device */ req[0] = TMA1217_DEV_STATUS; retval = cp_tm1217_read(ts, req, 1); if (req[1] != 0) { dev_err(ts->dev, "cp_tm1217: Device Status 0x%x != 0: config failed\n", req[1]); retval = -EIO; goto fail; } if (pdata && pdata->gpio) { ts->gpio = pdata->gpio; retval = cp_tm1217_setup_gpio_irq(ts); } else retval = client->irq; if (retval < 0) { dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n", retval); goto fail; } client->irq = retval; retval = request_threaded_irq(client->irq, NULL, cp_tm1217_sample_thread, IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts); if (retval < 0) { dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval); goto fail_gpio; } /* Unmask the interrupts */ retval = cp_tm1217_unmask_interrupt(ts); if (retval == 0) return 0; free_irq(client->irq, ts); fail_gpio: if (ts->gpio) gpio_free(ts->gpio); fail: /* Clean up before returning failure */ for (i = 0; i < TOUCH_SUPPORTED; i++) { if (ts->cp_input_info[i].input) input_unregister_device(ts->cp_input_info[i].input); } kfree(ts); return retval; } #ifdef CONFIG_PM_SLEEP /* * cp_tm1217 suspend * */ static int cp_tm1217_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct cp_tm1217_device *ts = i2c_get_clientdata(client); u8 req[2]; int retval; /* Put the controller to sleep */ req[0] = TMA1217_DEVICE_CTRL; retval = cp_tm1217_read(ts, req, 1); req[1] = (req[1] & 0xF8) | 0x1; retval = cp_tm1217_write(ts, req, 1); if (device_may_wakeup(&client->dev)) enable_irq_wake(client->irq); return 0; } /* * cp_tm1217_resume * */ static int cp_tm1217_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct cp_tm1217_device *ts = i2c_get_clientdata(client); u8 req[2]; int retval; /* 
Take the controller out of sleep */ req[0] = TMA1217_DEVICE_CTRL; retval = cp_tm1217_read(ts, req, 1); req[1] = (req[1] & 0xF8) | 0x4; retval = cp_tm1217_write(ts, req, 1); /* Restore the register settings sinc the power to the could have been cut off */ /* Setup the reporting mode to send an interrupt only when finger arrives or departs. */ req[0] = TMA1217_REPORT_MODE; req[1] = 0x02; retval = cp_tm1217_write(ts, req, 1); /* Setup the device to no sleep mode for now and make it configured */ req[0] = TMA1217_DEVICE_CTRL; req[1] = 0x84; retval = cp_tm1217_write(ts, req, 1); /* Setup the interrupt mask */ retval = cp_tm1217_unmask_interrupt(ts); if (device_may_wakeup(&client->dev)) disable_irq_wake(client->irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(cp_tm1217_pm_ops, cp_tm1217_suspend, cp_tm1217_resume); /* * cp_tm1217_remove * */ static int cp_tm1217_remove(struct i2c_client *client) { struct cp_tm1217_device *ts = i2c_get_clientdata(client); int i; free_irq(client->irq, ts); if (ts->gpio) gpio_free(ts->gpio); for (i = 0; i < TOUCH_SUPPORTED; i++) input_unregister_device(ts->cp_input_info[i].input); kfree(ts); return 0; } static struct i2c_device_id cp_tm1217_idtable[] = { { CPTM1217_DEVICE_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable); static struct i2c_driver cp_tm1217_driver = { .driver = { .owner = THIS_MODULE, .name = CPTM1217_DRIVER_NAME, .pm = &cp_tm1217_pm_ops, }, .id_table = cp_tm1217_idtable, .probe = cp_tm1217_probe, .remove = cp_tm1217_remove, }; module_i2c_driver(cp_tm1217_driver); MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>"); MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
onejay09/OLD----kernel_HTC_msm7x30_KK
drivers/base/bus.c
674
25728
/* * bus.c - bus driver management * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2007 Novell Inc. * * This file is released under the GPLv2 * */ #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/string.h> #include "base.h" #include "power/power.h" #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) /* * sysfs bindings for drivers */ #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); static struct bus_type *bus_get(struct bus_type *bus) { if (bus) { kset_get(&bus->p->subsys); return bus; } return NULL; } static void bus_put(struct bus_type *bus) { if (bus) kset_put(&bus->p->subsys); } static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->show) ret = drv_attr->show(drv_priv->driver, buf); return ret; } static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->store) ret = drv_attr->store(drv_priv->driver, buf, count); return ret; } static const struct sysfs_ops driver_sysfs_ops = { .show = drv_attr_show, .store = drv_attr_store, }; static void driver_release(struct kobject *kobj) { struct driver_private *drv_priv = to_driver(kobj); pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__); kfree(drv_priv); } static struct kobj_type driver_ktype = { .sysfs_ops = &driver_sysfs_ops, .release = driver_release, }; /* * sysfs bindings for buses */ static ssize_t 
bus_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); ssize_t ret = 0; if (bus_attr->show) ret = bus_attr->show(subsys_priv->bus, buf); return ret; } static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); ssize_t ret = 0; if (bus_attr->store) ret = bus_attr->store(subsys_priv->bus, buf, count); return ret; } static const struct sysfs_ops bus_sysfs_ops = { .show = bus_attr_show, .store = bus_attr_store, }; int bus_create_file(struct bus_type *bus, struct bus_attribute *attr) { int error; if (bus_get(bus)) { error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr); bus_put(bus); } else error = -EINVAL; return error; } EXPORT_SYMBOL_GPL(bus_create_file); void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr) { if (bus_get(bus)) { sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr); bus_put(bus); } } EXPORT_SYMBOL_GPL(bus_remove_file); static struct kobj_type bus_ktype = { .sysfs_ops = &bus_sysfs_ops, }; static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) { struct kobj_type *ktype = get_ktype(kobj); if (ktype == &bus_ktype) return 1; return 0; } static const struct kset_uevent_ops bus_uevent_ops = { .filter = bus_uevent_filter, }; static struct kset *bus_kset; #ifdef CONFIG_HOTPLUG /* Manually detach a device from its associated driver. 
*/ static ssize_t driver_unbind(struct device_driver *drv, const char *buf, size_t count) { struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { if (dev->parent) /* Needed for USB */ device_lock(dev->parent); device_release_driver(dev); if (dev->parent) device_unlock(dev->parent); err = count; } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind); /* * Manually attach a device to a driver. * Note: the driver must want to bind to the device, * it is not possible to override the driver's id table. */ static ssize_t driver_bind(struct device_driver *drv, const char *buf, size_t count) { struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { if (dev->parent) /* Needed for USB */ device_lock(dev->parent); device_lock(dev); err = driver_probe_device(drv, dev); device_unlock(dev); if (dev->parent) device_unlock(dev->parent); if (err > 0) { /* success */ err = count; } else if (err == 0) { /* driver didn't accept device */ err = -ENODEV; } } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind); static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) { return sprintf(buf, "%d\n", bus->p->drivers_autoprobe); } static ssize_t store_drivers_autoprobe(struct bus_type *bus, const char *buf, size_t count) { if (buf[0] == '0') bus->p->drivers_autoprobe = 0; else bus->p->drivers_autoprobe = 1; return count; } static ssize_t store_drivers_probe(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; dev = bus_find_device_by_name(bus, NULL, buf); if (!dev) return -ENODEV; if (bus_rescan_devices_helper(dev, NULL) != 0) return -EINVAL; return count; } #endif static struct device *next_device(struct klist_iter *i) { 
struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *dev_prv; if (n) { dev_prv = to_device_private_bus(n); dev = dev_prv->device; } return dev; } /** * bus_for_each_dev - device iterator. * @bus: bus type. * @start: device to start iterating from. * @data: data for the callback. * @fn: function to be called for each device. * * Iterate over @bus's list of devices, and call @fn for each, * passing it @data. If @start is not NULL, we use that device to * begin iterating from. * * We check the return of @fn each time. If it returns anything * other than 0, we break out and return that value. * * NOTE: The device that returns a non-zero value is not retained * in any way, nor is its refcount incremented. If the caller needs * to retain this data, it should do so, and increment the reference * count in the supplied callback. */ int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *, void *)) { struct klist_iter i; struct device *dev; int error = 0; if (!bus || !bus->p) return -EINVAL; klist_iter_init_node(&bus->p->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while ((dev = next_device(&i)) && !error) error = fn(dev, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(bus_for_each_dev); /** * bus_find_device - device iterator for locating a particular device. * @bus: bus type * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the bus_for_each_dev() function above, but it * returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. 
*/ struct device *bus_find_device(struct bus_type *bus, struct device *start, void *data, int (*match)(struct device *dev, void *data)) { struct klist_iter i; struct device *dev; if (!bus || !bus->p) return NULL; klist_iter_init_node(&bus->p->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while ((dev = next_device(&i))) if (match(dev, data) && get_device(dev)) break; klist_iter_exit(&i); return dev; } EXPORT_SYMBOL_GPL(bus_find_device); static int match_name(struct device *dev, void *data) { const char *name = data; return sysfs_streq(name, dev_name(dev)); } /** * bus_find_device_by_name - device iterator for locating a particular device of a specific name * @bus: bus type * @start: Device to begin with * @name: name of the device to match * * This is similar to the bus_find_device() function above, but it handles * searching by a name automatically, no need to write another strcmp matching * function. */ struct device *bus_find_device_by_name(struct bus_type *bus, struct device *start, const char *name) { return bus_find_device(bus, start, (void *)name, match_name); } EXPORT_SYMBOL_GPL(bus_find_device_by_name); static struct device_driver *next_driver(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct driver_private *drv_priv; if (n) { drv_priv = container_of(n, struct driver_private, knode_bus); return drv_priv->driver; } return NULL; } /** * bus_for_each_drv - driver iterator * @bus: bus we're dealing with. * @start: driver to start iterating on. * @data: data to pass to the callback. * @fn: function to call for each driver. * * This is nearly identical to the device iterator above. * We iterate over each driver that belongs to @bus, and call * @fn for each. If @fn returns anything but 0, we break out * and return it. If @start is not NULL, we use it as the head * of the list. * * NOTE: we don't return the driver that returns a non-zero * value, nor do we leave the reference count incremented for that * driver. 
If the caller needs to know that info, it must set it * in the callback. It must also be sure to increment the refcount * so it doesn't disappear before returning to the caller. */ int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)) { struct klist_iter i; struct device_driver *drv; int error = 0; if (!bus) return -EINVAL; klist_iter_init_node(&bus->p->klist_drivers, &i, start ? &start->p->knode_bus : NULL); while ((drv = next_driver(&i)) && !error) error = fn(drv, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(bus_for_each_drv); static int device_add_attrs(struct bus_type *bus, struct device *dev) { int error = 0; int i; if (!bus->dev_attrs) return 0; for (i = 0; attr_name(bus->dev_attrs[i]); i++) { error = device_create_file(dev, &bus->dev_attrs[i]); if (error) { while (--i >= 0) device_remove_file(dev, &bus->dev_attrs[i]); break; } } return error; } static void device_remove_attrs(struct bus_type *bus, struct device *dev) { int i; if (bus->dev_attrs) { for (i = 0; attr_name(bus->dev_attrs[i]); i++) device_remove_file(dev, &bus->dev_attrs[i]); } } /** * bus_add_device - add device to bus * @dev: device being added * * - Add device's bus attributes. * - Create links to device's bus. * - Add the device to its bus's list of devices. 
*/ int bus_add_device(struct device *dev) { struct bus_type *bus = bus_get(dev->bus); int error = 0; if (bus) { pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev)); error = device_add_attrs(bus, dev); if (error) goto out_put; error = sysfs_create_link(&bus->p->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) goto out_id; error = sysfs_create_link(&dev->kobj, &dev->bus->p->subsys.kobj, "subsystem"); if (error) goto out_subsys; klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); } return 0; out_subsys: sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); out_id: device_remove_attrs(bus, dev); out_put: bus_put(dev->bus); return error; } /** * bus_probe_device - probe drivers for a new device * @dev: device to probe * * - Automatically probe for a driver if the bus allows it. */ void bus_probe_device(struct device *dev) { struct bus_type *bus = dev->bus; int ret; if (bus && bus->p->drivers_autoprobe) { ret = device_attach(dev); WARN_ON(ret < 0); } } /** * bus_remove_device - remove device from bus * @dev: device to be removed * * - Remove symlink from bus's directory. * - Delete device from bus's list. * - Detach from its driver. * - Drop reference taken in bus_add_device(). 
*/ void bus_remove_device(struct device *dev) { if (dev->bus) { sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&dev->bus->p->devices_kset->kobj, dev_name(dev)); device_remove_attrs(dev->bus, dev); if (klist_node_attached(&dev->p->knode_bus)) klist_del(&dev->p->knode_bus); pr_debug("bus: '%s': remove device %s\n", dev->bus->name, dev_name(dev)); device_release_driver(dev); bus_put(dev->bus); } } static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv) { int error = 0; int i; if (bus->drv_attrs) { for (i = 0; attr_name(bus->drv_attrs[i]); i++) { error = driver_create_file(drv, &bus->drv_attrs[i]); if (error) goto err; } } done: return error; err: while (--i >= 0) driver_remove_file(drv, &bus->drv_attrs[i]); goto done; } static void driver_remove_attrs(struct bus_type *bus, struct device_driver *drv) { int i; if (bus->drv_attrs) { for (i = 0; attr_name(bus->drv_attrs[i]); i++) driver_remove_file(drv, &bus->drv_attrs[i]); } } #ifdef CONFIG_HOTPLUG /* * Thanks to drivers making their tables __devinit, we can't allow manual * bind and unbind from userspace unless CONFIG_HOTPLUG is enabled. 
*/ static int __must_check add_bind_files(struct device_driver *drv) { int ret; ret = driver_create_file(drv, &driver_attr_unbind); if (ret == 0) { ret = driver_create_file(drv, &driver_attr_bind); if (ret) driver_remove_file(drv, &driver_attr_unbind); } return ret; } static void remove_bind_files(struct device_driver *drv) { driver_remove_file(drv, &driver_attr_bind); driver_remove_file(drv, &driver_attr_unbind); } static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe); static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO, show_drivers_autoprobe, store_drivers_autoprobe); static int add_probe_files(struct bus_type *bus) { int retval; retval = bus_create_file(bus, &bus_attr_drivers_probe); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_drivers_autoprobe); if (retval) bus_remove_file(bus, &bus_attr_drivers_probe); out: return retval; } static void remove_probe_files(struct bus_type *bus) { bus_remove_file(bus, &bus_attr_drivers_autoprobe); bus_remove_file(bus, &bus_attr_drivers_probe); } #else static inline int add_bind_files(struct device_driver *drv) { return 0; } static inline void remove_bind_files(struct device_driver *drv) {} static inline int add_probe_files(struct bus_type *bus) { return 0; } static inline void remove_probe_files(struct bus_type *bus) {} #endif static ssize_t driver_uevent_store(struct device_driver *drv, const char *buf, size_t count) { enum kobject_action action; if (kobject_action_type(buf, count, &action) == 0) kobject_uevent(&drv->p->kobj, action); return count; } static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store); /** * bus_add_driver - Add a driver to the bus. * @drv: driver. 
*/ int bus_add_driver(struct device_driver *drv) { struct bus_type *bus; struct driver_private *priv; int error = 0; bus = bus_get(drv->bus); if (!bus) return -EINVAL; pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto out_put_bus; } klist_init(&priv->klist_devices, NULL, NULL); priv->driver = drv; drv->p = priv; priv->kobj.kset = bus->p->drivers_kset; error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, "%s", drv->name); if (error) goto out_unregister; if (drv->bus->p->drivers_autoprobe) { error = driver_attach(drv); if (error) goto out_unregister; } klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); module_add_driver(drv->owner, drv); error = driver_create_file(drv, &driver_attr_uevent); if (error) { printk(KERN_ERR "%s: uevent attr (%s) failed\n", __func__, drv->name); } error = driver_add_attrs(bus, drv); if (error) { /* How the hell do we get out of this pickle? Give up */ printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n", __func__, drv->name); } if (!drv->suppress_bind_attrs) { error = add_bind_files(drv); if (error) { /* Ditto */ printk(KERN_ERR "%s: add_bind_files(%s) failed\n", __func__, drv->name); } } kobject_uevent(&priv->kobj, KOBJ_ADD); return 0; out_unregister: kobject_put(&priv->kobj); kfree(drv->p); drv->p = NULL; out_put_bus: bus_put(bus); return error; } /** * bus_remove_driver - delete driver from bus's knowledge. * @drv: driver. * * Detach the driver from the devices it controls, and remove * it from its bus's list of drivers. Finally, we drop the reference * to the bus we took in bus_add_driver(). 
*/ void bus_remove_driver(struct device_driver *drv) { if (!drv->bus) return; if (!drv->suppress_bind_attrs) remove_bind_files(drv); driver_remove_attrs(drv->bus, drv); driver_remove_file(drv, &driver_attr_uevent); klist_remove(&drv->p->knode_bus); pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name); driver_detach(drv); module_remove_driver(drv); kobject_put(&drv->p->kobj); bus_put(drv->bus); } /* Helper for bus_rescan_devices's iter */ static int __must_check bus_rescan_devices_helper(struct device *dev, void *data) { int ret = 0; if (!dev->driver) { if (dev->parent) /* Needed for USB */ device_lock(dev->parent); ret = device_attach(dev); if (dev->parent) device_unlock(dev->parent); } return ret < 0 ? ret : 0; } /** * bus_rescan_devices - rescan devices on the bus for possible drivers * @bus: the bus to scan. * * This function will look for devices on the bus with no driver * attached and rescan it against existing drivers to see if it matches * any by calling device_attach() for the unbound devices. */ int bus_rescan_devices(struct bus_type *bus) { return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); } EXPORT_SYMBOL_GPL(bus_rescan_devices); /** * device_reprobe - remove driver for a device and probe for a new driver * @dev: the device to reprobe * * This function detaches the attached driver (if any) for the given * device and restarts the driver probing process. It is intended * to use if probing criteria changed during a devices lifetime and * driver attachment should change accordingly. */ int device_reprobe(struct device *dev) { if (dev->driver) { if (dev->parent) /* Needed for USB */ device_lock(dev->parent); device_release_driver(dev); if (dev->parent) device_unlock(dev->parent); } return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); /** * find_bus - locate bus by name. * @name: name of bus. * * Call kset_find_obj() to iterate over list of buses to * find a bus by name. Return bus if found. 
* * Note that kset_find_obj increments bus' reference count. */ #if 0 struct bus_type *find_bus(char *name) { struct kobject *k = kset_find_obj(bus_kset, name); return k ? to_bus(k) : NULL; } #endif /* 0 */ /** * bus_add_attrs - Add default attributes for this bus. * @bus: Bus that has just been registered. */ static int bus_add_attrs(struct bus_type *bus) { int error = 0; int i; if (bus->bus_attrs) { for (i = 0; attr_name(bus->bus_attrs[i]); i++) { error = bus_create_file(bus, &bus->bus_attrs[i]); if (error) goto err; } } done: return error; err: while (--i >= 0) bus_remove_file(bus, &bus->bus_attrs[i]); goto done; } static void bus_remove_attrs(struct bus_type *bus) { int i; if (bus->bus_attrs) { for (i = 0; attr_name(bus->bus_attrs[i]); i++) bus_remove_file(bus, &bus->bus_attrs[i]); } } static void klist_devices_get(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; get_device(dev); } static void klist_devices_put(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; put_device(dev); } static ssize_t bus_uevent_store(struct bus_type *bus, const char *buf, size_t count) { enum kobject_action action; if (kobject_action_type(buf, count, &action) == 0) kobject_uevent(&bus->p->subsys.kobj, action); return count; } static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); /** * bus_register - register a bus with the system. * @bus: bus. * * Once we have that, we registered the bus with the kobject * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the bus. 
*/ int bus_register(struct bus_type *bus) { int retval; struct subsys_private *priv; priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->bus = bus; bus->p = priv; BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier); retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name); if (retval) goto out; priv->subsys.kobj.kset = bus_kset; priv->subsys.kobj.ktype = &bus_ktype; priv->drivers_autoprobe = 1; retval = kset_register(&priv->subsys); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_uevent); if (retval) goto bus_uevent_fail; priv->devices_kset = kset_create_and_add("devices", NULL, &priv->subsys.kobj); if (!priv->devices_kset) { retval = -ENOMEM; goto bus_devices_fail; } priv->drivers_kset = kset_create_and_add("drivers", NULL, &priv->subsys.kobj); if (!priv->drivers_kset) { retval = -ENOMEM; goto bus_drivers_fail; } klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); klist_init(&priv->klist_drivers, NULL, NULL); retval = add_probe_files(bus); if (retval) goto bus_probe_files_fail; retval = bus_add_attrs(bus); if (retval) goto bus_attrs_fail; pr_debug("bus: '%s': registered\n", bus->name); return 0; bus_attrs_fail: remove_probe_files(bus); bus_probe_files_fail: kset_unregister(bus->p->drivers_kset); bus_drivers_fail: kset_unregister(bus->p->devices_kset); bus_devices_fail: bus_remove_file(bus, &bus_attr_uevent); bus_uevent_fail: kset_unregister(&bus->p->subsys); out: kfree(bus->p); bus->p = NULL; return retval; } EXPORT_SYMBOL_GPL(bus_register); /** * bus_unregister - remove a bus from the system * @bus: bus. * * Unregister the child subsystems and the bus itself. 
* Finally, we call bus_put() to release the refcount */ void bus_unregister(struct bus_type *bus) { pr_debug("bus: '%s': unregistering\n", bus->name); bus_remove_attrs(bus); remove_probe_files(bus); kset_unregister(bus->p->drivers_kset); kset_unregister(bus->p->devices_kset); bus_remove_file(bus, &bus_attr_uevent); kset_unregister(&bus->p->subsys); kfree(bus->p); bus->p = NULL; } EXPORT_SYMBOL_GPL(bus_unregister); int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb) { return blocking_notifier_chain_register(&bus->p->bus_notifier, nb); } EXPORT_SYMBOL_GPL(bus_register_notifier); int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb) { return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb); } EXPORT_SYMBOL_GPL(bus_unregister_notifier); struct kset *bus_get_kset(struct bus_type *bus) { return &bus->p->subsys; } EXPORT_SYMBOL_GPL(bus_get_kset); struct klist *bus_get_device_klist(struct bus_type *bus) { return &bus->p->klist_devices; } EXPORT_SYMBOL_GPL(bus_get_device_klist); /* * Yes, this forcibly breaks the klist abstraction temporarily. It * just wants to sort the klist, not change reference counts and * take/drop locks rapidly in the process. It does all this while * holding the lock for the list, so objects can't otherwise be * added/removed while we're swizzling. 
*/ static void device_insertion_sort_klist(struct device *a, struct list_head *list, int (*compare)(const struct device *a, const struct device *b)) { struct list_head *pos; struct klist_node *n; struct device_private *dev_prv; struct device *b; list_for_each(pos, list) { n = container_of(pos, struct klist_node, n_node); dev_prv = to_device_private_bus(n); b = dev_prv->device; if (compare(a, b) <= 0) { list_move_tail(&a->p->knode_bus.n_node, &b->p->knode_bus.n_node); return; } } list_move_tail(&a->p->knode_bus.n_node, list); } void bus_sort_breadthfirst(struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)) { LIST_HEAD(sorted_devices); struct list_head *pos, *tmp; struct klist_node *n; struct device_private *dev_prv; struct device *dev; struct klist *device_klist; device_klist = bus_get_device_klist(bus); spin_lock(&device_klist->k_lock); list_for_each_safe(pos, tmp, &device_klist->k_list) { n = container_of(pos, struct klist_node, n_node); dev_prv = to_device_private_bus(n); dev = dev_prv->device; device_insertion_sort_klist(dev, &sorted_devices, compare); } list_splice(&sorted_devices, &device_klist->k_list); spin_unlock(&device_klist->k_lock); } EXPORT_SYMBOL_GPL(bus_sort_breadthfirst); int __init buses_init(void) { bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); if (!bus_kset) return -ENOMEM; return 0; }
gpl-2.0
nightscape/yoga-900-kernel
fs/ubifs/journal.c
930
43457
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file implements UBIFS journal. * * The journal consists of 2 parts - the log and bud LEBs. The log has fixed * length and position, while a bud logical eraseblock is any LEB in the main * area. Buds contain file system data - data nodes, inode nodes, etc. The log * contains only references to buds and some other stuff like commit * start node. The idea is that when we commit the journal, we do * not copy the data, the buds just become indexed. Since after the commit the * nodes in bud eraseblocks become leaf nodes of the file system index tree, we * use term "bud". Analogy is obvious, bud eraseblocks contain nodes which will * become leafs in the future. * * The journal is multi-headed because we want to write data to the journal as * optimally as possible. It is nice to have nodes belonging to the same inode * in one LEB, so we may write data owned by different inodes to different * journal heads, although at present only one data head is used. * * For recovery reasons, the base head contains all inode nodes, all directory * entry nodes and all truncate nodes. This means that the other heads contain * only data nodes. * * Bud LEBs may be half-indexed. 
For example, if the bud was not full at the * time of commit, the bud is retained to continue to be used in the journal, * even though the "front" of the LEB is now indexed. In that case, the log * reference contains the offset where the bud starts for the purposes of the * journal. * * The journal size has to be limited, because the larger is the journal, the * longer it takes to mount UBIFS (scanning the journal) and the more memory it * takes (indexing in the TNC). * * All the journal write operations like 'ubifs_jnl_update()' here, which write * multiple UBIFS nodes to the journal at one go, are atomic with respect to * unclean reboots. Should the unclean reboot happen, the recovery code drops * all the nodes. */ #include "ubifs.h" /** * zero_ino_node_unused - zero out unused fields of an on-flash inode node. * @ino: the inode to zero out */ static inline void zero_ino_node_unused(struct ubifs_ino_node *ino) { memset(ino->padding1, 0, 4); memset(ino->padding2, 0, 26); } /** * zero_dent_node_unused - zero out unused fields of an on-flash directory * entry node. * @dent: the directory entry to zero out */ static inline void zero_dent_node_unused(struct ubifs_dent_node *dent) { dent->padding1 = 0; memset(dent->padding2, 0, 4); } /** * zero_data_node_unused - zero out unused fields of an on-flash data node. * @data: the data node to zero out */ static inline void zero_data_node_unused(struct ubifs_data_node *data) { memset(data->padding, 0, 2); } /** * zero_trun_node_unused - zero out unused fields of an on-flash truncation * node. * @trun: the truncation node to zero out */ static inline void zero_trun_node_unused(struct ubifs_trun_node *trun) { memset(trun->padding, 0, 12); } /** * reserve_space - reserve space in the journal. * @c: UBIFS file-system description object * @jhead: journal head number * @len: node length * * This function reserves space in journal head @head. 
 * If the reservation
 * succeeded, the journal head stays locked and later has to be unlocked using
 * 'release_head()'. 'write_node()' and 'write_head()' functions also unlock
 * it. Returns zero in case of success, %-EAGAIN if commit has to be done, and
 * other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	avail = c->leb_size - wbuf->offs - wbuf->used;
	/* Enough room in the current bud - return with the head still locked */
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * Write buffer wasn't seek'ed or there is no enough space - look for an
	 * LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run garbage collector to make
	 * some. But the write-buffer mutex has to be unlocked because
	 * GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may
		 * have allocated new bud for this journal head,
		 * because we dropped @wbuf->io_mutex, so try once
		 * again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	/* GC produced a free LEB - re-take the head lock and re-check */
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum). And the effect would be that the recovery would see
	 * that there is corruption in the next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Return original error code only if it is not %-EAGAIN,
		 * which is not really an error. Otherwise, return the error
		 * code of 'ubifs_return_leb()'.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}

/**
 * write_node - write node to a journal head.
* @c: UBIFS file-system description object * @jhead: journal head * @node: node to write * @len: node length * @lnum: LEB number written is returned here * @offs: offset written is returned here * * This function writes a node to reserved space of journal head @jhead. * Returns zero in case of success and a negative error code in case of * failure. */ static int write_node(struct ubifs_info *c, int jhead, void *node, int len, int *lnum, int *offs) { struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; ubifs_assert(jhead != GCHD); *lnum = c->jheads[jhead].wbuf.lnum; *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; dbg_jnl("jhead %s, LEB %d:%d, len %d", dbg_jhead(jhead), *lnum, *offs, len); ubifs_prepare_node(c, node, len, 0); return ubifs_wbuf_write_nolock(wbuf, node, len); } /** * write_head - write data to a journal head. * @c: UBIFS file-system description object * @jhead: journal head * @buf: buffer to write * @len: length to write * @lnum: LEB number written is returned here * @offs: offset written is returned here * @sync: non-zero if the write-buffer has to by synchronized * * This function is the same as 'write_node()' but it does not assume the * buffer it is writing is a node, so it does not prepare it (which means * initializing common header and calculating CRC). */ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, int *lnum, int *offs, int sync) { int err; struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; ubifs_assert(jhead != GCHD); *lnum = c->jheads[jhead].wbuf.lnum; *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; dbg_jnl("jhead %s, LEB %d:%d, len %d", dbg_jhead(jhead), *lnum, *offs, len); err = ubifs_wbuf_write_nolock(wbuf, buf, len); if (err) return err; if (sync) err = ubifs_wbuf_sync_nolock(wbuf); return err; } /** * make_reservation - reserve journal space. 
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes space reservation in journal head @jhead. The function
 * takes the commit lock and locks the journal head, and the caller has to
 * unlock the head and finish the reservation with 'finish_reservation()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, the journal head may be unlocked as soon as the data is written, while
 * the commit lock has to be released after the data has been added to the
 * TNC.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* Success: commit lock held (read) and journal head locked */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so make the error -EAGAIN so that the below
		 * will commit and re-try.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * This means that the budgeting is incorrect. We always have
		 * to be able to write to the media, because all operations are
		 * budgeted. Deletions are not budgeted, though, but we reserve
		 * an extra LEB for them.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* This are some budgeting problems, print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}

/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each successful
 * 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}

/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation. It must be called after
 * 'make_reservation()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}

/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */
static int get_dent_type(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return UBIFS_ITYPE_REG;
	case S_IFDIR:
		return UBIFS_ITYPE_DIR;
	case S_IFLNK:
		return UBIFS_ITYPE_LNK;
	case S_IFBLK:
		return UBIFS_ITYPE_BLK;
	case S_IFCHR:
		return UBIFS_ITYPE_CHR;
	case S_IFIFO:
		return UBIFS_ITYPE_FIFO;
	case S_IFSOCK:
		return UBIFS_ITYPE_SOCK;
	default:
		BUG();
	}
	/* Not reached - BUG() does not return */
	return 0;
}

/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 *
 * Serializes @inode into the on-flash inode node format at @ino, converting
 * all fields to little-endian, and prepares it as a (possibly last) member
 * of a node group.
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	/* A zero link count means this node describes a deleted inode */
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}

/**
 * mark_inode_clean - mark UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper function marks UBIFS inode @ui as clean by cleaning the
 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
 * just do nothing.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	/* Only release the budget if it is still held (inode was dirty) */
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}

/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion i.e unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The function writes the host inode @dir last, which is important in case of
 * extended attributes. Indeed, then we guarantee that if the host inode gets
 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
 * the extended attribute inode gets flushed too. And this is exactly what the
 * user expects - synchronizing the host inode synchronizes its extended
 * attributes. Similarly, this guarantees that if @dir is synchronized, its
 * directory entry corresponding to @nm gets synchronized too.
 *
 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks the @dir and @inode inodes as clean and returns zero on
 * success. In case of failure, a negative error code is returned.
*/ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct qstr *nm, const struct inode *inode, int deletion, int xent) { int err, dlen, ilen, len, lnum, ino_offs, dent_offs; int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); int last_reference = !!(deletion && inode->i_nlink == 0); struct ubifs_inode *ui = ubifs_inode(inode); struct ubifs_inode *host_ui = ubifs_inode(dir); struct ubifs_dent_node *dent; struct ubifs_ino_node *ino; union ubifs_key dent_key, ino_key; dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu", inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino); ubifs_assert(mutex_is_locked(&host_ui->ui_mutex)); dlen = UBIFS_DENT_NODE_SZ + nm->len + 1; ilen = UBIFS_INO_NODE_SZ; /* * If the last reference to the inode is being deleted, then there is * no need to attach and write inode data, it is being deleted anyway. * And if the inode is being deleted, no need to synchronize * write-buffer even if the inode is synchronous. */ if (!last_reference) { ilen += ui->data_len; sync |= IS_SYNC(inode); } aligned_dlen = ALIGN(dlen, 8); aligned_ilen = ALIGN(ilen, 8); len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; /* Make sure to also account for extended attributes */ len += host_ui->data_len; dent = kmalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; /* Make reservation before allocating sequence numbers */ err = make_reservation(c, BASEHD, len); if (err) goto out_free; if (!xent) { dent->ch.node_type = UBIFS_DENT_NODE; dent_key_init(c, &dent_key, dir->i_ino, nm); } else { dent->ch.node_type = UBIFS_XENT_NODE; xent_key_init(c, &dent_key, dir->i_ino, nm); } key_write(c, &dent_key, dent->key); dent->inum = deletion ? 
0 : cpu_to_le64(inode->i_ino); dent->type = get_dent_type(inode->i_mode); dent->nlen = cpu_to_le16(nm->len); memcpy(dent->name, nm->name, nm->len); dent->name[nm->len] = '\0'; zero_dent_node_unused(dent); ubifs_prep_grp_node(c, dent, dlen, 0); ino = (void *)dent + aligned_dlen; pack_inode(c, ino, inode, 0); ino = (void *)ino + aligned_ilen; pack_inode(c, ino, dir, 1); if (last_reference) { err = ubifs_add_orphan(c, inode->i_ino); if (err) { release_head(c, BASEHD); goto out_finish; } ui->del_cmtno = c->cmt_no; } err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); if (err) goto out_release; if (!sync) { struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino); ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino); } release_head(c, BASEHD); kfree(dent); if (deletion) { err = ubifs_tnc_remove_nm(c, &dent_key, nm); if (err) goto out_ro; err = ubifs_add_dirt(c, lnum, dlen); } else err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm); if (err) goto out_ro; /* * Note, we do not remove the inode from TNC even if the last reference * to it has just been deleted, because the inode may still be opened. * Instead, the inode has been added to orphan lists and the orphan * subsystem will take further care about it. 
*/ ino_key_init(c, &ino_key, inode->i_ino); ino_offs = dent_offs + aligned_dlen; err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen); if (err) goto out_ro; ino_key_init(c, &ino_key, dir->i_ino); ino_offs += aligned_ilen; err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ + host_ui->data_len); if (err) goto out_ro; finish_reservation(c); spin_lock(&ui->ui_lock); ui->synced_i_size = ui->ui_size; spin_unlock(&ui->ui_lock); mark_inode_clean(c, ui); mark_inode_clean(c, host_ui); return 0; out_finish: finish_reservation(c); out_free: kfree(dent); return err; out_release: release_head(c, BASEHD); kfree(dent); out_ro: ubifs_ro_mode(c, err); if (last_reference) ubifs_delete_orphan(c, inode->i_ino); finish_reservation(c); return err; } /** * ubifs_jnl_write_data - write a data node to the journal. * @c: UBIFS file-system description object * @inode: inode the data node belongs to * @key: node key * @buf: buffer to write * @len: data length (must not exceed %UBIFS_BLOCK_SIZE) * * This function writes a data node to the journal. Returns %0 if the data node * was successfully written, and a negative error code in case of failure. */ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, const union ubifs_key *key, const void *buf, int len) { struct ubifs_data_node *data; int err, lnum, offs, compr_type, out_len; int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1; struct ubifs_inode *ui = ubifs_inode(inode); dbg_jnlk(key, "ino %lu, blk %u, len %d, key ", (unsigned long)key_inum(c, key), key_block(c, key), len); ubifs_assert(len <= UBIFS_BLOCK_SIZE); data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN); if (!data) { /* * Fall-back to the write reserve buffer. Note, we might be * currently on the memory reclaim path, when the kernel is * trying to free some memory by writing out dirty pages. The * write reserve buffer helps us to guarantee that we are * always able to write the data. 
*/ allocated = 0; mutex_lock(&c->write_reserve_mutex); data = c->write_reserve_buf; } data->ch.node_type = UBIFS_DATA_NODE; key_write(c, key, &data->key); data->size = cpu_to_le32(len); zero_data_node_unused(data); if (!(ui->flags & UBIFS_COMPR_FL)) /* Compression is disabled for this inode */ compr_type = UBIFS_COMPR_NONE; else compr_type = ui->compr_type; out_len = dlen - UBIFS_DATA_NODE_SZ; ubifs_compress(c, buf, len, &data->data, &out_len, &compr_type); ubifs_assert(out_len <= UBIFS_BLOCK_SIZE); dlen = UBIFS_DATA_NODE_SZ + out_len; data->compr_type = cpu_to_le16(compr_type); /* Make reservation before allocating sequence numbers */ err = make_reservation(c, DATAHD, dlen); if (err) goto out_free; err = write_node(c, DATAHD, data, dlen, &lnum, &offs); if (err) goto out_release; ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key)); release_head(c, DATAHD); err = ubifs_tnc_add(c, key, lnum, offs, dlen); if (err) goto out_ro; finish_reservation(c); if (!allocated) mutex_unlock(&c->write_reserve_mutex); else kfree(data); return 0; out_release: release_head(c, DATAHD); out_ro: ubifs_ro_mode(c, err); finish_reservation(c); out_free: if (!allocated) mutex_unlock(&c->write_reserve_mutex); else kfree(data); return err; } /** * ubifs_jnl_write_inode - flush inode to the journal. * @c: UBIFS file-system description object * @inode: inode to flush * * This function writes inode @inode to the journal. If the inode is * synchronous, it also synchronizes the write-buffer. Returns zero in case of * success and a negative error code in case of failure. */ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) { int err, lnum, offs; struct ubifs_ino_node *ino; struct ubifs_inode *ui = ubifs_inode(inode); int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink; dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink); /* * If the inode is being deleted, do not write the attached data. 
No * need to synchronize the write-buffer either. */ if (!last_reference) { len += ui->data_len; sync = IS_SYNC(inode); } ino = kmalloc(len, GFP_NOFS); if (!ino) return -ENOMEM; /* Make reservation before allocating sequence numbers */ err = make_reservation(c, BASEHD, len); if (err) goto out_free; pack_inode(c, ino, inode, 1); err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync); if (err) goto out_release; if (!sync) ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inode->i_ino); release_head(c, BASEHD); if (last_reference) { err = ubifs_tnc_remove_ino(c, inode->i_ino); if (err) goto out_ro; ubifs_delete_orphan(c, inode->i_ino); err = ubifs_add_dirt(c, lnum, len); } else { union ubifs_key key; ino_key_init(c, &key, inode->i_ino); err = ubifs_tnc_add(c, &key, lnum, offs, len); } if (err) goto out_ro; finish_reservation(c); spin_lock(&ui->ui_lock); ui->synced_i_size = ui->ui_size; spin_unlock(&ui->ui_lock); kfree(ino); return 0; out_release: release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); finish_reservation(c); out_free: kfree(ino); return err; } /** * ubifs_jnl_delete_inode - delete an inode. * @c: UBIFS file-system description object * @inode: inode to delete * * This function deletes inode @inode which includes removing it from orphans, * deleting it from TNC and, in some cases, writing a deletion inode to the * journal. * * When regular file inodes are unlinked or a directory inode is removed, the * 'ubifs_jnl_update()' function writes a corresponding deletion inode and * direntry to the media, and adds the inode to orphans. After this, when the * last reference to this inode has been dropped, this function is called. In * general, it has to write one more deletion inode to the media, because if * a commit happened between 'ubifs_jnl_update()' and * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal * anymore, and in fact it might not be on the flash anymore, because it might * have been garbage-collected already. 
 * And for optimization reasons UBIFS does
 * not read the orphan area if it has been unmounted cleanly, so it would have
 * no indication in the journal that there is a deleted inode which has to be
 * removed from TNC.
 *
 * However, if there was no commit between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
 * inode to the media for the second time. And this is quite a typical case.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check commit number again, because the first test has been done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	/*
	 * No commit since 'ubifs_jnl_update()' - its deletion inode is still
	 * in the journal, so only drop the TNC entry and the orphan record.
	 */
	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}

/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of directory entry to rename
 * @old_dentry: directory entry to rename
 * @new_dir: parent inode of directory entry to rename
 * @new_dentry: new directory entry (or directory entry to replace)
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation which may involve writing up
 * to 3 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
/*
 * Rename a directory entry in the journal (kernel-doc header is above this
 * chunk).  All nodes for the rename are written as ONE journal group so the
 * operation is atomic w.r.t. recovery: new dent, deletion dent for the old
 * name, the (possibly now-orphaned) target inode, and the parent directory
 * inode(s).  The TNC is then updated to point at the new on-flash positions.
 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct dentry *old_dentry,
		     const struct inode *new_dir,
		     const struct dentry *new_dentry, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len;
	const struct inode *old_inode = d_inode(old_dentry);
	const struct inode *new_inode = d_inode(new_dentry);
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	/* Non-zero if the rename drops the last link of an existing target */
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	int move = (old_dir != new_dir);
	struct ubifs_inode *uninitialized_var(new_ui);

	dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
		old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	/* Sizes of the two dent nodes, including the trailing '\0' */
	dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		/* Keep inline data only if the inode survives the rename */
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	/* Total group length; a cross-directory move writes both parents */
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (old_dir != new_dir)
		len += plen;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(new_dentry->d_name.len);
	memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
	dent->name[new_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);

	/* Make deletion dent (inum == 0 marks a deletion on the flash) */
	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
			    &old_dentry->d_name);
	dent2->inum = 0;
	dent2->type = DT_UNKNOWN;
	dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
	memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
	dent2->name[old_dentry->d_name.len] = '\0';
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);

	/* Pack the target inode (if any) and the parent directory inode(s) */
	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		p += ALIGN(ilen, 8);
	}

	/* The last packed node carries the "last of group" flag */
	if (!move)
		pack_inode(c, p, old_dir, 1);
	else {
		pack_inode(c, p, old_dir, 0);
		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
	}

	if (last_reference) {
		/* Target becomes orphaned - record it before the write */
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		/* Record inodes whose data sits in the write-buffer */
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	/* Update the TNC: add the new name, remove the old one */
	dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
	if (err)
		goto out_ro;

	/* The deletion dent is immediately obsolete - account it as dirt */
	err = ubifs_add_dirt(c, lnum, dlen2);
	if (err)
		goto out_ro;

	dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
	err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
	if (err)
		goto out_ro;

	/* Point the TNC at the freshly written inode nodes */
	offs += aligned_dlen1 + aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
	if (err)
		goto out_ro;

	if (old_dir != new_dir) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	/* Journal/TNC inconsistency - force read-only to protect the FS */
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}

/**
 * recomp_data_node - re-compress a truncated data node.
 * @c: UBIFS file-system description object
 * @dn: data node to re-compress
 * @new_len: new length
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed and re-written.
 */
static int recomp_data_node(const struct ubifs_info *c,
			    struct ubifs_data_node *dn, int *new_len)
{
	void *buf;
	int err, len, compr_type, out_len;

	/* Decompress into a scratch buffer sized for worst-case expansion */
	out_len = le32_to_cpu(dn->size);
	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);
	err = ubifs_decompress(c, &dn->data, len, buf, &out_len, compr_type);
	if (err)
		goto out;

	/* Re-compress only the surviving prefix back into the node */
	ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	/* *new_len now becomes the full on-flash node length */
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
out:
	kfree(buf);
	return err;
}

/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * When the size of a file decreases due to truncation, a truncation node is
 * written, the journal tree is updated, and the last data block is re-written
 * if it has been affected. The inode is also updated in order to synchronize
 * the new inode size.
 *
 * This function marks the inode as clean and returns zero on success. In case
 * of failure, a negative error code is returned.
 */
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
		       loff_t old_size, loff_t new_size)
{
	union ubifs_key key, to_key;
	struct ubifs_ino_node *ino;
	struct ubifs_trun_node *trun;
	struct ubifs_data_node *uninitialized_var(dn);
	int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	ino_t inum = inode->i_ino;
	unsigned int blk;

	dbg_jnl("ino %lu, size %lld -> %lld",
		(unsigned long)inum, old_size, new_size);
	ubifs_assert(!ui->data_len);
	ubifs_assert(S_ISREG(inode->i_mode));
	ubifs_assert(mutex_is_locked(&ui->ui_mutex));

	/*
	 * Worst-case buffer: inode node + truncation node + a re-compressed
	 * last data node which may expand by up to WORST_COMPR_FACTOR.
	 */
	sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
	     UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
	ino = kmalloc(sz, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Build the truncation node right after the inode node */
	trun = (void *)ino + UBIFS_INO_NODE_SZ;
	trun->ch.node_type = UBIFS_TRUN_NODE;
	trun->inum = cpu_to_le32(inum);
	trun->old_size = cpu_to_le64(old_size);
	trun->new_size = cpu_to_le64(new_size);
	zero_trun_node_unused(trun);

	/* dlen = bytes kept in the (now partial) last block, 0 if aligned */
	dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
	if (dlen) {
		/* Get last data block so it can be truncated */
		dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
		blk = new_size >> UBIFS_BLOCK_SHIFT;
		data_key_init(c, &key, inum, blk);
		dbg_jnlk(&key, "last block key ");
		err = ubifs_tnc_lookup(c, &key, dn);
		if (err == -ENOENT)
			dlen = 0; /* Not found (so it is a hole) */
		else if (err)
			goto out_free;
		else {
			if (le32_to_cpu(dn->size) <= dlen)
				dlen = 0; /* Nothing to do */
			else {
				int compr_type = le16_to_cpu(dn->compr_type);

				if (compr_type != UBIFS_COMPR_NONE) {
					/* recomp updates dlen to node length */
					err = recomp_data_node(c, dn, &dlen);
					if (err)
						goto out_free;
				} else {
					dn->size = cpu_to_le32(dlen);
					dlen += UBIFS_DATA_NODE_SZ;
				}
				zero_data_node_unused(dn);
			}
		}
	}

	/* Must make reservation before allocating sequence numbers */
	len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
	if (dlen)
		len += dlen;
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	pack_inode(c, ino, inode, 0);
	/* Truncation node is last of group unless a data node follows */
	ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
	if (dlen)
		ubifs_prep_grp_node(c, dn, dlen, 1);

	err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
	release_head(c, BASEHD);

	if (dlen) {
		/* TNC entry for the re-written last data node */
		sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
		err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
		if (err)
			goto out_ro;
	}

	ino_key_init(c, &key, inum);
	err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* The truncation node itself is never looked up again - dirt */
	err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
	if (err)
		goto out_ro;

	/* Remove all data keys in (new_size, old_size] from the TNC */
	bit = new_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
	data_key_init(c, &key, inum, blk);

	bit = old_size & (UBIFS_BLOCK_SIZE - 1);
	blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
	data_key_init(c, &to_key, inum, blk);

	err = ubifs_tnc_remove_range(c, &key, &to_key);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	/* Journal/TNC inconsistency - force read-only to protect the FS */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}

/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute entry name
 *
 * This function delete an extended attribute which is very similar to
 * un-linking regular files - it writes a deletion xentry, a deletion inode and
 * updates the target inode. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode, const struct qstr *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
		host->i_ino, inode->i_ino, nm->name,
		ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any data
	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	 */
	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	/* Deletion xentry: inum == 0 marks removal of the name */
	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(nm->len);
	memcpy(xent->name, nm->name, nm->len);
	xent->name[nm->len] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	/* Deleted xattr inode, then the updated host inode (last of group) */
	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	/* Journal/TNC inconsistency - force read-only to protect the FS */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head). The host inode is written
 * after the extended attribute inode in order to guarantee that the extended
 * attribute will be flushed when the inode is synchronized by 'fsync()' and
 * consequently, the write-buffer is synchronized. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(host->i_nlink > 0);
	ubifs_assert(inode->i_nlink > 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/* Host inode first, xattr inode second (see kernel-doc above) */
	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	ino = kmalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);

	/*
	 * NOTE(review): the sync argument here is hard-coded 0 rather than
	 * the computed 'sync' flag used by the sibling journal functions -
	 * confirm whether DIRSYNC hosts are meant to rely solely on the
	 * write-buffer/fsync path here.
	 */
	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	/* Point the TNC at both freshly written inode nodes */
	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_ro:
	/* Journal/TNC inconsistency - force read-only to protect the FS */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
gpl-2.0
sandymanu/manufooty_yu
fs/ocfs2/aops.c
1442
54275
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

/*
 * Map a logical block of a (non-fast) symlink to its on-disk block.
 * Symlink data lives in a single allocated extent off the inode.
 */
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	/* A symlink target can never exceed PATH_MAX bytes */
	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

/*
 * ocfs2's get_block callback for buffered I/O.  Translates a logical block
 * to a physical block via the extent map; never allocates (see comment on
 * BH_New below).  Returns 0 on success, -EIO on any failure.
 */
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		/* Non-sparse FS must never have an unmapped in-range block */
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

/*
 * Copy inline inode data into @page and zero the remainder of the page.
 * Returns 0 on success or -EROFS if the on-disk inline state is corrupt.
 */
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	/* Inline data must fit in one page and in the dinode itself */
	if (size > PAGE_CACHE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has with inline data has bad size: %Lu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

/* ->readpage helper for inline-data inodes; always unlocks the page */
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

/* ->readpage: read one page, taking the cluster lock and ip_alloc_sem */
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree.
 * If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * to during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	/* No cluster locking here - see the comment block above */
	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	/* Apply @fn to each buffer overlapping [from, to); remember the
	 * first error but keep walking (caller may still want *partial) */
	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/* ->bmap: logical-to-physical block mapping; 0 means "no mapping" */
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	/* Inline-data inodes have no mappable blocks; leave p_blkno = 0 */
	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 *
 * Note that we never bother to allocate blocks here, and thus ignore the
 * create argument.
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
{
	int ret;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	/* We should already CoW the refcounted extent in case of create. */
	BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else
		clear_buffer_mapped(bh_result);

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private,
			     int ret,
			     bool is_async)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;
	wait_queue_head_t *wq = ocfs2_ioend_wq(inode);

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (ocfs2_iocb_is_sem_locked(iocb))
		ocfs2_iocb_clear_sem_locked(iocb);

	if (ocfs2_iocb_is_unaligned_aio(iocb)) {
		ocfs2_iocb_clear_unaligned_aio(iocb);

		/* Wake anyone waiting for outstanding unaligned AIO */
		if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
		    waitqueue_active(wq)) {
			wake_up_all(wq);
		}
	}

	ocfs2_iocb_clear_rw_locked(iocb);

	/* Drop the rw DLM lock at the level the submitter took it */
	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);

	inode_dio_done(inode);
	if (is_async)
		aio_complete(iocb, ret, 0);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	jbd2_journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}

/*
 * ->direct_IO entry point.  Returning 0 makes the generic code fall back
 * to buffered I/O (inline-data inodes and appending writes).
 */
static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file)->i_mapping->host;

	/*
	 * Fallback to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fallback to buffered I/O if we are appending. */
	if (i_size_read(inode) <= offset)
		return 0;

	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				    iov, offset, nr_segs,
				    ocfs2_direct_IO_get_blocks,
				    ocfs2_dio_end_io, NULL, 0);
}

/*
 * Compute the byte range [*start, *end) within a page occupied by the
 * cluster at @cpos.  Only differs from the full page when pagesize is
 * larger than the cluster size.
 */
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		/* clusters per page */
		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		/* Zero the cluster region on either side of [from, to) */
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0,
		       cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
*/ static int ocfs2_should_read_blk(struct inode *inode, struct page *page, unsigned int block_start) { u64 offset = page_offset(page) + block_start; if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) return 1; if (i_size_read(inode) > offset) return 1; return 0; } /* * Some of this taken from __block_write_begin(). We already have our * mapping by now though, and the entire write will be allocating or * it won't, so not much need to use BH_New. * * This will also skip zeroing, which is handled externally. */ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; unsigned int bsize = 1 << inode->i_blkbits; if (!page_has_buffers(page)) create_empty_buffers(page, bsize, 0); head = page_buffers(page); for (bh = head, block_start = 0; bh != head || !block_start; bh = bh->b_this_page, block_start += bsize) { block_end = block_start + bsize; clear_buffer_new(bh); /* * Ignore blocks outside of our i/o range - * they may belong to unallocated clusters. */ if (block_start >= to || block_end <= from) { if (PageUptodate(page)) set_buffer_uptodate(bh); continue; } /* * For an allocating write with cluster size >= page * size, we always write the entire page. */ if (new) set_buffer_new(bh); if (!buffer_mapped(bh)) { map_bh(bh, inode->i_sb, *p_blkno); unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_new(bh) && ocfs2_should_read_blk(inode, page, block_start) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++=bh; } *p_blkno = *p_blkno + 1; } /* * If we issued read requests - let them complete. 
*/ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) ret = -EIO; } if (ret == 0 || !new) return ret; /* * If we get -EIO above, zero out any newly allocated blocks * to avoid exposing stale data. */ bh = head; block_start = 0; do { block_end = block_start + bsize; if (block_end <= from) goto next_bh; if (block_start >= to) break; zero_user(page, block_start, bh->b_size); set_buffer_uptodate(bh); mark_buffer_dirty(bh); next_bh: block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } #if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE) #endif #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) /* * Describe the state of a single cluster to be written to. */ struct ocfs2_write_cluster_desc { u32 c_cpos; u32 c_phys; /* * Give this a unique field because c_phys eventually gets * filled. */ unsigned c_new; unsigned c_unwritten; unsigned c_needs_zero; }; struct ocfs2_write_ctxt { /* Logical cluster position / len of write */ u32 w_cpos; u32 w_clen; /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* * This is true if page_size > cluster_size. * * It triggers a set of special cases during write which might * have to deal with allocating writes to partial pages. */ unsigned int w_large_pages; /* * Pages involved in this write. * * w_target_page is the page being written to by the user. * * w_pages is an array of pages which always contains * w_target_page, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped * pages adjacent to w_target_page which need to be written * out in so that future reads from that region will get * zero's. 
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used for page_mkwrite path indicating no unlocking
	 * against w_target_page in ocfs2_write_end_nolock.
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

/*
 * Unlock, mark accessed and drop the page cache reference on every
 * non-NULL page in @pages.
 */
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			page_cache_release(pages[i]);
		}
	}
}

/*
 * Tear down a write context: drop the extra reference on the
 * page_mkwrite() target page (leaving it locked for that caller),
 * unlock and release the remaining pages, then release the inode
 * buffer and free @wc itself.
 */
static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				/* NULL it so it stays locked below. */
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		page_cache_release(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);

	brelse(wc->w_di_bh);
	kfree(wc);
}

/*
 * Allocate and initialize a write context describing @len bytes at
 * @pos.  Takes an extra reference on @di_bh, which is dropped by
 * ocfs2_free_write_ctxt().  Returns 0 or -ENOMEM.
 */
static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zero's to the newly allocated region.
*/ static void ocfs2_write_failure(struct inode *inode, struct ocfs2_write_ctxt *wc, loff_t user_pos, unsigned user_len) { int i; unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), to = user_pos + user_len; struct page *tmppage; ocfs2_zero_new_buffers(wc->w_target_page, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; if (page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); block_commit_write(tmppage, from, to); } } } static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, struct ocfs2_write_ctxt *wc, struct page *page, u32 cpos, loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; unsigned int user_data_from = 0, user_data_to = 0; ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ new = new | ((i_size_read(inode) <= page_offset(page)) && (page_offset(page) <= user_pos)); if (page == wc->w_target_page) { map_from = user_pos & (PAGE_CACHE_SIZE - 1); map_to = map_from + user_len; if (new) ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); else ret = ocfs2_map_page_blocks(page, p_blkno, inode, map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; } user_data_from = map_from; user_data_to = map_to; if (new) { map_from = cluster_start; map_to = cluster_end; } } else { /* * If we haven't allocated the new page yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. */ BUG_ON(!new); map_from = cluster_start; map_to = cluster_end; ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; } } /* * Parts of newly allocated pages need to be zero'd. 
* * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster * range inside of a page needs to be written. * * We can skip this if the page is up to date - it's already * been zero'd from being read in as a hole. */ if (new && !PageUptodate(page)) ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); flush_dcache_page(page); out: return ret; } /* * This function will only grab one clusters worth of pages. */ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len, int new, struct page *mmap_page) { int ret = 0, i; unsigned long start, target_index, end_index, index; struct inode *inode = mapping->host; loff_t last_byte; target_index = user_pos >> PAGE_CACHE_SHIFT; /* * Figure out how many pages we'll be manipulating here. For * non allocating write, we just change the one * page. Otherwise, we'll need a whole clusters worth. If we're * writing past i_size, we only need enough pages to cover the * last page of the write. */ if (new) { wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly * touch. This is the page past the end of the write or * i_size, whichever is greater. */ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; if ((start + wc->w_num_pages) > end_index) wc->w_num_pages = end_index - start; } else { wc->w_num_pages = 1; start = target_index; } for(i = 0; i < wc->w_num_pages; i++) { index = start + i; if (index == target_index && mmap_page) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page * passed in. 
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			page_cache_get(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		/* Don't modify the page while writeback is in flight. */
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

/*
 * Prepare a single cluster for write one cluster into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 phys, unsigned int unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i, new;
	u64 v_blkno, p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;

	/* phys == 0 means the cluster has no allocation yet. */
	new = phys == 0 ? 1 : 0;
	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, 0, wc->w_di_bh,
					   wc->w_handle, data_ac,
					   meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (should_zero)
		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
	else
		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
					  NULL);
	if (ret < 0) {
		ocfs2_error(inode->i_sb, "Corrupting extend for inode %llu, "
			    "at logical block %llu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_blkno);
		goto out;
	}

	BUG_ON(p_blkno == 0);

	for (i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		/* Map every page, remembering only the first error. */
		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:
	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
*/ local_len = len; cluster_off = pos & (osb->s_clustersize - 1); if ((cluster_off + local_len) > osb->s_clustersize) local_len = osb->s_clustersize - cluster_off; ret = ocfs2_write_cluster(mapping, desc->c_phys, desc->c_unwritten, desc->c_needs_zero, data_ac, meta_ac, wc, desc->c_cpos, pos, local_len); if (ret) { mlog_errno(ret); goto out; } len -= local_len; pos += local_len; } ret = 0; out: return ret; } /* * ocfs2_write_end() wants to know which parts of the target page it * should complete the write on. It's easiest to compute them ahead of * time when a more complete view of the write is available. */ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len, int alloc) { struct ocfs2_write_cluster_desc *desc; wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); wc->w_target_to = wc->w_target_from + len; if (alloc == 0) return; /* * Allocating write - we may have different boundaries based * on page size and cluster size. * * NOTE: We can no longer compute one value from the other as * the actual write length and user provided length may be * different. */ if (wc->w_large_pages) { /* * We only care about the 1st and last cluster within * our range and whether they should be zero'd or not. Either * value may be extended out to the start/end of a * newly allocated cluster. */ desc = &wc->w_desc[0]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, &wc->w_target_from, NULL); desc = &wc->w_desc[wc->w_clen - 1]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, NULL, &wc->w_target_to); } else { wc->w_target_from = 0; wc->w_target_to = PAGE_CACHE_SIZE; } } /* * Populate each single-cluster write descriptor in the write context * with information about the i/o to be done. 
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* We should already CoW the refcountd extent. */
			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended.  w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

/*
 * Set up the write context for an inline-data write: lock page 0,
 * start a transaction and get journal access to the dinode.  On
 * success wc->w_handle holds the open transaction.
 */
static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);

		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);

			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

/* Returns 1 if @new_size still fits in the dinode's inline-data area. */
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

/*
 * Decide whether this write can be served from inline data.  Returns 1
 * (and prepares the write context) when it can, 0 when the caller must
 * take the normal extent path, negative errno on failure.
 */
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
					     len, (unsigned long long)pos,
					     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data 1st.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as an non-allocating write, which has no
 * special case code for sparse/nonsparse files.
*/ static int ocfs2_expand_nonsparse_inode(struct inode *inode, struct buffer_head *di_bh, loff_t pos, unsigned len, struct ocfs2_write_ctxt *wc) { int ret; loff_t newsize = pos + len; BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (newsize <= i_size_read(inode)) return 0; ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos); if (ret) mlog_errno(ret); wc->w_first_new_cpos = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); return ret; } static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, loff_t pos) { int ret = 0; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (pos > i_size_read(inode)) ret = ocfs2_zero_extend(inode, di_bh, pos); return ret; } /* * Try to flush truncate logs if we can free enough clusters from it. * As for return value, "< 0" means error, "0" no space and "1" means * we have freed enough spaces and let the caller try to allocate again. */ static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, unsigned int needed) { tid_t target; int ret = 0; unsigned int truncated_clusters; mutex_lock(&osb->osb_tl_inode->i_mutex); truncated_clusters = osb->truncated_clusters; mutex_unlock(&osb->osb_tl_inode->i_mutex); /* * Check whether we can succeed in allocating if we free * the truncate log. 
	 */
	if (truncated_clusters < needed)
		goto out;

	ret = ocfs2_flush_truncate_log(osb);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Wait for the journal commit so the clusters are really free. */
	if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
		jbd2_log_wait_commit(osb->journal->j_journal, target);
		ret = 1;
	}

out:
	return ret;
}

/*
 * Core of ->write_begin: build a write context, reserve any needed
 * cluster/metadata allocations, start the journal transaction and map
 * the pages to be written.  Callers hold the inode cluster lock and
 * ip_alloc_sem.  @mmap_page is non-NULL only on the page_mkwrite path.
 * On success *pagep/*fsdata carry the target page and context for
 * ocfs2_write_end_nolock().
 */
int ocfs2_write_begin_nolock(struct file *filp,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	int try_free = 1, ret1;

try_again:
	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			/* Inline write prepared - skip the extent path. */
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(osb))
		ret = ocfs2_zero_tail(inode, di_bh, pos);
	else
		ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
						   wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		/* Refcounted (shared) extents must be CoW'd first. */
		clusters_need = wc->w_clen;
		ret = ocfs2_refcount_cow(inode, filp, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	clusters_need += clusters_to_alloc;

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	trace_ocfs2_write_begin_nolock(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(long long)i_size_read(inode),
			le32_to_cpu(di->i_clusters),
			pos, len, flags, mmap_page,
			clusters_to_alloc, extents_to_split);

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list,
						    clusters_to_alloc);

	}

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended.  For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (clusters_to_alloc || extents_to_split ||
	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}
	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
	 * the target page. In this case, we exit with no error and no target
	 * page. This will trigger the caller, page_mkwrite(), to re-try
	 * the operation.
	 */
	if (ret == -EAGAIN) {
		BUG_ON(wc->w_target_page);
		ret = 0;
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	if (ret == -ENOSPC && try_free) {
		/*
		 * Try to free some truncate log so that we can have enough
		 * clusters to allocate.
		 */
		try_free = 0;

		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
		if (ret1 == 1)
			goto try_again;

		if (ret1 < 0)
			mlog_errno(ret1);
	}

	return ret;
}

static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
*/ down_write(&OCFS2_I(inode)->ip_alloc_sem); ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep, fsdata, di_bh, NULL); if (ret) { mlog_errno(ret); goto out_fail; } brelse(di_bh); return 0; out_fail: up_write(&OCFS2_I(inode)->ip_alloc_sem); brelse(di_bh); ocfs2_inode_unlock(inode, 1); return ret; } static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, unsigned len, unsigned *copied, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { void *kaddr; if (unlikely(*copied < len)) { if (!PageUptodate(wc->w_target_page)) { *copied = 0; return; } } kaddr = kmap_atomic(wc->w_target_page); memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); kunmap_atomic(kaddr); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)pos, *copied, le16_to_cpu(di->id2.i_data.id_count), le16_to_cpu(di->i_dyn_features)); } int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int i; unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; struct page *tmppage; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ocfs2_write_end_inline(inode, pos, len, &copied, di, wc); goto out_write_size; } if (unlikely(copied < len)) { if (!PageUptodate(wc->w_target_page)) copied = 0; ocfs2_zero_new_buffers(wc->w_target_page, start+copied, start+len); } flush_dcache_page(wc->w_target_page); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; if (tmppage == wc->w_target_page) { from = wc->w_target_from; to = wc->w_target_to; BUG_ON(from > PAGE_CACHE_SIZE || to > PAGE_CACHE_SIZE || to < from); } else { /* * Pages adjacent to the target (if any) imply * a hole-filling write in which case we want * to flush 
their entire range. */ from = 0; to = PAGE_CACHE_SIZE; } if (page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); block_commit_write(tmppage, from, to); } } out_write_size: pos += copied; if (pos > inode->i_size) { i_size_write(inode, pos); mark_inode_dirty(inode); } inode->i_blocks = ocfs2_inode_sector_count(inode); di->i_size = cpu_to_le64((u64)i_size_read(inode)); inode->i_mtime = inode->i_ctime = CURRENT_TIME; di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); ocfs2_journal_dirty(handle, wc->w_di_bh); ocfs2_commit_trans(osb, handle); ocfs2_run_deallocs(osb, &wc->w_dealloc); ocfs2_free_write_ctxt(wc); return copied; } static int ocfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; struct inode *inode = mapping->host; ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata); up_write(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); return ret; } const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .readpages = ocfs2_readpages, .writepage = ocfs2_writepage, .write_begin = ocfs2_write_begin, .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, .invalidatepage = ocfs2_invalidatepage, .releasepage = ocfs2_releasepage, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, };
gpl-2.0
crewrktablets/rk29_kernel_308
net/netfilter/nf_conntrack_netlink.c
1442
54229
/* Connection tracking via netlink socket. Allows for user space * protocol helpers and general trouble making from userspace. * * (C) 2001 by Jay Schulist <jschlst@samba.org> * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> * (C) 2003 by Patrick Mchardy <kaber@trash.net> * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> * * Initial connection tracking via netlink development funded and * generally made possible by Network Robots, Inc. (www.networkrobots.com) * * Further development of this code funded by Astaro AG (http://www.astaro.com) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/rculist.h> #include <linux/rculist_nulls.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/security.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/netlink.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <net/netlink.h> #include <net/sock.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #ifdef CONFIG_NF_NAT_NEEDED #include <net/netfilter/nf_nat_core.h> #include <net/netfilter/nf_nat_protocol.h> #endif #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> MODULE_LICENSE("GPL"); static char __initdata version[] = "0.93"; static inline int ctnetlink_dump_tuples_proto(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, struct 
nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

/* Dump the layer-3 (address) half of @tuple into a nested attribute. */
static inline int
ctnetlink_dump_tuples_ip(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 struct nf_conntrack_l3proto *l3proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (likely(l3proto->tuple_to_nlattr))
		ret = l3proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

/* Dump both the l3 and l4 halves of @tuple. */
static int
ctnetlink_dump_tuples(struct sk_buff *skb,
		      const struct nf_conntrack_tuple *tuple)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;

	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);

	if (unlikely(ret < 0))
		return ret;

	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);

	return ret;
}

static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	/* Remaining lifetime in seconds; clamp at 0 if already expired. */
	long timeout = (ct->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	/* Nothing to dump for protocols without per-conn state. */
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;
	const struct nf_conn_counter *acct;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
		     cpu_to_be64(acct[dir].packets));
	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
		     cpu_to_be64(acct[dir].bytes));

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
	/* Stop time is only valid once the conntrack was destroyed. */
	if (tstamp->stop != 0) {
		NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
			     cpu_to_be64(tstamp->stop));
	}
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark)); return 0; nla_put_failure: return -1; } #else #define ctnetlink_dump_mark(a, b) (0) #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK static inline int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_secctx; int len, ret; char *secctx; ret = security_secid_to_secctx(ct->secmark, &secctx, &len); if (ret) return 0; ret = -1; nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED); if (!nest_secctx) goto nla_put_failure; NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); nla_nest_end(skb, nest_secctx); ret = 0; nla_put_failure: security_release_secctx(secctx, len); return ret; } #else #define ctnetlink_dump_secctx(a, b) (0) #endif #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) static inline int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_parms; if (!(ct->status & IPS_EXPECTED)) return 0; nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } #ifdef CONFIG_NF_NAT_NEEDED static int dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, htonl(natseq->correction_pos)); NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, htonl(natseq->offset_before)); NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, htonl(natseq->offset_after)); nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static inline int ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) { struct nf_nat_seq *natseq; struct nf_conn_nat *nat = nfct_nat(ct); if (!(ct->status & 
IPS_SEQ_ADJUST) || !nat)
		return 0;

	natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
		return -1;

	natseq = &nat->seq[IP_CT_DIR_REPLY];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
		return -1;

	return 0;
}
#else
#define ctnetlink_dump_nat_seq_adj(a, b) (0)
#endif

/* Emit a userspace-visible conntrack id derived from the entry's kernel
 * address. NOTE(review): this leaks a kernel pointer value to userspace
 * and ctnetlink_del_conntrack matches against the same value, so it
 * cannot be changed without breaking the netlink ABI. */
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
	return 0;

nla_put_failure:
	return -1;
}

/* Emit the current reference count of the entry as CTA_USE. */
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
	return 0;

nla_put_failure:
	return -1;
}

/* Build one complete conntrack netlink message for 'ct' into 'skb'.
 * Returns skb->len on success, -1 on failure (partial message is
 * cancelled via nlmsg_cancel). */
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		    int event, struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	/* NLM_F_MULTI marks messages that are part of a dump (pid != 0) */
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Upper bound of L3+L4 attribute payload, taken from the per-protocol
 * nla_size fields under RCU protection. */
static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	size_t len = 0;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	len += l3proto->nla_size;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	len += l4proto->nla_size;
	rcu_read_unlock();

	return len;
}

/* Space needed for both directions' counters, zero if no accounting. */
static inline size_t
ctnetlink_counters_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

/* Space needed for the security context attribute; queries the LSM for
 * the string length without allocating (NULL secctx pointer). */
static inline int
ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

/* Space needed for start/stop timestamps, zero if extension absent. */
static inline size_t
ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
#else
	return 0;
#endif
}

/* Worst-case message size used to size the event skb in
 * ctnetlink_conntrack_event(). */
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_counters_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}

/* Conntrack event notifier: translate an IPCT_* event mask into a
 * netlink multicast message. Runs in atomic context (GFP_ATOMIC);
 * returns -ENOBUFS so the event cache can retry on delivery failure. */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct,
IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	/* DESTROY events carry final counters/timestamps; all other
	 * events carry only the attributes relevant to what changed. */
	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->pid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

/* Dump completion callback: drop the reference taken on the conntrack
 * entry saved in cb->args[1] for dump resumption. */
static int ctnetlink_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	return 0;
}

/* Netlink dump callback: walk the conntrack hash table, emitting one
 * message per original-direction entry. cb->args[0] is the current
 * bucket, cb->args[1] a held reference to the entry to resume after
 * when the previous skb filled up. With IPCTNL_MSG_CT_GET_CTRZERO the
 * counters of each dumped entry are reset. */
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;

	spin_lock_bh(&nf_conntrack_lock);
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
					   hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
						cb->nlh->nlmsg_seq,
						IPCTNL_MSG_CT_NEW, ct) < 0) {
				/* skb full: remember where to resume */
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				goto out;
			}
			if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
						IPCTNL_MSG_CT_GET_CTRZERO) {
				struct nf_conn_counter *acct;

				acct = nf_conn_acct_find(ct);
				if (acct)
					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
			}
		}
		if (cb->args[1]) {
			/* resume point vanished: rescan this bucket */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	spin_unlock_bh(&nf_conntrack_lock);
	if (last)
		nf_ct_put(last);

	return skb->len;
}

/* Parse the nested CTA_TUPLE_IP attribute into tuple->src/dst addresses
 * via the L3 protocol handler; validation uses the handler's policy. */
static inline int
ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	struct nf_conntrack_l3proto *l3proto;
	int ret = 0;

	nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);

	if (likely(l3proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_IP_MAX,
					  l3proto->nla_policy);
		if (ret == 0)
			ret = l3proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
*tb[CTA_HELP_MAX+1];

	nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	/* points into the attribute payload; NUL-terminated per policy */
	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS] 		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
};

/* IPCTNL_MSG_CT_DELETE handler: delete the entry matching the given
 * tuple (orig or reply), optionally checked against CTA_ID; with no
 * tuple at all, flush the whole table for this net namespace. */
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		/* id must match the CTA_ID value ctnetlink_dump_id emitted */
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      NETLINK_CB(skb).pid,
				      nlmsg_report(nlh)) < 0) {
		nf_ct_delete_from_lists(ct);
		/* we failed to report the event, try later */
		nf_ct_insert_dying_list(ct);
		nf_ct_put(ct);
		return 0;
	}

	/* death_by_timeout would report the event again */
	set_bit(IPS_DYING_BIT, &ct->status);

	nf_ct_kill(ct);
	nf_ct_put(ct);

	return 0;
}

/* IPCTNL_MSG_CT_GET handler: with NLM_F_DUMP start a table dump,
 * otherwise look up one entry by tuple and unicast it back. */
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
					  ctnetlink_done);

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	rcu_read_lock();
	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
				  IPCTNL_MSG_CT_NEW, ct);
	rcu_read_unlock();
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}

#ifdef CONFIG_NF_NAT_NEEDED
/* Hand a CTA_NAT_SRC/DST attribute to the NAT module via the
 * nfnetlink_parse_nat_setup_hook. If the hook is not registered, drop
 * all held locks (RCU, nf_conntrack_lock, nfnl mutex) to autoload the
 * NAT module, reacquire them in the same order, and return -EAGAIN so
 * the caller replays the whole request against the fresh hook. */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		spin_unlock_bh(&nf_conntrack_lock);
		nfnl_unlock();
		if (request_module("nf-nat-ipv4") < 0) {
			nfnl_lock();
			spin_lock_bh(&nf_conntrack_lock);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		spin_lock_bh(&nf_conntrack_lock);
		rcu_read_lock();
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	return parse_nat_setup(ct, manip, attr);
}
#endif

/* Apply a userspace-supplied CTA_STATUS word, refusing transitions the
 * kernel owns (EXPECTED/CONFIRMED/DYING, clearing SEEN_REPLY/ASSURED). */
static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	unsigned long d;
	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
	d = ct->status ^ status;

	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EBUSY;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EBUSY;

	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EBUSY;

	/* Be careful here, modifying NAT bits can screw up things,
	 * so don't let users modify them directly if they don't pass
	 * nf_nat_range.
 */
	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
	return 0;
}

/* Set up source/destination NAT bindings from CTA_NAT_SRC/DST; only
 * available when NAT support is compiled in. */
static int
ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
	int ret;

	if (cda[CTA_NAT_DST]) {
		ret = ctnetlink_parse_nat_setup(ct,
						IP_NAT_MANIP_DST,
						cda[CTA_NAT_DST]);
		if (ret < 0)
			return ret;
	}
	if (cda[CTA_NAT_SRC]) {
		ret = ctnetlink_parse_nat_setup(ct,
						IP_NAT_MANIP_SRC,
						cda[CTA_NAT_SRC]);
		if (ret < 0)
			return ret;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

/* Change (or clear, with an empty name) the helper of an existing
 * conntrack. Drops nf_conntrack_lock temporarily to autoload a missing
 * helper module, then returns -EAGAIN so the caller retries. */
static inline int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			rcu_assign_pointer(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper)
			return 0;
		if (help->helper)
			return -EBUSY;
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		/* we cannot set a helper for an existing conntrack */
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}

/* Restart the entry's timeout timer with a userspace-supplied value in
 * seconds; -ETIME if the timer already fired (entry is dying). */
static inline int
ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
{
	u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	if (!del_timer(&ct->timeout))
		return -ETIME;

	ct->timeout.expires = jiffies + timeout * HZ;
	add_timer(&ct->timeout);

	return 0;
}

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};

/* Feed nested CTA_PROTOINFO attributes to the L4 protocol handler's
 * ->from_nlattr to update per-protocol state. */
static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int err = 0;

	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);
	rcu_read_unlock();

	return err;
}

#ifdef CONFIG_NF_NAT_NEEDED
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};

/* Parse one direction's sequence-adjustment triple; all three fields
 * are mandatory. */
static inline int
change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
	struct nlattr *cda[CTA_NAT_SEQ_MAX+1];

	nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);

	if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
		return -EINVAL;

	natseq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));

	if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
		return -EINVAL;

	natseq->offset_before =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));

	if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
		return -EINVAL;

	natseq->offset_after =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));

	return 0;
}

/* Install userspace-supplied sequence adjustments for either direction
 * and flag the entry with IPS_SEQ_ADJUST. */
static int
ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
			     const struct nlattr * const cda[])
{
	int ret = 0;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!nat)
		return 0;

	if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
					 cda[CTA_NAT_SEQ_ADJ_ORIG]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
					 cda[CTA_NAT_SEQ_ADJ_REPLY]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	return 0;
}
#endif

/* Apply all mutable attributes of an IPCTNL_MSG_CT_NEW request to an
 * EXISTING conntrack; NAT and master changes are creation-only. */
static int
ctnetlink_change_conntrack(struct nf_conn *ct, const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}
#endif

	return 0;
}

/* Allocate and populate a brand-new conntrack from netlink attributes;
 * on success the entry is timer-armed and inserted into the hash.
 * Returns ERR_PTR on failure (entry freed via err1/err2 unwind). */
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net, u16 zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	/* CTA_TIMEOUT is mandatory for creation */
	if (!cda[CTA_TIMEOUT])
		goto err1;
	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;

	rcu_read_lock();
	if (cda[CTA_HELP]) {
		char *helpname = NULL;

		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			if (request_module("nfct-helper-%s", helpname)
< 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			if (helper) {
				/* module loaded: let the caller retry */
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}

			/* not in hash table yet so not strictly necessary */
			rcu_assign_pointer(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
		err = ctnetlink_change_nat(ct, cda);
		if (err < 0)
			goto err2;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	/* we must add conntrack extensions before confirmation. */
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}
#endif

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());

	add_timer(&ct->timeout);
	nf_conntrack_hash_insert(ct);
	rcu_read_unlock();

	return ct;

err2:
	rcu_read_unlock();
err1:
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}

/* IPCTNL_MSG_CT_NEW handler: create a conntrack (NLM_F_CREATE) when no
 * matching entry exists, or update the existing one unless NLM_F_EXCL
 * forbids it. Event reporting happens after dropping the table lock. */
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	spin_lock_bh(&nf_conntrack_lock);
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			struct nf_conn *ct;
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct)) {
				err = PTR_ERR(ct);
				goto out_unlock;
			}
			err = 0;
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}
	/* implicit 'else' */

	/* We manipulate the conntrack inside the global conntrack table lock,
	 * so there's no need to increase the refcount */
	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		err =
ctnetlink_change_conntrack(ct, cda);
		if (err == 0) {
			/* hold a reference for event reporting after the
			 * table lock is dropped */
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}

out_unlock:
	spin_unlock_bh(&nf_conntrack_lock);
	return err;
}

/***********************************************************************
 * EXPECT
 ***********************************************************************/

/* Dump a full expectation tuple under the given nest type
 * (CTA_EXPECT_TUPLE or CTA_EXPECT_MASTER). */
static inline int
ctnetlink_exp_dump_tuple(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 enum ctattr_expect type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, tuple) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

/* Dump an expectation mask under CTA_EXPECT_MASK. A full tuple 'm' is
 * synthesized (all-ones, then the real mask fields copied in) because
 * the per-protocol dump callbacks expect a complete tuple. */
static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
	if (unlikely(ret < 0))
		goto nla_put_failure;

	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

/* Dump all attributes of one expectation: tuple, mask, master tuple,
 * remaining timeout, id (pointer-derived, like CTA_ID), flags and the
 * master's helper name. */
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	long timeout = (exp->timeout.expires - jiffies) / HZ;
	struct nf_conn_help *help;

	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
	NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
	help = nfct_help(master);
	if (help) {
		struct nf_conntrack_helper *helper;

		helper = rcu_dereference(help->helper);
		if (helper)
			NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
	}

	return 0;

nla_put_failure:
	return -1;
}

/* Build one complete expectation netlink message. Returns skb->len on
 * success, -1 on failure (message cancelled). */
static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			int event, const struct nf_conntrack_expect *exp)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Expectation event notifier: multicast new/destroyed expectations to
 * the matching nfnetlink group; atomic context, best-effort delivery. */
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *skb;
	unsigned int type, group;
	int flags = 0;

	if (events & (1 << IPEXP_DESTROY)) {
		type = IPCTNL_MSG_EXP_DELETE;
		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
	} else if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_EXP_NEW;
	} else
		return 0;

	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
#endif

/* Dump completion callback: release the expectation reference saved in
 * cb->args[1] for dump resumption. */
static int ctnetlink_exp_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
	return 0;
}

/* Netlink dump callback: walk the expectation hash under RCU, emitting
 * one message per expectation. cb->args[0] is the bucket, cb->args[1]
 * a held reference to the entry to resume after. */
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
				     hnode) {
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			if (cb->args[1]) {
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						    NETLINK_CB(cb->skb).pid,
						    cb->nlh->nlmsg_seq,
						    IPCTNL_MSG_EXP_NEW,
						    exp) < 0) {
				/* skb full: pin this entry to resume at,
				 * unless it is already being freed */
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}

static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING },
	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
};

/* IPCTNL_MSG_EXP_GET handler: with NLM_F_DUMP start an expectation
 * table dump, otherwise look up one expectation by its master tuple.
 * (Function continues beyond this chunk.) */
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		return netlink_dump_start(ctnl, skb, nlh,
					  ctnetlink_exp_dump_table,
					  ctnetlink_exp_done);
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
		return -EINVAL;

	if (err < 0)
return err; exp = nf_ct_expect_find_get(net, zone, &tuple); if (!exp) return -ENOENT; if (cda[CTA_EXPECT_ID]) { __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); if (ntohl(id) != (u32)(unsigned long)exp) { nf_ct_expect_put(exp); return -ENOENT; } } err = -ENOMEM; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (skb2 == NULL) goto out; rcu_read_lock(); err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); rcu_read_unlock(); if (err <= 0) goto free; nf_ct_expect_put(exp); return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); free: kfree_skb(skb2); out: nf_ct_expect_put(exp); return err; } static int ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { struct net *net = sock_net(ctnl); struct nf_conntrack_expect *exp; struct nf_conntrack_tuple tuple; struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct hlist_node *n, *next; u_int8_t u3 = nfmsg->nfgen_family; unsigned int i; u16 zone; int err; if (cda[CTA_EXPECT_TUPLE]) { /* delete a single expect by tuple */ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); if (err < 0) return err; /* bump usage count to 2 */ exp = nf_ct_expect_find_get(net, zone, &tuple); if (!exp) return -ENOENT; if (cda[CTA_EXPECT_ID]) { __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); if (ntohl(id) != (u32)(unsigned long)exp) { nf_ct_expect_put(exp); return -ENOENT; } } /* after list removal, usage count == 1 */ spin_lock_bh(&nf_conntrack_lock); if (del_timer(&exp->timeout)) { nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid, nlmsg_report(nlh)); nf_ct_expect_put(exp); } spin_unlock_bh(&nf_conntrack_lock); /* have to put what we 'get' above. 
* after this line usage count == 0 */ nf_ct_expect_put(exp); } else if (cda[CTA_EXPECT_HELP_NAME]) { char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); struct nf_conn_help *m_help; /* delete all expectations for this helper */ spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, n, next, &net->ct.expect_hash[i], hnode) { m_help = nfct_help(exp->master); if (!strcmp(m_help->helper->name, name) && del_timer(&exp->timeout)) { nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid, nlmsg_report(nlh)); nf_ct_expect_put(exp); } } } spin_unlock_bh(&nf_conntrack_lock); } else { /* This basically means we have to flush everything*/ spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, n, next, &net->ct.expect_hash[i], hnode) { if (del_timer(&exp->timeout)) { nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid, nlmsg_report(nlh)); nf_ct_expect_put(exp); } } } spin_unlock_bh(&nf_conntrack_lock); } return 0; } static int ctnetlink_change_expect(struct nf_conntrack_expect *x, const struct nlattr * const cda[]) { return -EOPNOTSUPP; } static int ctnetlink_create_expect(struct net *net, u16 zone, const struct nlattr * const cda[], u_int8_t u3, u32 pid, int report) { struct nf_conntrack_tuple tuple, mask, master_tuple; struct nf_conntrack_tuple_hash *h = NULL; struct nf_conntrack_expect *exp; struct nf_conn *ct; struct nf_conn_help *help; int err = 0; /* caller guarantees that those three CTA_EXPECT_* exist */ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3); if (err < 0) return err; /* Look for master conntrack of this expectation */ h = nf_conntrack_find_get(net, zone, &master_tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); exp = nf_ct_expect_alloc(ct); if (!exp) 
{ err = -ENOMEM; goto out; } help = nfct_help(ct); if (!help) { if (!cda[CTA_EXPECT_TIMEOUT]) { err = -EINVAL; goto out; } exp->timeout.expires = jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; exp->flags = NF_CT_EXPECT_USERSPACE; if (cda[CTA_EXPECT_FLAGS]) { exp->flags |= ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); } } else { if (cda[CTA_EXPECT_FLAGS]) { exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); exp->flags &= ~NF_CT_EXPECT_USERSPACE; } else exp->flags = 0; } exp->class = 0; exp->expectfn = NULL; exp->master = ct; exp->helper = NULL; memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); exp->mask.src.u.all = mask.src.u.all; err = nf_ct_expect_related_report(exp, pid, report); nf_ct_expect_put(exp); out: nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); return err; } static int ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { struct net *net = sock_net(ctnl); struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; u16 zone; int err; if (!cda[CTA_EXPECT_TUPLE] || !cda[CTA_EXPECT_MASK] || !cda[CTA_EXPECT_MASTER]) return -EINVAL; err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); if (err < 0) return err; spin_lock_bh(&nf_conntrack_lock); exp = __nf_ct_expect_find(net, zone, &tuple); if (!exp) { spin_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) { err = ctnetlink_create_expect(net, zone, cda, u3, NETLINK_CB(skb).pid, nlmsg_report(nlh)); } return err; } err = -EEXIST; if (!(nlh->nlmsg_flags & NLM_F_EXCL)) err = ctnetlink_change_expect(exp, cda); spin_unlock_bh(&nf_conntrack_lock); return err; } #ifdef CONFIG_NF_CONNTRACK_EVENTS static struct nf_ct_event_notifier ctnl_notifier = { .fcn = 
ctnetlink_conntrack_event, }; static struct nf_exp_event_notifier ctnl_notifier_exp = { .fcn = ctnetlink_expect_event, }; #endif static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, .attr_count = CTA_MAX, .policy = ct_nla_policy }, }; static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, }; static const struct nfnetlink_subsystem ctnl_subsys = { .name = "conntrack", .subsys_id = NFNL_SUBSYS_CTNETLINK, .cb_count = IPCTNL_MSG_MAX, .cb = ctnl_cb, }; static const struct nfnetlink_subsystem ctnl_exp_subsys = { .name = "conntrack_expect", .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, .cb_count = IPCTNL_MSG_EXP_MAX, .cb = ctnl_exp_cb, }; MODULE_ALIAS("ip_conntrack_netlink"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); static int __init ctnetlink_init(void) { int ret; pr_info("ctnetlink v%s: registering with nfnetlink.\n", version); ret = nfnetlink_subsys_register(&ctnl_subsys); if (ret < 0) { pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); goto err_out; } ret = nfnetlink_subsys_register(&ctnl_exp_subsys); if (ret < 0) { pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); goto err_unreg_subsys; } #ifdef CONFIG_NF_CONNTRACK_EVENTS ret = 
nf_conntrack_register_notifier(&ctnl_notifier); if (ret < 0) { pr_err("ctnetlink_init: cannot register notifier.\n"); goto err_unreg_exp_subsys; } ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); if (ret < 0) { pr_err("ctnetlink_init: cannot expect register notifier.\n"); goto err_unreg_notifier; } #endif return 0; #ifdef CONFIG_NF_CONNTRACK_EVENTS err_unreg_notifier: nf_conntrack_unregister_notifier(&ctnl_notifier); err_unreg_exp_subsys: nfnetlink_subsys_unregister(&ctnl_exp_subsys); #endif err_unreg_subsys: nfnetlink_subsys_unregister(&ctnl_subsys); err_out: return ret; } static void __exit ctnetlink_exit(void) { pr_info("ctnetlink: unregistering from nfnetlink.\n"); nf_ct_remove_userspace_expectations(); #ifdef CONFIG_NF_CONNTRACK_EVENTS nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); nf_conntrack_unregister_notifier(&ctnl_notifier); #endif nfnetlink_subsys_unregister(&ctnl_exp_subsys); nfnetlink_subsys_unregister(&ctnl_subsys); } module_init(ctnetlink_init); module_exit(ctnetlink_exit);
gpl-2.0
DirtyUnicorns/android_kernel_nvidia_shieldtablet
drivers/acpi/processor_throttling.c
1698
31964
/* * processor_throttling.c - Throttling submodule of the ACPI processor driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <asm/io.h> #include <asm/uaccess.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/processor.h> #define PREFIX "ACPI: " #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_throttling"); /* ignore_tpc: * 0 -> acpi processor driver doesn't ignore _TPC values * 1 -> acpi processor driver ignores _TPC values */ static int ignore_tpc; module_param(ignore_tpc, int, 0644); MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support"); struct throttling_tstate { unsigned int cpu; /* cpu nr */ int target_state; /* target T-state */ }; struct acpi_processor_throttling_arg { struct acpi_processor *pr; int target_state; bool force; }; #define THROTTLING_PRECHANGE (1) #define THROTTLING_POSTCHANGE (2) static int acpi_processor_get_throttling(struct acpi_processor *pr); int acpi_processor_set_throttling(struct acpi_processor *pr, int state, bool force); static int acpi_processor_update_tsd_coord(void) { int count, count_target; int retval = 0; unsigned int i, j; cpumask_var_t covered_cpus; struct acpi_processor *pr, *match_pr; struct acpi_tsd_package *pdomain, *match_pdomain; struct acpi_processor_throttling *pthrottling, *match_pthrottling; if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) return -ENOMEM; /* * Now that we have _TSD data from all CPUs, lets setup T-state * coordination between all CPUs. */ for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) continue; /* Basic validity check for domain info */ pthrottling = &(pr->throttling); /* * If tsd package for one cpu is invalid, the coordination * among all CPUs is thought as invalid. * Maybe it is ugly. 
*/ if (!pthrottling->tsd_valid_flag) { retval = -EINVAL; break; } } if (retval) goto err_ret; for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) continue; if (cpumask_test_cpu(i, covered_cpus)) continue; pthrottling = &pr->throttling; pdomain = &(pthrottling->domain_info); cpumask_set_cpu(i, pthrottling->shared_cpu_map); cpumask_set_cpu(i, covered_cpus); /* * If the number of processor in the TSD domain is 1, it is * unnecessary to parse the coordination for this CPU. */ if (pdomain->num_processors <= 1) continue; /* Validate the Domain info */ count_target = pdomain->num_processors; count = 1; for_each_possible_cpu(j) { if (i == j) continue; match_pr = per_cpu(processors, j); if (!match_pr) continue; match_pthrottling = &(match_pr->throttling); match_pdomain = &(match_pthrottling->domain_info); if (match_pdomain->domain != pdomain->domain) continue; /* Here i and j are in the same domain. * If two TSD packages have the same domain, they * should have the same num_porcessors and * coordination type. Otherwise it will be regarded * as illegal. */ if (match_pdomain->num_processors != count_target) { retval = -EINVAL; goto err_ret; } if (pdomain->coord_type != match_pdomain->coord_type) { retval = -EINVAL; goto err_ret; } cpumask_set_cpu(j, covered_cpus); cpumask_set_cpu(j, pthrottling->shared_cpu_map); count++; } for_each_possible_cpu(j) { if (i == j) continue; match_pr = per_cpu(processors, j); if (!match_pr) continue; match_pthrottling = &(match_pr->throttling); match_pdomain = &(match_pthrottling->domain_info); if (match_pdomain->domain != pdomain->domain) continue; /* * If some CPUS have the same domain, they * will have the same shared_cpu_map. */ cpumask_copy(match_pthrottling->shared_cpu_map, pthrottling->shared_cpu_map); } } err_ret: free_cpumask_var(covered_cpus); for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) continue; /* * Assume no coordination on any error parsing domain info. 
* The coordination type will be forced as SW_ALL. */ if (retval) { pthrottling = &(pr->throttling); cpumask_clear(pthrottling->shared_cpu_map); cpumask_set_cpu(i, pthrottling->shared_cpu_map); pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } } return retval; } /* * Update the T-state coordination after the _TSD * data for all cpus is obtained. */ void acpi_processor_throttling_init(void) { if (acpi_processor_update_tsd_coord()) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assume no T-state coordination\n")); } return; } static int acpi_processor_throttling_notifier(unsigned long event, void *data) { struct throttling_tstate *p_tstate = data; struct acpi_processor *pr; unsigned int cpu ; int target_state; struct acpi_processor_limit *p_limit; struct acpi_processor_throttling *p_throttling; cpu = p_tstate->cpu; pr = per_cpu(processors, cpu); if (!pr) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); return 0; } if (!pr->flags.throttling) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is " "unsupported on CPU %d\n", cpu)); return 0; } target_state = p_tstate->target_state; p_throttling = &(pr->throttling); switch (event) { case THROTTLING_PRECHANGE: /* * Prechange event is used to choose one proper t-state, * which meets the limits of thermal, user and _TPC. */ p_limit = &pr->limit; if (p_limit->thermal.tx > target_state) target_state = p_limit->thermal.tx; if (p_limit->user.tx > target_state) target_state = p_limit->user.tx; if (pr->throttling_platform_limit > target_state) target_state = pr->throttling_platform_limit; if (target_state >= p_throttling->state_count) { printk(KERN_WARNING "Exceed the limit of T-state \n"); target_state = p_throttling->state_count - 1; } p_tstate->target_state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:" "target T-state of CPU %d is T%d\n", cpu, target_state)); break; case THROTTLING_POSTCHANGE: /* * Postchange event is only used to update the * T-state flag of acpi_processor_throttling. 
*/ p_throttling->state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:" "CPU %d is switched to T%d\n", cpu, target_state)); break; default: printk(KERN_WARNING "Unsupported Throttling notifier event\n"); break; } return 0; } /* * _TPC - Throttling Present Capabilities */ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) { acpi_status status = 0; unsigned long long tpc = 0; if (!pr) return -EINVAL; if (ignore_tpc) goto end; status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC")); } return -ENODEV; } end: pr->throttling_platform_limit = (int)tpc; return 0; } int acpi_processor_tstate_has_changed(struct acpi_processor *pr) { int result = 0; int throttling_limit; int current_state; struct acpi_processor_limit *limit; int target_state; if (ignore_tpc) return 0; result = acpi_processor_get_platform_limit(pr); if (result) { /* Throttling Limit is unsupported */ return result; } throttling_limit = pr->throttling_platform_limit; if (throttling_limit >= pr->throttling.state_count) { /* Uncorrect Throttling Limit */ return -EINVAL; } current_state = pr->throttling.state; if (current_state > throttling_limit) { /* * The current state can meet the requirement of * _TPC limit. But it is reasonable that OSPM changes * t-states from high to low for better performance. * Of course the limit condition of thermal * and user should be considered. */ limit = &pr->limit; target_state = throttling_limit; if (limit->thermal.tx > target_state) target_state = limit->thermal.tx; if (limit->user.tx > target_state) target_state = limit->user.tx; } else if (current_state == throttling_limit) { /* * Unnecessary to change the throttling state */ return 0; } else { /* * If the current state is lower than the limit of _TPC, it * will be forced to switch to the throttling state defined * by throttling_platfor_limit. 
* Because the previous state meets with the limit condition * of thermal and user, it is unnecessary to check it again. */ target_state = throttling_limit; } return acpi_processor_set_throttling(pr, target_state, false); } /* * This function is used to reevaluate whether the T-state is valid * after one CPU is onlined/offlined. * It is noted that it won't reevaluate the following properties for * the T-state. * 1. Control method. * 2. the number of supported T-state * 3. TSD domain */ void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, unsigned long action) { int result = 0; if (action == CPU_DEAD) { /* When one CPU is offline, the T-state throttling * will be invalidated. */ pr->flags.throttling = 0; return; } /* the following is to recheck whether the T-state is valid for * the online CPU */ if (!pr->throttling.state_count) { /* If the number of T-state is invalid, it is * invalidated. */ pr->flags.throttling = 0; return; } pr->flags.throttling = 1; /* Disable throttling (if enabled). We'll let subsequent * policy (e.g.thermal) decide to lower performance if it * so chooses, but for now we'll crank up the speed. 
*/ result = acpi_processor_get_throttling(pr); if (result) goto end; if (pr->throttling.state) { result = acpi_processor_set_throttling(pr, 0, false); if (result) goto end; } end: if (result) pr->flags.throttling = 0; } /* * _PTC - Processor Throttling Control (and status) register location */ static int acpi_processor_get_throttling_control(struct acpi_processor *pr) { int result = 0; acpi_status status = 0; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *ptc = NULL; union acpi_object obj = { 0 }; struct acpi_processor_throttling *throttling; status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC")); } return -ENODEV; } ptc = (union acpi_object *)buffer.pointer; if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE) || (ptc->package.count != 2)) { printk(KERN_ERR PREFIX "Invalid _PTC data\n"); result = -EFAULT; goto end; } /* * control_register */ obj = ptc->package.elements[0]; if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_ptc_register)) || (obj.buffer.pointer == NULL)) { printk(KERN_ERR PREFIX "Invalid _PTC data (control_register)\n"); result = -EFAULT; goto end; } memcpy(&pr->throttling.control_register, obj.buffer.pointer, sizeof(struct acpi_ptc_register)); /* * status_register */ obj = ptc->package.elements[1]; if ((obj.type != ACPI_TYPE_BUFFER) || (obj.buffer.length < sizeof(struct acpi_ptc_register)) || (obj.buffer.pointer == NULL)) { printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n"); result = -EFAULT; goto end; } memcpy(&pr->throttling.status_register, obj.buffer.pointer, sizeof(struct acpi_ptc_register)); throttling = &pr->throttling; if ((throttling->control_register.bit_width + throttling->control_register.bit_offset) > 32) { printk(KERN_ERR PREFIX "Invalid _PTC control register\n"); result = -EFAULT; goto end; } if ((throttling->status_register.bit_width + 
throttling->status_register.bit_offset) > 32) { printk(KERN_ERR PREFIX "Invalid _PTC status register\n"); result = -EFAULT; goto end; } end: kfree(buffer.pointer); return result; } /* * _TSS - Throttling Supported States */ static int acpi_processor_get_throttling_states(struct acpi_processor *pr) { int result = 0; acpi_status status = AE_OK; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; struct acpi_buffer state = { 0, NULL }; union acpi_object *tss = NULL; int i; status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS")); } return -ENODEV; } tss = buffer.pointer; if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) { printk(KERN_ERR PREFIX "Invalid _TSS data\n"); result = -EFAULT; goto end; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", tss->package.count)); pr->throttling.state_count = tss->package.count; pr->throttling.states_tss = kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count, GFP_KERNEL); if (!pr->throttling.states_tss) { result = -ENOMEM; goto end; } for (i = 0; i < pr->throttling.state_count; i++) { struct acpi_processor_tx_tss *tx = (struct acpi_processor_tx_tss *)&(pr->throttling. 
states_tss[i]); state.length = sizeof(struct acpi_processor_tx_tss); state.pointer = tx; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); status = acpi_extract_package(&(tss->package.elements[i]), &format, &state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data")); result = -EFAULT; kfree(pr->throttling.states_tss); goto end; } if (!tx->freqpercentage) { printk(KERN_ERR PREFIX "Invalid _TSS data: freq is zero\n"); result = -EFAULT; kfree(pr->throttling.states_tss); goto end; } } end: kfree(buffer.pointer); return result; } /* * _TSD - T-State Dependencies */ static int acpi_processor_get_tsd(struct acpi_processor *pr) { int result = 0; acpi_status status = AE_OK; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; struct acpi_buffer state = { 0, NULL }; union acpi_object *tsd = NULL; struct acpi_tsd_package *pdomain; struct acpi_processor_throttling *pthrottling; pthrottling = &pr->throttling; pthrottling->tsd_valid_flag = 0; status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD")); } return -ENODEV; } tsd = buffer.pointer; if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) { printk(KERN_ERR PREFIX "Invalid _TSD data\n"); result = -EFAULT; goto end; } if (tsd->package.count != 1) { printk(KERN_ERR PREFIX "Invalid _TSD data\n"); result = -EFAULT; goto end; } pdomain = &(pr->throttling.domain_info); state.length = sizeof(struct acpi_tsd_package); state.pointer = pdomain; status = acpi_extract_package(&(tsd->package.elements[0]), &format, &state); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Invalid _TSD data\n"); result = -EFAULT; goto end; } if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) { printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n"); result = -EFAULT; goto end; } if (pdomain->revision != ACPI_TSD_REV0_REVISION) { 
printk(KERN_ERR PREFIX "Unknown _TSD:revision\n"); result = -EFAULT; goto end; } pthrottling = &pr->throttling; pthrottling->tsd_valid_flag = 1; pthrottling->shared_type = pdomain->coord_type; cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); /* * If the coordination type is not defined in ACPI spec, * the tsd_valid_flag will be clear and coordination type * will be forecd as DOMAIN_COORD_TYPE_SW_ALL. */ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { pthrottling->tsd_valid_flag = 0; pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } end: kfree(buffer.pointer); return result; } /* -------------------------------------------------------------------------- Throttling Control -------------------------------------------------------------------------- */ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) { int state = 0; u32 value = 0; u32 duty_mask = 0; u32 duty_value = 0; if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; pr->throttling.state = 0; duty_mask = pr->throttling.state_count - 1; duty_mask <<= pr->throttling.duty_offset; local_irq_disable(); value = inl(pr->throttling.address); /* * Compute the current throttling state when throttling is enabled * (bit 4 is on). 
*/ if (value & 0x10) { duty_value = value & duty_mask; duty_value >>= pr->throttling.duty_offset; if (duty_value) state = pr->throttling.state_count - duty_value; } pr->throttling.state = state; local_irq_enable(); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling state is T%d (%d%% throttling applied)\n", state, pr->throttling.states[state].performance)); return 0; } #ifdef CONFIG_X86 static int acpi_throttling_rdmsr(u64 *value) { u64 msr_high, msr_low; u64 msr = 0; int ret = -1; if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || !this_cpu_has(X86_FEATURE_ACPI)) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); } else { msr_low = 0; msr_high = 0; rdmsr_safe(MSR_IA32_THERM_CONTROL, (u32 *)&msr_low , (u32 *) &msr_high); msr = (msr_high << 32) | msr_low; *value = (u64) msr; ret = 0; } return ret; } static int acpi_throttling_wrmsr(u64 value) { int ret = -1; u64 msr; if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || !this_cpu_has(X86_FEATURE_ACPI)) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); } else { msr = value; wrmsr_safe(MSR_IA32_THERM_CONTROL, msr & 0xffffffff, msr >> 32); ret = 0; } return ret; } #else static int acpi_throttling_rdmsr(u64 *value) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); return -1; } static int acpi_throttling_wrmsr(u64 value) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); return -1; } #endif static int acpi_read_throttling_status(struct acpi_processor *pr, u64 *value) { u32 bit_width, bit_offset; u32 ptc_value; u64 ptc_mask; struct acpi_processor_throttling *throttling; int ret = -1; throttling = &pr->throttling; switch (throttling->status_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: bit_width = throttling->status_register.bit_width; bit_offset = throttling->status_register.bit_offset; acpi_os_read_port((acpi_io_address) throttling->status_register. 
address, &ptc_value, (u32) (bit_width + bit_offset)); ptc_mask = (1 << bit_width) - 1; *value = (u64) ((ptc_value >> bit_offset) & ptc_mask); ret = 0; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: ret = acpi_throttling_rdmsr(value); break; default: printk(KERN_ERR PREFIX "Unknown addr space %d\n", (u32) (throttling->status_register.space_id)); } return ret; } static int acpi_write_throttling_state(struct acpi_processor *pr, u64 value) { u32 bit_width, bit_offset; u64 ptc_value; u64 ptc_mask; struct acpi_processor_throttling *throttling; int ret = -1; throttling = &pr->throttling; switch (throttling->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: bit_width = throttling->control_register.bit_width; bit_offset = throttling->control_register.bit_offset; ptc_mask = (1 << bit_width) - 1; ptc_value = value & ptc_mask; acpi_os_write_port((acpi_io_address) throttling-> control_register.address, (u32) (ptc_value << bit_offset), (u32) (bit_width + bit_offset)); ret = 0; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: ret = acpi_throttling_wrmsr(value); break; default: printk(KERN_ERR PREFIX "Unknown addr space %d\n", (u32) (throttling->control_register.space_id)); } return ret; } static int acpi_get_throttling_state(struct acpi_processor *pr, u64 value) { int i; for (i = 0; i < pr->throttling.state_count; i++) { struct acpi_processor_tx_tss *tx = (struct acpi_processor_tx_tss *)&(pr->throttling. states_tss[i]); if (tx->control == value) return i; } return -1; } static int acpi_get_throttling_value(struct acpi_processor *pr, int state, u64 *value) { int ret = -1; if (state >= 0 && state <= pr->throttling.state_count) { struct acpi_processor_tx_tss *tx = (struct acpi_processor_tx_tss *)&(pr->throttling. 
states_tss[state]); *value = tx->control; ret = 0; } return ret; } static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) { int state = 0; int ret; u64 value; if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; pr->throttling.state = 0; value = 0; ret = acpi_read_throttling_status(pr, &value); if (ret >= 0) { state = acpi_get_throttling_state(pr, value); if (state == -1) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid throttling state, reset\n")); state = 0; ret = acpi_processor_set_throttling(pr, state, true); if (ret) return ret; } pr->throttling.state = state; } return 0; } static int acpi_processor_get_throttling(struct acpi_processor *pr) { cpumask_var_t saved_mask; int ret; if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) return -ENOMEM; /* * Migrate task to the cpu pointed by pr. */ cpumask_copy(saved_mask, &current->cpus_allowed); /* FIXME: use work_on_cpu() */ if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { /* Can't migrate to the target pr->id CPU. Exit */ free_cpumask_var(saved_mask); return -ENODEV; } ret = pr->throttling.acpi_processor_get_throttling(pr); /* restore the previous state */ set_cpus_allowed_ptr(current, saved_mask); free_cpumask_var(saved_mask); return ret; } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) { int i, step; if (!pr->throttling.address) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); return -EINVAL; } else if (!pr->throttling.duty_width) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); return -EINVAL; } /* TBD: Support duty_cycle values that span bit 4. */ else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n"); return -EINVAL; } pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width; /* * Compute state values. 
Note that throttling displays a linear power * performance relationship (at 50% performance the CPU will consume * 50% power). Values are in 1/10th of a percent to preserve accuracy. */ step = (1000 / pr->throttling.state_count); for (i = 0; i < pr->throttling.state_count; i++) { pr->throttling.states[i].performance = 1000 - step * i; pr->throttling.states[i].power = 1000 - step * i; } return 0; } static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, int state, bool force) { u32 value = 0; u32 duty_mask = 0; u32 duty_value = 0; if (!pr) return -EINVAL; if ((state < 0) || (state > (pr->throttling.state_count - 1))) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; if (!force && (state == pr->throttling.state)) return 0; if (state < pr->throttling_platform_limit) return -EPERM; /* * Calculate the duty_value and duty_mask. */ if (state) { duty_value = pr->throttling.state_count - state; duty_value <<= pr->throttling.duty_offset; /* Used to clear all duty_value bits */ duty_mask = pr->throttling.state_count - 1; duty_mask <<= acpi_gbl_FADT.duty_offset; duty_mask = ~duty_mask; } local_irq_disable(); /* * Disable throttling by writing a 0 to bit 4. Note that we must * turn it off before you can change the duty_value. */ value = inl(pr->throttling.address); if (value & 0x10) { value &= 0xFFFFFFEF; outl(value, pr->throttling.address); } /* * Write the new duty_value and then enable throttling. Note * that a state value of 0 leaves throttling disabled. */ if (state) { value &= duty_mask; value |= duty_value; outl(value, pr->throttling.address); value |= 0x00000010; outl(value, pr->throttling.address); } pr->throttling.state = state; local_irq_enable(); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling state set to T%d (%d%%)\n", state, (pr->throttling.states[state].performance ? 
pr-> throttling.states[state].performance / 10 : 0))); return 0; } static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, int state, bool force) { int ret; u64 value; if (!pr) return -EINVAL; if ((state < 0) || (state > (pr->throttling.state_count - 1))) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; if (!force && (state == pr->throttling.state)) return 0; if (state < pr->throttling_platform_limit) return -EPERM; value = 0; ret = acpi_get_throttling_value(pr, state, &value); if (ret >= 0) { acpi_write_throttling_state(pr, value); pr->throttling.state = state; } return 0; } static long acpi_processor_throttling_fn(void *data) { struct acpi_processor_throttling_arg *arg = data; struct acpi_processor *pr = arg->pr; return pr->throttling.acpi_processor_set_throttling(pr, arg->target_state, arg->force); } int acpi_processor_set_throttling(struct acpi_processor *pr, int state, bool force) { int ret = 0; unsigned int i; struct acpi_processor *match_pr; struct acpi_processor_throttling *p_throttling; struct acpi_processor_throttling_arg arg; struct throttling_tstate t_state; if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; if ((state < 0) || (state > (pr->throttling.state_count - 1))) return -EINVAL; if (cpu_is_offline(pr->id)) { /* * the cpu pointed by pr->id is offline. Unnecessary to change * the throttling state any more. */ return -ENODEV; } t_state.target_state = state; p_throttling = &(pr->throttling); /* * The throttling notifier will be called for every * affected cpu in order to get one proper T-state. * The notifier event is THROTTLING_PRECHANGE. */ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, &t_state); } /* * The function of acpi_processor_set_throttling will be called * to switch T-state. If the coordination type is SW_ALL or HW_ALL, * it is necessary to call it for every affected cpu. 
Otherwise * it can be called only for the cpu pointed by pr. */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { arg.pr = pr; arg.target_state = state; arg.force = force; ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, * it is necessary to set T-state for every affected * cpus. */ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { match_pr = per_cpu(processors, i); /* * If the pointer is invalid, we will report the * error message and continue. */ if (!match_pr) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid Pointer for CPU %d\n", i)); continue; } /* * If the throttling control is unsupported on CPU i, * we will report the error message and continue. */ if (!match_pr->flags.throttling) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling Control is unsupported " "on CPU %d\n", i)); continue; } arg.pr = match_pr; arg.target_state = state; arg.force = force; ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); } } /* * After the set_throttling is called, the * throttling notifier is called for every * affected cpu to update the T-states. * The notifier event is THROTTLING_POSTCHANGE */ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, &t_state); } return ret; } int acpi_processor_get_throttling_info(struct acpi_processor *pr) { int result = 0; struct acpi_processor_throttling *pthrottling; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", pr->throttling.address, pr->throttling.duty_offset, pr->throttling.duty_width)); /* * Evaluate _PTC, _TSS and _TPC * They must all be present or none of them can be used. 
*/ if (acpi_processor_get_throttling_control(pr) || acpi_processor_get_throttling_states(pr) || acpi_processor_get_platform_limit(pr)) { pr->throttling.acpi_processor_get_throttling = &acpi_processor_get_throttling_fadt; pr->throttling.acpi_processor_set_throttling = &acpi_processor_set_throttling_fadt; if (acpi_processor_get_fadt_info(pr)) return 0; } else { pr->throttling.acpi_processor_get_throttling = &acpi_processor_get_throttling_ptc; pr->throttling.acpi_processor_set_throttling = &acpi_processor_set_throttling_ptc; } /* * If TSD package for one CPU can't be parsed successfully, it means * that this CPU will have no coordination with other CPUs. */ if (acpi_processor_get_tsd(pr)) { pthrottling = &pr->throttling; pthrottling->tsd_valid_flag = 0; cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } /* * PIIX4 Errata: We don't support throttling on the original PIIX4. * This shouldn't be an issue as few (if any) mobile systems ever * used this part. */ if (errata.piix4.throttle) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling not supported on PIIX4 A- or B-step\n")); return 0; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", pr->throttling.state_count)); pr->flags.throttling = 1; /* * Disable throttling (if enabled). We'll let subsequent policy (e.g. * thermal) decide to lower performance if it so chooses, but for now * we'll crank up the speed. */ result = acpi_processor_get_throttling(pr); if (result) goto end; if (pr->throttling.state) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n", pr->throttling.state)); result = acpi_processor_set_throttling(pr, 0, false); if (result) goto end; } end: if (result) pr->flags.throttling = 0; return result; }
gpl-2.0
aweosomeabhijeet/android_kernel_sony_xmd
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
3234
51883
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "drm_crtc_helper.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include <linux/backlight.h> #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); radeon_encoder->active_device = 0; } static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; int panel_pwr_delay = 2000; bool is_mac = false; uint8_t backlight_level; DRM_DEBUG_KMS("\n"); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; panel_pwr_delay = lvds->panel_pwr_delay; if (lvds->bl_dev) backlight_level = lvds->backlight_level; } } /* macs (and possibly some x86 oem systems?) wire up LVDS strangely * Taken from radeonfb. 
*/ if ((rdev->mode_info.connector_table == CT_IBOOK) || (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) || (rdev->mode_info.connector_table == CT_POWERBOOK_VGA)) is_mac = true; switch (mode) { case DRM_MODE_DPMS_ON: disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); disp_pwr_man |= RADEON_AUTO_PWRUP_EN; WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl |= RADEON_LVDS_PLL_EN; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); mdelay(1); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS | RADEON_LVDS_BL_MOD_LEVEL_MASK); lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON | (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT)); if (is_mac) lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; if (is_mac) { lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN); } else { WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); } mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); mdelay(panel_pwr_delay); break; } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DRM_DEBUG("\n"); if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->dpms_mode = mode; } } radeon_legacy_lvds_update(encoder, mode); } static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl; DRM_DEBUG_KMS("\n"); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); if (rdev->is_atom_bios) { /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl * need to call that on resume to set up the reg properly. 
*/ radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } else { struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; if (lvds) { DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); lvds_gen_cntl = lvds->lvds_gen_cntl; lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); } else lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); } lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_RST_FM); if (ASIC_IS_R300(rdev)) lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK); if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { if (radeon_encoder->rmx_type != RMX_OFF) lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; } else lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev)) lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2; else lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2; } WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl); if (rdev->family == CHIP_RV410) WREG32(RADEON_CLOCK_CNTL_INDEX, 0); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); /* get the native mode for LVDS */ if (radeon_encoder->active_device & 
(ATOM_DEVICE_LCD_SUPPORT)) radeon_panel_mode_fixup(encoder, adjusted_mode); return true; } static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .dpms = radeon_legacy_lvds_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_lvds_prepare, .mode_set = radeon_legacy_lvds_mode_set, .commit = radeon_legacy_lvds_commit, .disable = radeon_legacy_encoder_disable, }; #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) #define MAX_RADEON_LEVEL 0xFF struct radeon_backlight_privdata { struct radeon_encoder *encoder; uint8_t negative; }; static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); uint8_t level; /* Convert brightness to hardware level */ if (bd->props.brightness < 0) level = 0; else if (bd->props.brightness > MAX_RADEON_LEVEL) level = MAX_RADEON_LEVEL; else level = bd->props.brightness; if (pdata->negative) level = MAX_RADEON_LEVEL - level; return level; } static int radeon_legacy_backlight_update_status(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; int dpms_mode = DRM_MODE_DPMS_ON; if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; dpms_mode = lvds->dpms_mode; lvds->backlight_level = radeon_legacy_lvds_level(bd); } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; dpms_mode = lvds->dpms_mode; lvds->backlight_level = radeon_legacy_lvds_level(bd); } } if (bd->props.brightness > 0) radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); else radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); return 0; } static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd) { struct 
radeon_backlight_privdata *pdata = bl_get_data(bd); struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint8_t backlight_level; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level; } static const struct backlight_ops radeon_backlight_ops = { .get_brightness = radeon_legacy_backlight_get_brightness, .update_status = radeon_legacy_backlight_update_status, }; void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd; struct backlight_properties props; struct radeon_backlight_privdata *pdata; uint8_t backlight_level; if (!radeon_encoder->enc_priv) return; #ifdef CONFIG_PMAC_BACKLIGHT if (!pmac_has_backlight_type("ati") && !pmac_has_backlight_type("mnca")) return; #endif pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); goto error; } props.max_brightness = MAX_RADEON_LEVEL; props.type = BACKLIGHT_RAW; bd = backlight_device_register("radeon_bl", &drm_connector->kdev, pdata, &radeon_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); goto error; } pdata->encoder = radeon_encoder; backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; /* First, try to detect backlight level sense based on the assumption * that firmware set it up at full brightness */ if (backlight_level == 0) pdata->negative = true; else if (backlight_level == 0xff) pdata->negative = false; else { /* XXX hack... maybe some day we can figure out in what direction * backlight should work on a given panel? 
*/ pdata->negative = (rdev->family != CHIP_RV200 && rdev->family != CHIP_RV250 && rdev->family != CHIP_RV280 && rdev->family != CHIP_RV350); #ifdef CONFIG_PMAC_BACKLIGHT pdata->negative = (pdata->negative || of_machine_is_compatible("PowerBook4,3") || of_machine_is_compatible("PowerBook6,3") || of_machine_is_compatible("PowerBook6,5")); #endif } if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; lvds->bl_dev = bd; } bd->props.brightness = radeon_legacy_backlight_get_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); DRM_INFO("radeon legacy LVDS backlight initialized\n"); return; error: kfree(pdata); return; } static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct backlight_device *bd = NULL; if (!radeon_encoder->enc_priv) return; if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; bd = lvds->bl_dev; lvds->bl_dev = NULL; } if (bd) { struct radeon_legacy_backlight_privdata *pdata; pdata = bl_get_data(bd); backlight_device_unregister(bd); kfree(pdata); DRM_INFO("radeon legacy LVDS backlight unloaded\n"); } } #else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ void radeon_legacy_backlight_init(struct radeon_encoder *encoder) { } static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder) { } #endif static void radeon_lvds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->enc_priv) { radeon_legacy_backlight_exit(radeon_encoder); kfree(radeon_encoder->enc_priv); } drm_encoder_cleanup(encoder); kfree(radeon_encoder); } static const struct drm_encoder_funcs 
radeon_legacy_lvds_enc_funcs = { .destroy = radeon_lvds_enc_destroy, }; static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL); uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: crtc_ext_cntl |= RADEON_CRTC_CRT_ON; dac_cntl &= ~RADEON_DAC_PDWN; dac_macro_cntl &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON; dac_cntl |= RADEON_DAC_PDWN; dac_macro_cntl |= (RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); break; } WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; DRM_DEBUG_KMS("\n"); if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL); WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } else { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & ~(RADEON_DISP_DAC_SOURCE_MASK); disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL; WREG32(RADEON_DAC_CNTL2, dac2_cntl); } } dac_cntl = (RADEON_DAC_MASK_ALL | RADEON_DAC_VGA_ADR_EN | /* TODO 6-bits */ RADEON_DAC_8BIT_EN); WREG32_P(RADEON_DAC_CNTL, dac_cntl, RADEON_DAC_RANGE_CNTL | RADEON_DAC_BLANKING); if (radeon_encoder->enc_priv) { 
struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv; dac_macro_cntl = p_dac->ps2_pdac_adj; } else dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B; WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t vclk_ecp_cntl, crtc_ext_cntl; uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp; enum drm_connector_status found = connector_status_disconnected; bool color = true; /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl = RREG32(RADEON_DAC_CNTL); dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); tmp = vclk_ecp_cntl & ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp); tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; WREG32(RADEON_CRTC_EXT_CNTL, tmp); tmp = RADEON_DAC_FORCE_BLANK_OFF_EN | RADEON_DAC_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN); tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; WREG32(RADEON_DAC_CNTL, tmp); tmp &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); WREG32(RADEON_DAC_MACRO_CNTL, tmp); mdelay(2); if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT) found = 
connector_status_connected; /* restore the regs we used */ WREG32(RADEON_DAC_CNTL, dac_cntl); WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl); return found; } static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { .dpms = radeon_legacy_primary_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_primary_dac_prepare, .mode_set = radeon_legacy_primary_dac_mode_set, .commit = radeon_legacy_primary_dac_commit, .detect = radeon_legacy_primary_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); break; } WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; int i; DRM_DEBUG_KMS("\n"); tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; if (rdev->family == CHIP_RV280) { /* bit 22 of TMDS_PLL_CNTL is read-back inverted */ tmp ^= (1 << 22); tmds_pll_cntl ^= (1 << 22); } if (radeon_encoder->enc_priv) { struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv; for (i = 0; i < 4; i++) { if (tmds->tmds_pll[i].freq == 0) break; if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) { tmp = tmds->tmds_pll[i].value ; break; } } } if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) { if (tmp & 0xfff00000) tmds_pll_cntl = tmp; else { tmds_pll_cntl &= 0xfff00000; tmds_pll_cntl |= tmp; } } else tmds_pll_cntl = tmp; tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) & ~(RADEON_TMDS_TRANSMITTER_PLLRST); if (rdev->family == CHIP_R200 || rdev->family == CHIP_R100 || ASIC_IS_R300(rdev)) tmds_transmitter_cntl &= 
~(RADEON_TMDS_TRANSMITTER_PLLEN); else /* RV chips got this bit reversed */ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) | (RADEON_FP_CRTC_DONT_SHADOW_VPAR | RADEON_FP_CRTC_DONT_SHADOW_HEND)); fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | RADEON_FP_DFP_SYNC_SEL | RADEON_FP_CRT_SYNC_SEL | RADEON_FP_CRTC_LOCK_8DOT | RADEON_FP_USE_SHADOW_EN | RADEON_FP_CRTC_USE_SHADOW_VEND | RADEON_FP_CRT_SYNC_ALT); if (1) /* FIXME rgbBits == 8 */ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ else fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; else fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; } else fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; } else { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; } else fp_gen_cntl |= RADEON_FP_SEL_CRTC2; } WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { .dpms = radeon_legacy_tmds_int_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_int_prepare, .mode_set = radeon_legacy_tmds_int_mode_set, .commit = radeon_legacy_tmds_int_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { .destroy = radeon_enc_destroy, }; static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int 
mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN; fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: fp2_gen_cntl |= RADEON_FP2_BLANK_EN; fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); break; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, false); else radeon_combios_output_lock(encoder, false); } static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl; DRM_DEBUG_KMS("\n"); if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_dvo_setup(encoder, ATOM_ENABLE); fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); } else { fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (1) /* FIXME rgbBits 
== 8 */ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */ else fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN | RADEON_FP2_DVO_RATE_SEL_SDR); /* XXX: these are oem specific */ if (ASIC_IS_R300(rdev)) { if ((dev->pdev->device == 0x4850) && (dev->pdev->subsystem_vendor == 0x1028) && (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; else fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; /*if (mode->clock > 165000) fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ } if (!radeon_combios_external_tmds_setup(encoder)) radeon_external_tmds_setup(encoder); } if (radeon_crtc->crtc_id == 0) { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; if (radeon_encoder->rmx_type != RMX_OFF) fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; else fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; } else fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2; } else { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2; } WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; if (tmds) { if (tmds->i2c_bus) radeon_i2c_destroy(tmds->i2c_bus); } kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); } static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { .dpms = radeon_legacy_tmds_ext_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = 
radeon_legacy_tmds_ext_prepare, .mode_set = radeon_legacy_tmds_ext_mode_set, .commit = radeon_legacy_tmds_ext_commit, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { .destroy = radeon_ext_tmds_enc_destroy, }; static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; uint32_t tv_master_cntl = 0; bool is_tv; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); else { if (is_tv) tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); else crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); } switch (mode) { case DRM_MODE_DPMS_ON: if (rdev->family == CHIP_R200) { fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); } else { if (is_tv) tv_master_cntl |= RADEON_TV_ON; else crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl &= ~(R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (rdev->family == CHIP_R200) fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); else { if (is_tv) tv_master_cntl &= ~RADEON_TV_ON; else crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) tv_dac_cntl |= (R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); else tv_dac_cntl |= (RADEON_TV_DAC_RDACPD | 
RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD | RADEON_TV_DAC_BGSLEEP); } break; } if (rdev->family == CHIP_R200) { WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); } else { if (is_tv) WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); else WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (rdev->is_atom_bios) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); } static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (rdev->is_atom_bios) radeon_atom_output_lock(encoder, true); else radeon_combios_output_lock(encoder, true); } static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0; bool is_tv = false; DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? 
true : false; if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || rdev->family == CHIP_R423 || rdev->family == CHIP_RV410) { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | R420_TV_DAC_DACADJ_MASK | R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | RADEON_TV_DAC_BGADJ_MASK | RADEON_TV_DAC_DACADJ_MASK | RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | RADEON_TV_DAC_BDACPD); } tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; if (is_tv) { if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J || tv_dac->tv_std == TV_STD_PAL_M || tv_dac->tv_std == TV_STD_PAL_60) tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; else tv_dac_cntl |= tv_dac->pal_tvdac_adj; if (tv_dac->tv_std == TV_STD_NTSC || tv_dac->tv_std == TV_STD_NTSC_J) tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; else tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; } else tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | tv_dac->ps2_tvdac_adj); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } if (ASIC_IS_R300(rdev)) { gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); } else if (rdev->family != CHIP_R200) disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); else if (rdev->family == CHIP_R200) fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); if (rdev->family >= CHIP_R200) disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); if (is_tv) { uint32_t dac_cntl; dac_cntl = RREG32(RADEON_DAC_CNTL); dac_cntl &= ~RADEON_DAC_TVO_EN; WREG32(RADEON_DAC_CNTL, dac_cntl); if (ASIC_IS_R300(rdev)) gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1; dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC | RADEON_DISP_TV_SOURCE_CRTC); } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl &= 
~RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC; } if (rdev->family >= CHIP_R200) { disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2; } else { disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } else { dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); } else disp_hw_debug |= RADEON_CRT2_DISP1_SEL; } else { if (ASIC_IS_R300(rdev)) { disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2; } else if (rdev->family == CHIP_R200) { fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | RADEON_FP2_DVO_RATE_SEL_SDR); fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; } else disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; } WREG32(RADEON_DAC_CNTL2, dac2_cntl); } if (ASIC_IS_R300(rdev)) { WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); } else if (rdev->family != CHIP_R200) WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); else if (rdev->family == CHIP_R200) WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->family >= CHIP_R200) WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); if (is_tv) radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); if (rdev->is_atom_bios) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); else radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static bool r300_legacy_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl, 
tv_dac_cntl, dac_cntl2, dac_ext_cntl; uint32_t disp_output_cntl, gpiopad_a, tmp; bool found = false; /* save regs needed */ gpiopad_a = RREG32(RADEON_GPIOPAD_A); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); WREG32_P(RADEON_GPIOPAD_A, 0, ~1); WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL); WREG32(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT); tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); WREG32(RADEON_DAC_EXT_CNTL, RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN | RADEON_DAC_FORCE_DATA_SEL_RGB | (0xec << RADEON_DAC_FORCE_DATA_SHIFT)); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); mdelay(4); WREG32(RADEON_TV_DAC_CNTL, RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT) | (6 << RADEON_TV_DAC_DACADJ_SHIFT)); RREG32(RADEON_TV_DAC_CNTL); mdelay(6); tmp = RREG32(RADEON_TV_DAC_CNTL); if ((tmp & RADEON_TV_DAC_GDACDET) != 0) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); return found; } static bool radeon_legacy_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t tv_dac_cntl, 
dac_cntl2; uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp; bool found = false; if (ASIC_IS_R300(rdev)) return r300_legacy_tv_detect(encoder, connector); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); config_cntl = RREG32(RADEON_CONFIG_CNTL); tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL); tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL; WREG32(RADEON_DAC_CNTL2, tmp); tmp = tv_master_cntl | RADEON_TV_ON; tmp &= ~(RADEON_TV_ASYNC_RST | RADEON_RESTART_PHASE_FIX | RADEON_CRT_FIFO_CE_EN | RADEON_TV_FIFO_CE_EN | RADEON_RE_SYNC_NOW_SEL_MASK); tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST; WREG32(RADEON_TV_MASTER_CNTL, tmp); tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC | (8 << RADEON_TV_DAC_BGADJ_SHIFT); if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK) tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT); else tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT); WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN | RADEON_RED_MX_FORCE_DAC_DATA | RADEON_GRN_MX_FORCE_DAC_DATA | RADEON_BLU_MX_FORCE_DAC_DATA | (0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT); WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp); mdelay(3); tmp = RREG32(RADEON_TV_DAC_CNTL); if (tmp & RADEON_TV_DAC_GDACDET) { found = true; DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); WREG32(RADEON_DAC_CNTL2, dac_cntl2); return found; } static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, 
dac_ext_cntl; uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; enum drm_connector_status found = connector_status_disconnected; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; bool color = true; struct drm_crtc *crtc; /* find out if crtc2 is in use or if this encoder is using it */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { if (encoder->crtc != crtc) { return connector_status_disconnected; } } } if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || connector->connector_type == DRM_MODE_CONNECTOR_Composite || connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) { bool tv_detect; if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT)) return connector_status_disconnected; tv_detect = radeon_legacy_tv_detect(encoder, connector); if (tv_detect && tv_dac) found = connector_status_connected; return found; } /* don't probe if the encoder is being used for something else not CRT related */ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) { DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device); return connector_status_disconnected; } /* save the regs we need */ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0; disp_hw_debug = ASIC_IS_R300(rdev) ? 
0 : RREG32(RADEON_DISP_HW_DEBUG); crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); dac_cntl2 = RREG32(RADEON_DAC_CNTL2); tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb | RADEON_PIX2CLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); if (ASIC_IS_R300(rdev)) WREG32_P(RADEON_GPIOPAD_A, 1, ~1); tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; tmp |= RADEON_CRTC2_CRT2_ON | (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); WREG32(RADEON_CRTC2_GEN_CNTL, tmp); if (ASIC_IS_R300(rdev)) { tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; WREG32(RADEON_DISP_OUTPUT_CNTL, tmp); } else { tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; WREG32(RADEON_DISP_HW_DEBUG, tmp); } tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD | RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_PS2; WREG32(RADEON_TV_DAC_CNTL, tmp); tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN | RADEON_DAC2_FORCE_DATA_EN; if (color) tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; else tmp |= RADEON_DAC_FORCE_DATA_SEL_G; if (ASIC_IS_R300(rdev)) tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); else tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); WREG32(RADEON_DAC_EXT_CNTL, tmp); tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN; WREG32(RADEON_DAC_CNTL2, tmp); mdelay(10); if (ASIC_IS_R300(rdev)) { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B) found = connector_status_connected; } else { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT) found = connector_status_connected; } /* restore regs we used */ WREG32(RADEON_DAC_CNTL2, dac_cntl2); WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); if (ASIC_IS_R300(rdev)) { WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); } else { WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); } WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); return 
found; } static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { .dpms = radeon_legacy_tv_dac_dpms, .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tv_dac_prepare, .mode_set = radeon_legacy_tv_dac_mode_set, .commit = radeon_legacy_tv_dac_commit, .detect = radeon_legacy_tv_dac_detect, .disable = radeon_legacy_encoder_disable, }; static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_int_tmds *tmds = NULL; bool ret; tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); if (!tmds) return NULL; if (rdev->is_atom_bios) ret = radeon_atombios_get_tmds_info(encoder, tmds); else ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); if (ret == false) radeon_legacy_get_tmds_info_from_table(encoder, tmds); return tmds; } static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_ext_tmds *tmds = NULL; bool ret; if (rdev->is_atom_bios) return NULL; tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL); if (!tmds) return NULL; ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); if (ret == false) radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); return tmds; } void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->encoder_enum == 
encoder_enum) { radeon_encoder->devices |= supported_device; return; } } /* add a new one */ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); if (!radeon_encoder) return; encoder = &radeon_encoder->base; if (rdev->flags & RADEON_SINGLE_CRTC) encoder->possible_crtcs = 0x1; else encoder->possible_crtcs = 0x3; radeon_encoder->enc_priv = NULL; radeon_encoder->encoder_enum = encoder_enum; radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder); radeon_encoder->rmx_type = RMX_FULL; break; case ENCODER_OBJECT_ID_INTERNAL_TMDS1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = 
radeon_atombios_get_tv_dac_info(radeon_encoder); else radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); if (!rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); break; } }
gpl-2.0
waleedq/samsung-kernel-latona
kernel/spinlock.c
4002
9729
/* * Copyright (2004) Linus Torvalds * * Author: Zwane Mwaikambo <zwane@fsmlabs.com> * * Copyright (2004, 2005) Ingo Molnar * * This file contains the spinlock/rwlock implementations for the * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) * * Note that some architectures have special knowledge about the * stack frames of these functions in their profile_pc. If you * change anything significant here that could change the stack * frame contact the architecture maintainers. */ #include <linux/linkage.h> #include <linux/preempt.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/module.h> /* * If lockdep is enabled then we use the non-preemption spin-ops * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) /* * The __lock_function inlines are taken from * include/linux/spinlock_api_smp.h */ #else #define raw_read_can_lock(l) read_can_lock(l) #define raw_write_can_lock(l) write_can_lock(l) /* * We build the __lock_function inlines here. They are too large for * inlining all over the place, but here is only one user per function * which embedds them into the calling _lock_function below. * * This could be a long-held lock. We both prepare to spin for a long * time (making _this_ CPU preemptable if possible), and we also signal * towards that other CPU that it should break the lock ASAP. 
*/ #define BUILD_LOCK_OPS(op, locktype) \ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ if (likely(do_raw_##op##_trylock(lock))) \ break; \ preempt_enable(); \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ \ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ for (;;) { \ preempt_disable(); \ local_irq_save(flags); \ if (likely(do_raw_##op##_trylock(lock))) \ break; \ local_irq_restore(flags); \ preempt_enable(); \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ } \ \ void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \ { \ _raw_##op##_lock_irqsave(lock); \ } \ \ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ { \ unsigned long flags; \ \ /* */ \ /* Careful: we must exclude softirqs too, hence the */ \ /* irq-disabling. 
We use the generic preemption-aware */ \ /* function: */ \ /**/ \ flags = _raw_##op##_lock_irqsave(lock); \ local_bh_disable(); \ local_irq_restore(flags); \ } \ /* * Build preemption-friendly versions of the following * lock-spinning functions: * * __[spin|read|write]_lock() * __[spin|read|write]_lock_irq() * __[spin|read|write]_lock_irqsave() * __[spin|read|write]_lock_bh() */ BUILD_LOCK_OPS(spin, raw_spinlock); BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { return __raw_spin_trylock(lock); } EXPORT_SYMBOL(_raw_spin_trylock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) { return __raw_spin_trylock_bh(lock); } EXPORT_SYMBOL(_raw_spin_trylock_bh); #endif #ifndef CONFIG_INLINE_SPIN_LOCK void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { __raw_spin_lock(lock); } EXPORT_SYMBOL(_raw_spin_lock); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) { return __raw_spin_lock_irqsave(lock); } EXPORT_SYMBOL(_raw_spin_lock_irqsave); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) { __raw_spin_lock_irq(lock); } EXPORT_SYMBOL(_raw_spin_lock_irq); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_BH void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) { __raw_spin_lock_bh(lock); } EXPORT_SYMBOL(_raw_spin_lock_bh); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { __raw_spin_unlock(lock); } EXPORT_SYMBOL(_raw_spin_unlock); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { __raw_spin_unlock_irqrestore(lock, flags); } EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) { 
__raw_spin_unlock_irq(lock); } EXPORT_SYMBOL(_raw_spin_unlock_irq); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) { __raw_spin_unlock_bh(lock); } EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif #ifndef CONFIG_INLINE_READ_TRYLOCK int __lockfunc _raw_read_trylock(rwlock_t *lock) { return __raw_read_trylock(lock); } EXPORT_SYMBOL(_raw_read_trylock); #endif #ifndef CONFIG_INLINE_READ_LOCK void __lockfunc _raw_read_lock(rwlock_t *lock) { __raw_read_lock(lock); } EXPORT_SYMBOL(_raw_read_lock); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) { return __raw_read_lock_irqsave(lock); } EXPORT_SYMBOL(_raw_read_lock_irqsave); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQ void __lockfunc _raw_read_lock_irq(rwlock_t *lock) { __raw_read_lock_irq(lock); } EXPORT_SYMBOL(_raw_read_lock_irq); #endif #ifndef CONFIG_INLINE_READ_LOCK_BH void __lockfunc _raw_read_lock_bh(rwlock_t *lock) { __raw_read_lock_bh(lock); } EXPORT_SYMBOL(_raw_read_lock_bh); #endif #ifndef CONFIG_INLINE_READ_UNLOCK void __lockfunc _raw_read_unlock(rwlock_t *lock) { __raw_read_unlock(lock); } EXPORT_SYMBOL(_raw_read_unlock); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { __raw_read_unlock_irqrestore(lock, flags); } EXPORT_SYMBOL(_raw_read_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) { __raw_read_unlock_irq(lock); } EXPORT_SYMBOL(_raw_read_unlock_irq); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_BH void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) { __raw_read_unlock_bh(lock); } EXPORT_SYMBOL(_raw_read_unlock_bh); #endif #ifndef CONFIG_INLINE_WRITE_TRYLOCK int __lockfunc _raw_write_trylock(rwlock_t *lock) { return __raw_write_trylock(lock); } EXPORT_SYMBOL(_raw_write_trylock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK void __lockfunc 
_raw_write_lock(rwlock_t *lock) { __raw_write_lock(lock); } EXPORT_SYMBOL(_raw_write_lock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) { return __raw_write_lock_irqsave(lock); } EXPORT_SYMBOL(_raw_write_lock_irqsave); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ void __lockfunc _raw_write_lock_irq(rwlock_t *lock) { __raw_write_lock_irq(lock); } EXPORT_SYMBOL(_raw_write_lock_irq); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_BH void __lockfunc _raw_write_lock_bh(rwlock_t *lock) { __raw_write_lock_bh(lock); } EXPORT_SYMBOL(_raw_write_lock_bh); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK void __lockfunc _raw_write_unlock(rwlock_t *lock) { __raw_write_unlock(lock); } EXPORT_SYMBOL(_raw_write_unlock); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { __raw_write_unlock_irqrestore(lock, flags); } EXPORT_SYMBOL(_raw_write_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) { __raw_write_unlock_irq(lock); } EXPORT_SYMBOL(_raw_write_unlock_irq); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) { __raw_write_unlock_bh(lock); } EXPORT_SYMBOL(_raw_write_unlock_bh); #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } EXPORT_SYMBOL(_raw_spin_lock_nested); unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { unsigned long flags; local_irq_save(flags); preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, do_raw_spin_lock_flags, &flags); return flags; } EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); void 
__lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *nest_lock) { preempt_disable(); spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } EXPORT_SYMBOL(_raw_spin_lock_nest_lock); #endif notrace int in_lock_functions(unsigned long addr) { /* Linker adds these: start and end of __lockfunc functions */ extern char __lock_text_start[], __lock_text_end[]; return addr >= (unsigned long)__lock_text_start && addr < (unsigned long)__lock_text_end; } EXPORT_SYMBOL(in_lock_functions);
gpl-2.0
srsdanitest/swingacera9
net/netfilter/ipset/ip_set_hash_netiface.c
4770
19935
/* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the hash:net,iface type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/rbtree.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_hash.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("hash:net,iface type of IP sets"); MODULE_ALIAS("ip_set_hash:net,iface"); /* Interface name rbtree */ struct iface_node { struct rb_node node; char iface[IFNAMSIZ]; }; #define iface_data(n) (rb_entry(n, struct iface_node, node)->iface) static inline long ifname_compare(const char *_a, const char *_b) { const long *a = (const long *)_a; const long *b = (const long *)_b; BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); if (a[0] != b[0]) return a[0] - b[0]; if (IFNAMSIZ > sizeof(long)) { if (a[1] != b[1]) return a[1] - b[1]; } if (IFNAMSIZ > 2 * sizeof(long)) { if (a[2] != b[2]) return a[2] - b[2]; } if (IFNAMSIZ > 3 * sizeof(long)) { if (a[3] != b[3]) return a[3] - b[3]; } return 0; } static void rbtree_destroy(struct rb_root *root) { struct rb_node *p, *n = root->rb_node; struct iface_node *node; /* Non-recursive destroy, like in ext3 */ while (n) { if (n->rb_left) { n = n->rb_left; continue; } if (n->rb_right) { n = n->rb_right; continue; } p = rb_parent(n); node = rb_entry(n, struct iface_node, node); if (!p) *root = RB_ROOT; else if (p->rb_left == n) p->rb_left = NULL; else if (p->rb_right == 
n) p->rb_right = NULL; kfree(node); n = p; } } static int iface_test(struct rb_root *root, const char **iface) { struct rb_node *n = root->rb_node; while (n) { const char *d = iface_data(n); long res = ifname_compare(*iface, d); if (res < 0) n = n->rb_left; else if (res > 0) n = n->rb_right; else { *iface = d; return 1; } } return 0; } static int iface_add(struct rb_root *root, const char **iface) { struct rb_node **n = &(root->rb_node), *p = NULL; struct iface_node *d; while (*n) { char *ifname = iface_data(*n); long res = ifname_compare(*iface, ifname); p = *n; if (res < 0) n = &((*n)->rb_left); else if (res > 0) n = &((*n)->rb_right); else { *iface = ifname; return 0; } } d = kzalloc(sizeof(*d), GFP_ATOMIC); if (!d) return -ENOMEM; strcpy(d->iface, *iface); rb_link_node(&d->node, p, n); rb_insert_color(&d->node, root); *iface = d->iface; return 0; } /* Type specific function prefix */ #define TYPE hash_netiface static bool hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b); #define hash_netiface4_same_set hash_netiface_same_set #define hash_netiface6_same_set hash_netiface_same_set #define STREQ(a, b) (strcmp(a, b) == 0) /* The type variant functions: IPv4 */ struct hash_netiface4_elem_hashed { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; }; #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) /* Member elements without timeout */ struct hash_netiface4_elem { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; const char *iface; }; /* Member elements with timeout support */ struct hash_netiface4_telem { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; const char *iface; unsigned long timeout; }; static inline bool hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, const struct hash_netiface4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && ip1->iface == ip2->iface; } static inline bool hash_netiface4_data_isnull(const 
struct hash_netiface4_elem *elem) { return elem->cidr == 0; } static inline void hash_netiface4_data_copy(struct hash_netiface4_elem *dst, const struct hash_netiface4_elem *src) { dst->ip = src->ip; dst->cidr = src->cidr; dst->physdev = src->physdev; dst->iface = src->iface; dst->nomatch = src->nomatch; } static inline void hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) { dst->nomatch = flags & IPSET_FLAG_NOMATCH; } static inline bool hash_netiface4_data_match(const struct hash_netiface4_elem *elem) { return !elem->nomatch; } static inline void hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr; } static inline void hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem) { elem->cidr = 0; } static bool hash_netiface4_data_list(struct sk_buff *skb, const struct hash_netiface4_elem *data) { u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } static bool hash_netiface4_data_tlist(struct sk_buff *skb, const struct hash_netiface4_elem *data) { const struct hash_netiface4_telem *tdata = (const struct hash_netiface4_telem *)data; u32 flags = data->physdev ? 
IPSET_FLAG_PHYSDEV : 0; if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(tdata->timeout))); return 0; nla_put_failure: return 1; } #define IP_SET_HASH_WITH_NETS #define IP_SET_HASH_WITH_RBTREE #define IP_SET_HASH_WITH_MULTI #define PF 4 #define HOST_MASK 32 #include <linux/netfilter/ipset/ip_set_ahash.h> static inline void hash_netiface4_data_next(struct ip_set_hash *h, const struct hash_netiface4_elem *d) { h->next.ip = ntohl(d->ip); } static int hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, const struct ip_set_adt_opt *opt) { struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem data = { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK }; int ret; if (data.cidr == 0) return -EINVAL; if (adt == IPSET_TEST) data.cidr = HOST_MASK; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); data.ip &= ip_set_netmask(data.cidr); #define IFACE(dir) (par->dir ? par->dir->name : NULL) #define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL) #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #ifdef CONFIG_BRIDGE_NETFILTER const struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (!nf_bridge) return -EINVAL; data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); data.physdev = 1; #else data.iface = NULL; #endif } else data.iface = SRCDIR ? 
IFACE(in) : IFACE(out); if (!data.iface) return -EINVAL; ret = iface_test(&h->rbtree, &data.iface); if (adt == IPSET_ADD) { if (!ret) { ret = iface_add(&h->rbtree, &data.iface); if (ret) return ret; } } else if (!ret) return ret; return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); } static int hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem data = { .cidr = HOST_MASK }; u32 ip = 0, ip_to, last; u32 timeout = h->timeout; char iface[IFNAMSIZ] = {}; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!data.cidr || data.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); data.iface = iface; ret = iface_test(&h->rbtree, &data.iface); if (adt == IPSET_ADD) { if (!ret) { ret = iface_add(&h->rbtree, &data.iface); if (ret) return ret; } } else if (!ret) return ret; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) data.physdev = 1; if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) flags |= (cadt_flags << 16); } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { data.ip = htonl(ip & ip_set_hostmask(data.cidr)); ret = adtfn(set, &data, timeout, flags); return ip_set_eexist(ret, flags) ? 
0 : ret; } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, data.cidr); } if (retried) ip = h->next.ip; while (!after(ip, ip_to)) { data.ip = htonl(ip); last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); ret = adtfn(set, &data, timeout, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; ip = last + 1; } return ret; } static bool hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b) { const struct ip_set_hash *x = a->data; const struct ip_set_hash *y = b->data; /* Resizing changes htable_bits, so we ignore it */ return x->maxelem == y->maxelem && x->timeout == y->timeout; } /* The type variant functions: IPv6 */ struct hash_netiface6_elem_hashed { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; }; #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) struct hash_netiface6_elem { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; const char *iface; }; struct hash_netiface6_telem { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 padding; const char *iface; unsigned long timeout; }; static inline bool hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, const struct hash_netiface6_elem *ip2, u32 *multi) { return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && ip1->iface == ip2->iface; } static inline bool hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem) { return elem->cidr == 0; } static inline void hash_netiface6_data_copy(struct hash_netiface6_elem *dst, const struct hash_netiface6_elem *src) { memcpy(dst, src, sizeof(*dst)); } static inline void hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) { dst->nomatch = flags & IPSET_FLAG_NOMATCH; } static inline bool 
hash_netiface6_data_match(const struct hash_netiface6_elem *elem) { return !elem->nomatch; } static inline void hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) { elem->cidr = 0; } static inline void ip6_netmask(union nf_inet_addr *ip, u8 prefix) { ip->ip6[0] &= ip_set_netmask6(prefix)[0]; ip->ip6[1] &= ip_set_netmask6(prefix)[1]; ip->ip6[2] &= ip_set_netmask6(prefix)[2]; ip->ip6[3] &= ip_set_netmask6(prefix)[3]; } static inline void hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr; } static bool hash_netiface6_data_list(struct sk_buff *skb, const struct hash_netiface6_elem *data) { u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); return 0; nla_put_failure: return 1; } static bool hash_netiface6_data_tlist(struct sk_buff *skb, const struct hash_netiface6_elem *data) { const struct hash_netiface6_telem *e = (const struct hash_netiface6_telem *)data; u32 flags = data->physdev ? 
IPSET_FLAG_PHYSDEV : 0; if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); if (flags) NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(e->timeout))); return 0; nla_put_failure: return 1; } #undef PF #undef HOST_MASK #define PF 6 #define HOST_MASK 128 #include <linux/netfilter/ipset/ip_set_ahash.h> static inline void hash_netiface6_data_next(struct ip_set_hash *h, const struct hash_netiface6_elem *d) { } static int hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, const struct ip_set_adt_opt *opt) { struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem data = { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK }; int ret; if (data.cidr == 0) return -EINVAL; if (adt == IPSET_TEST) data.cidr = HOST_MASK; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6_netmask(&data.ip, data.cidr); if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #ifdef CONFIG_BRIDGE_NETFILTER const struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (!nf_bridge) return -EINVAL; data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); data.physdev = 1; #else data.iface = NULL; #endif } else data.iface = SRCDIR ? 
IFACE(in) : IFACE(out); if (!data.iface) return -EINVAL; ret = iface_test(&h->rbtree, &data.iface); if (adt == IPSET_ADD) { if (!ret) { ret = iface_add(&h->rbtree, &data.iface); if (ret) return ret; } } else if (!ret) return ret; return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); } static int hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem data = { .cidr = HOST_MASK }; u32 timeout = h->timeout; char iface[IFNAMSIZ] = {}; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!data.cidr || data.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; ip6_netmask(&data.ip, data.cidr); if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout(h->timeout)) return -IPSET_ERR_TIMEOUT; timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); data.iface = iface; ret = iface_test(&h->rbtree, &data.iface); if (adt == IPSET_ADD) { if (!ret) { ret = iface_add(&h->rbtree, &data.iface); if (ret) return ret; } } else if (!ret) return ret; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) data.physdev = 1; if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) flags |= (cadt_flags << 16); } ret = adtfn(set, &data, timeout, flags); return ip_set_eexist(ret, flags) ? 
0 : ret; } /* Create hash:ip type of sets */ static int hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags) { struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_HASHSIZE]) { hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); if (hashsize < IPSET_MIMINAL_HASHSIZE) hashsize = IPSET_MIMINAL_HASHSIZE; } if (tb[IPSET_ATTR_MAXELEM]) maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); h = kzalloc(sizeof(*h) + sizeof(struct ip_set_hash_nets) * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL); if (!h) return -ENOMEM; h->maxelem = maxelem; get_random_bytes(&h->initval, sizeof(h->initval)); h->timeout = IPSET_NO_TIMEOUT; h->ahash_max = AHASH_MAX_SIZE; hbits = htable_bits(hashsize); hsize = htable_size(hbits); if (hsize == 0) { kfree(h); return -ENOMEM; } h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; } h->table->htable_bits = hbits; h->rbtree = RB_ROOT; set->data = h; if (tb[IPSET_ATTR_TIMEOUT]) { h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); set->variant = set->family == NFPROTO_IPV4 ? &hash_netiface4_tvariant : &hash_netiface6_tvariant; if (set->family == NFPROTO_IPV4) hash_netiface4_gc_init(set); else hash_netiface6_gc_init(set); } else { set->variant = set->family == NFPROTO_IPV4 ? 
&hash_netiface4_variant : &hash_netiface6_variant; } pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", set->name, jhash_size(h->table->htable_bits), h->table->htable_bits, h->maxelem, set->data, h->table); return 0; } static struct ip_set_type hash_netiface_type __read_mostly = { .name = "hash:net,iface", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, .dimension = IPSET_DIM_TWO, .family = NFPROTO_UNSPEC, .revision_min = 0, .revision_max = 1, /* nomatch flag support added */ .create = hash_netiface_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, }, .me = THIS_MODULE, }; static int __init hash_netiface_init(void) { return ip_set_type_register(&hash_netiface_type); } static void __exit hash_netiface_fini(void) { ip_set_type_unregister(&hash_netiface_type); } module_init(hash_netiface_init); module_exit(hash_netiface_fini);
gpl-2.0
larks/linux-rcu
drivers/ide/ide-4drives.c
4770
1515
#include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/ide.h> #define DRV_NAME "ide-4drives" static int probe_4drives; module_param_named(probe, probe_4drives, bool, 0); MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); static void ide_4drives_init_dev(ide_drive_t *drive) { if (drive->hwif->channel) drive->select ^= 0x20; } static const struct ide_port_ops ide_4drives_port_ops = { .init_dev = ide_4drives_init_dev, }; static const struct ide_port_info ide_4drives_port_info = { .port_ops = &ide_4drives_port_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA | IDE_HFLAG_4DRIVES, .chipset = ide_4drives, }; static int __init ide_4drives_init(void) { unsigned long base = 0x1f0, ctl = 0x3f6; struct ide_hw hw, *hws[] = { &hw, &hw }; if (probe_4drives == 0) return -ENODEV; if (!request_region(base, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", DRV_NAME, base, base + 7); return -EBUSY; } if (!request_region(ctl, 1, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n", DRV_NAME, ctl); release_region(base, 8); return -EBUSY; } memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, base, ctl); hw.irq = 14; return ide_host_add(&ide_4drives_port_info, hws, 2, NULL); } module_init(ide_4drives_init); MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("generic IDE chipset with 4 drives/port support"); MODULE_LICENSE("GPL");
gpl-2.0
SlimRoms/kernel_sony_msm8974
drivers/media/video/au0828/au0828-i2c.c
5026
9239
/* * Driver for the Auvitek AU0828 USB bridge * * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/io.h> #include "au0828.h" #include <media/v4l2-common.h> static int i2c_scan; module_param(i2c_scan, int, 0444); MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); #define I2C_WAIT_DELAY 512 #define I2C_WAIT_RETRY 64 static inline int i2c_slave_did_write_ack(struct i2c_adapter *i2c_adap) { struct au0828_dev *dev = i2c_adap->algo_data; return au0828_read(dev, AU0828_I2C_STATUS_201) & AU0828_I2C_STATUS_NO_WRITE_ACK ? 0 : 1; } static inline int i2c_slave_did_read_ack(struct i2c_adapter *i2c_adap) { struct au0828_dev *dev = i2c_adap->algo_data; return au0828_read(dev, AU0828_I2C_STATUS_201) & AU0828_I2C_STATUS_NO_READ_ACK ? 0 : 1; } static int i2c_wait_read_ack(struct i2c_adapter *i2c_adap) { int count; for (count = 0; count < I2C_WAIT_RETRY; count++) { if (!i2c_slave_did_read_ack(i2c_adap)) break; udelay(I2C_WAIT_DELAY); } if (I2C_WAIT_RETRY == count) return 0; return 1; } static inline int i2c_is_read_busy(struct i2c_adapter *i2c_adap) { struct au0828_dev *dev = i2c_adap->algo_data; return au0828_read(dev, AU0828_I2C_STATUS_201) & AU0828_I2C_STATUS_READ_DONE ? 
0 : 1; } static int i2c_wait_read_done(struct i2c_adapter *i2c_adap) { int count; for (count = 0; count < I2C_WAIT_RETRY; count++) { if (!i2c_is_read_busy(i2c_adap)) break; udelay(I2C_WAIT_DELAY); } if (I2C_WAIT_RETRY == count) return 0; return 1; } static inline int i2c_is_write_done(struct i2c_adapter *i2c_adap) { struct au0828_dev *dev = i2c_adap->algo_data; return au0828_read(dev, AU0828_I2C_STATUS_201) & AU0828_I2C_STATUS_WRITE_DONE ? 1 : 0; } static int i2c_wait_write_done(struct i2c_adapter *i2c_adap) { int count; for (count = 0; count < I2C_WAIT_RETRY; count++) { if (i2c_is_write_done(i2c_adap)) break; udelay(I2C_WAIT_DELAY); } if (I2C_WAIT_RETRY == count) return 0; return 1; } static inline int i2c_is_busy(struct i2c_adapter *i2c_adap) { struct au0828_dev *dev = i2c_adap->algo_data; return au0828_read(dev, AU0828_I2C_STATUS_201) & AU0828_I2C_STATUS_BUSY ? 1 : 0; } static int i2c_wait_done(struct i2c_adapter *i2c_adap) { int count; for (count = 0; count < I2C_WAIT_RETRY; count++) { if (!i2c_is_busy(i2c_adap)) break; udelay(I2C_WAIT_DELAY); } if (I2C_WAIT_RETRY == count) return 0; return 1; } /* FIXME: Implement join handling correctly */ static int i2c_sendbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined_rlen) { int i, strobe = 0; struct au0828_dev *dev = i2c_adap->algo_data; dprintk(4, "%s()\n", __func__); au0828_write(dev, AU0828_I2C_MULTIBYTE_MODE_2FF, 0x01); /* Set the I2C clock */ au0828_write(dev, AU0828_I2C_CLK_DIVIDER_202, dev->board.i2c_clk_divider); /* Hardware needs 8 bit addresses */ au0828_write(dev, AU0828_I2C_DEST_ADDR_203, msg->addr << 1); dprintk(4, "SEND: %02x\n", msg->addr); /* Deal with i2c_scan */ if (msg->len == 0) { /* The analog tuner detection code makes use of the SMBUS_QUICK message (which involves a zero length i2c write). To avoid checking the status register when we didn't strobe out any actual bytes to the bus, just do a read check. 
This is consistent with how I saw i2c device checking done in the USB trace of the Windows driver */ au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_READ); if (!i2c_wait_done(i2c_adap)) return -EIO; if (i2c_wait_read_ack(i2c_adap)) return -EIO; return 0; } for (i = 0; i < msg->len;) { dprintk(4, " %02x\n", msg->buf[i]); au0828_write(dev, AU0828_I2C_WRITE_FIFO_205, msg->buf[i]); strobe++; i++; if ((strobe >= 4) || (i >= msg->len)) { /* Strobe the byte into the bus */ if (i < msg->len) au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_WRITE | AU0828_I2C_TRIGGER_HOLD); else au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_WRITE); /* Reset strobe trigger */ strobe = 0; if (!i2c_wait_write_done(i2c_adap)) return -EIO; } } if (!i2c_wait_done(i2c_adap)) return -EIO; dprintk(4, "\n"); return msg->len; } /* FIXME: Implement join handling correctly */ static int i2c_readbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined) { struct au0828_dev *dev = i2c_adap->algo_data; int i; dprintk(4, "%s()\n", __func__); au0828_write(dev, AU0828_I2C_MULTIBYTE_MODE_2FF, 0x01); /* Set the I2C clock */ au0828_write(dev, AU0828_I2C_CLK_DIVIDER_202, dev->board.i2c_clk_divider); /* Hardware needs 8 bit addresses */ au0828_write(dev, AU0828_I2C_DEST_ADDR_203, msg->addr << 1); dprintk(4, " RECV:\n"); /* Deal with i2c_scan */ if (msg->len == 0) { au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_READ); if (i2c_wait_read_ack(i2c_adap)) return -EIO; return 0; } for (i = 0; i < msg->len;) { i++; if (i < msg->len) au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_READ | AU0828_I2C_TRIGGER_HOLD); else au0828_write(dev, AU0828_I2C_TRIGGER_200, AU0828_I2C_TRIGGER_READ); if (!i2c_wait_read_done(i2c_adap)) return -EIO; msg->buf[i-1] = au0828_read(dev, AU0828_I2C_READ_FIFO_209) & 0xff; dprintk(4, " %02x\n", msg->buf[i-1]); } if (!i2c_wait_done(i2c_adap)) return -EIO; dprintk(4, "\n"); return msg->len; } static int 
i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { int i, retval = 0; dprintk(4, "%s(num = %d)\n", __func__, num); for (i = 0; i < num; i++) { dprintk(4, "%s(num = %d) addr = 0x%02x len = 0x%x\n", __func__, num, msgs[i].addr, msgs[i].len); if (msgs[i].flags & I2C_M_RD) { /* read */ retval = i2c_readbytes(i2c_adap, &msgs[i], 0); } else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { /* write then read from same address */ retval = i2c_sendbytes(i2c_adap, &msgs[i], msgs[i + 1].len); if (retval < 0) goto err; i++; retval = i2c_readbytes(i2c_adap, &msgs[i], 1); } else { /* write */ retval = i2c_sendbytes(i2c_adap, &msgs[i], 0); } if (retval < 0) goto err; } return num; err: return retval; } static u32 au0828_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } static struct i2c_algorithm au0828_i2c_algo_template = { .master_xfer = i2c_xfer, .functionality = au0828_functionality, }; /* ----------------------------------------------------------------------- */ static struct i2c_adapter au0828_i2c_adap_template = { .name = DRIVER_NAME, .owner = THIS_MODULE, .algo = &au0828_i2c_algo_template, }; static struct i2c_client au0828_i2c_client_template = { .name = "au0828 internal", }; static char *i2c_devs[128] = { [0x8e >> 1] = "au8522", [0xa0 >> 1] = "eeprom", [0xc2 >> 1] = "tuner/xc5000", }; static void do_i2c_scan(char *name, struct i2c_client *c) { unsigned char buf; int i, rc; for (i = 0; i < 128; i++) { c->addr = i; rc = i2c_master_recv(c, &buf, 0); if (rc < 0) continue; printk(KERN_INFO "%s: i2c scan: found device @ 0x%x [%s]\n", name, i << 1, i2c_devs[i] ? 
i2c_devs[i] : "???"); } } /* init + register i2c adapter */ int au0828_i2c_register(struct au0828_dev *dev) { dprintk(1, "%s()\n", __func__); memcpy(&dev->i2c_adap, &au0828_i2c_adap_template, sizeof(dev->i2c_adap)); memcpy(&dev->i2c_algo, &au0828_i2c_algo_template, sizeof(dev->i2c_algo)); memcpy(&dev->i2c_client, &au0828_i2c_client_template, sizeof(dev->i2c_client)); dev->i2c_adap.dev.parent = &dev->usbdev->dev; strlcpy(dev->i2c_adap.name, DRIVER_NAME, sizeof(dev->i2c_adap.name)); dev->i2c_adap.algo = &dev->i2c_algo; dev->i2c_adap.algo_data = dev; i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev); i2c_add_adapter(&dev->i2c_adap); dev->i2c_client.adapter = &dev->i2c_adap; if (0 == dev->i2c_rc) { printk(KERN_INFO "%s: i2c bus registered\n", DRIVER_NAME); if (i2c_scan) do_i2c_scan(DRIVER_NAME, &dev->i2c_client); } else printk(KERN_INFO "%s: i2c bus register FAILED\n", DRIVER_NAME); return dev->i2c_rc; } int au0828_i2c_unregister(struct au0828_dev *dev) { i2c_del_adapter(&dev->i2c_adap); return 0; }
gpl-2.0
CyanideL/android_kernel_lge_hammerhead
sound/soc/fsl/mpc5200_psc_ac97.c
5026
8807
/* * linux/sound/mpc5200-ac97.c -- AC97 support for the Freescale MPC52xx chip. * * Copyright (C) 2009 Jon Smirl, Digispeaker * Author: Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/time.h> #include <asm/delay.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" #include "mpc5200_psc_ac97.h" #define DRV_NAME "mpc5200-psc-ac97" /* ALSA only supports a single AC97 device so static is recommend here */ static struct psc_dma *psc_dma; static unsigned short psc_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { int status; unsigned int val; mutex_lock(&psc_dma->mutex); /* Wait for command send status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (rdy)\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Force clear the data valid bit */ in_be32(&psc_dma->psc_regs->ac97_data); /* Send the read */ out_be32(&psc_dma->psc_regs->ac97_cmd, (1<<31) | ((reg & 0x7f) << 24)); /* Wait for the answer */ status = spin_event_timeout((in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_DATA_VAL), 100, 0); if (status == 0) { pr_err("timeout on ac97 read (val) %x\n", in_be16(&psc_dma->psc_regs->sr_csr.status)); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Get the data */ val = in_be32(&psc_dma->psc_regs->ac97_data); if (((val >> 24) & 0x7f) != reg) { pr_err("reg echo error on ac97 read\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } val = (val >> 8) & 0xffff; mutex_unlock(&psc_dma->mutex); return (unsigned short) val; } static void 
psc_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { int status; mutex_lock(&psc_dma->mutex); /* Wait for command status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (write)\n"); goto out; } /* Write data */ out_be32(&psc_dma->psc_regs->ac97_cmd, ((reg & 0x7f) << 24) | (val << 8)); out: mutex_unlock(&psc_dma->mutex); } static void psc_ac97_warm_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_AWR); udelay(3); out_be32(&regs->sicr, psc_dma->sicr); mutex_unlock(&psc_dma->mutex); } static void psc_ac97_cold_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); dev_dbg(psc_dma->dev, "cold reset\n"); mpc5200_psc_ac97_gpio_reset(psc_dma->id); /* Notify the PSC that a reset has occurred */ out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_ACRB); /* Re-enable RX and TX */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); mutex_unlock(&psc_dma->mutex); msleep(1); psc_ac97_warm_reset(ac97); } struct snd_ac97_bus_ops soc_ac97_ops = { .read = psc_ac97_read, .write = psc_ac97_write, .reset = psc_ac97_cold_reset, .warm_reset = psc_ac97_warm_reset, }; EXPORT_SYMBOL_GPL(soc_ac97_ops); static int psc_ac97_hw_analog_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i" " periods=%i buffer_size=%i buffer_bytes=%i channels=%i" " rate=%i format=%i\n", __func__, substream, params_period_size(params), params_period_bytes(params), params_periods(params), params_buffer_size(params), 
params_buffer_bytes(params), params_channels(params), params_rate(params), params_format(params)); /* Determine the set of enable bits to turn on */ s->ac97_slot_bits = (params_channels(params) == 1) ? 0x100 : 0x300; if (substream->pstr->stream != SNDRV_PCM_STREAM_CAPTURE) s->ac97_slot_bits <<= 16; return 0; } static int psc_ac97_hw_digital_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "%s(substream=%p)\n", __func__, substream); if (params_channels(params) == 1) out_be32(&psc_dma->psc_regs->ac97_slots, 0x01000000); else out_be32(&psc_dma->psc_regs->ac97_slots, 0x03000000); return 0; } static int psc_ac97_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); switch (cmd) { case SNDRV_PCM_TRIGGER_START: dev_dbg(psc_dma->dev, "AC97 START: stream=%i\n", substream->pstr->stream); /* Set the slot enable bits */ psc_dma->slots |= s->ac97_slot_bits; out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; case SNDRV_PCM_TRIGGER_STOP: dev_dbg(psc_dma->dev, "AC97 STOP: stream=%i\n", substream->pstr->stream); /* Clear the slot enable bits */ psc_dma->slots &= ~(s->ac97_slot_bits); out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; } return 0; } static int psc_ac97_probe(struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; /* Go */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } /* --------------------------------------------------------------------- * ALSA SoC Bindings * * - Digital Audio Interface (DAI) template * - create/destroy dai hooks */ /** * psc_ac97_dai_template: template CPU Digital Audio Interface */ static const struct 
snd_soc_dai_ops psc_ac97_analog_ops = { .hw_params = psc_ac97_hw_analog_params, .trigger = psc_ac97_trigger, }; static const struct snd_soc_dai_ops psc_ac97_digital_ops = { .hw_params = psc_ac97_hw_digital_params, }; static struct snd_soc_dai_driver psc_ac97_dai[] = { { .ac97_control = 1, .probe = psc_ac97_probe, .playback = { .channels_min = 1, .channels_max = 6, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .capture = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .ops = &psc_ac97_analog_ops, }, { .ac97_control = 1, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }, .ops = &psc_ac97_digital_ops, } }; /* --------------------------------------------------------------------- * OF platform bus binding code: * - Probe/remove operations * - OF device match table */ static int __devinit psc_ac97_of_probe(struct platform_device *op) { int rc; struct snd_ac97 ac97; struct mpc52xx_psc __iomem *regs; rc = snd_soc_register_dais(&op->dev, psc_ac97_dai, ARRAY_SIZE(psc_ac97_dai)); if (rc != 0) { dev_err(&op->dev, "Failed to register DAI\n"); return rc; } psc_dma = dev_get_drvdata(&op->dev); regs = psc_dma->psc_regs; ac97.private_data = psc_dma; psc_dma->imr = 0; out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); /* Configure the serial interface mode to AC97 */ psc_dma->sicr = MPC52xx_PSC_SICR_SIM_AC97 | MPC52xx_PSC_SICR_ENAC97; out_be32(&regs->sicr, psc_dma->sicr); /* No slots active */ out_be32(&regs->ac97_slots, 0x00000000); return 0; } static int __devexit psc_ac97_of_remove(struct platform_device *op) { snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_ac97_dai)); return 0; } /* Match table for of_platform binding */ static struct of_device_id psc_ac97_match[] __devinitdata = { { .compatible = "fsl,mpc5200-psc-ac97", }, { .compatible = 
"fsl,mpc5200b-psc-ac97", }, {} }; MODULE_DEVICE_TABLE(of, psc_ac97_match); static struct platform_driver psc_ac97_driver = { .probe = psc_ac97_of_probe, .remove = __devexit_p(psc_ac97_of_remove), .driver = { .name = "mpc5200-psc-ac97", .owner = THIS_MODULE, .of_match_table = psc_ac97_match, }, }; module_platform_driver(psc_ac97_driver); MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>"); MODULE_DESCRIPTION("mpc5200 AC97 module"); MODULE_LICENSE("GPL");
gpl-2.0
pausa/android_kernel_htc_k2_ul
drivers/staging/ramster/cluster/masklog.c
7330
3944
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/uaccess.h> #include "masklog.h" struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); EXPORT_SYMBOL_GPL(r2_mlog_and_bits); struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0); EXPORT_SYMBOL_GPL(r2_mlog_not_bits); static ssize_t mlog_mask_show(u64 mask, char *buf) { char *state; if (__mlog_test_u64(mask, r2_mlog_and_bits)) state = "allow"; else if (__mlog_test_u64(mask, r2_mlog_not_bits)) state = "deny"; else state = "off"; return snprintf(buf, PAGE_SIZE, "%s\n", state); } static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count) { if (!strnicmp(buf, "allow", 5)) { __mlog_set_u64(mask, r2_mlog_and_bits); __mlog_clear_u64(mask, r2_mlog_not_bits); } else if (!strnicmp(buf, "deny", 4)) { __mlog_set_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else if (!strnicmp(buf, "off", 3)) { __mlog_clear_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else return -EINVAL; return count; } struct mlog_attribute { struct 
attribute attr; u64 mask; }; #define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr) #define define_mask(_name) { \ .attr = { \ .name = #_name, \ .mode = S_IRUGO | S_IWUSR, \ }, \ .mask = ML_##_name, \ } static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(TCP), define_mask(MSG), define_mask(SOCKET), define_mask(HEARTBEAT), define_mask(HB_BIO), define_mask(DLMFS), define_mask(DLM), define_mask(DLM_DOMAIN), define_mask(DLM_THREAD), define_mask(DLM_MASTER), define_mask(DLM_RECOVERY), define_mask(DLM_GLUE), define_mask(VOTE), define_mask(CONN), define_mask(QUORUM), define_mask(BASTS), define_mask(CLUSTER), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), }; static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; static ssize_t mlog_show(struct kobject *obj, struct attribute *attr, char *buf) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_show(mlog_attr->mask, buf); } static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, const char *buf, size_t count) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_store(mlog_attr->mask, buf, count); } static const struct sysfs_ops mlog_attr_ops = { .show = mlog_show, .store = mlog_store, }; static struct kobj_type mlog_ktype = { .default_attrs = mlog_attr_ptrs, .sysfs_ops = &mlog_attr_ops, }; static struct kset mlog_kset = { .kobj = {.ktype = &mlog_ktype}, }; int r2_mlog_sys_init(struct kset *r2cb_kset) { int i = 0; while (mlog_attrs[i].attr.mode) { mlog_attr_ptrs[i] = &mlog_attrs[i].attr; i++; } mlog_attr_ptrs[i] = NULL; kobject_set_name(&mlog_kset.kobj, "logmask"); mlog_kset.kobj.kset = r2cb_kset; return kset_register(&mlog_kset); } void r2_mlog_sys_shutdown(void) { kset_unregister(&mlog_kset); }
gpl-2.0
jfvelte-dev/android_kernel_samsung_jf
drivers/staging/ramster/cluster/masklog.c
7330
3944
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/uaccess.h> #include "masklog.h" struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); EXPORT_SYMBOL_GPL(r2_mlog_and_bits); struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0); EXPORT_SYMBOL_GPL(r2_mlog_not_bits); static ssize_t mlog_mask_show(u64 mask, char *buf) { char *state; if (__mlog_test_u64(mask, r2_mlog_and_bits)) state = "allow"; else if (__mlog_test_u64(mask, r2_mlog_not_bits)) state = "deny"; else state = "off"; return snprintf(buf, PAGE_SIZE, "%s\n", state); } static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count) { if (!strnicmp(buf, "allow", 5)) { __mlog_set_u64(mask, r2_mlog_and_bits); __mlog_clear_u64(mask, r2_mlog_not_bits); } else if (!strnicmp(buf, "deny", 4)) { __mlog_set_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else if (!strnicmp(buf, "off", 3)) { __mlog_clear_u64(mask, r2_mlog_not_bits); __mlog_clear_u64(mask, r2_mlog_and_bits); } else return -EINVAL; return count; } struct mlog_attribute { struct 
attribute attr; u64 mask; }; #define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr) #define define_mask(_name) { \ .attr = { \ .name = #_name, \ .mode = S_IRUGO | S_IWUSR, \ }, \ .mask = ML_##_name, \ } static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(TCP), define_mask(MSG), define_mask(SOCKET), define_mask(HEARTBEAT), define_mask(HB_BIO), define_mask(DLMFS), define_mask(DLM), define_mask(DLM_DOMAIN), define_mask(DLM_THREAD), define_mask(DLM_MASTER), define_mask(DLM_RECOVERY), define_mask(DLM_GLUE), define_mask(VOTE), define_mask(CONN), define_mask(QUORUM), define_mask(BASTS), define_mask(CLUSTER), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), }; static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; static ssize_t mlog_show(struct kobject *obj, struct attribute *attr, char *buf) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_show(mlog_attr->mask, buf); } static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, const char *buf, size_t count) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_store(mlog_attr->mask, buf, count); } static const struct sysfs_ops mlog_attr_ops = { .show = mlog_show, .store = mlog_store, }; static struct kobj_type mlog_ktype = { .default_attrs = mlog_attr_ptrs, .sysfs_ops = &mlog_attr_ops, }; static struct kset mlog_kset = { .kobj = {.ktype = &mlog_ktype}, }; int r2_mlog_sys_init(struct kset *r2cb_kset) { int i = 0; while (mlog_attrs[i].attr.mode) { mlog_attr_ptrs[i] = &mlog_attrs[i].attr; i++; } mlog_attr_ptrs[i] = NULL; kobject_set_name(&mlog_kset.kobj, "logmask"); mlog_kset.kobj.kset = r2cb_kset; return kset_register(&mlog_kset); } void r2_mlog_sys_shutdown(void) { kset_unregister(&mlog_kset); }
gpl-2.0
Altaf-Mahdi/mako-kernel-old
drivers/staging/comedi/drivers/comedi_bond.c
8098
15371
/* comedi/drivers/comedi_bond.c A Comedi driver to 'bond' or merge multiple drivers and devices as one. COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: comedi_bond Description: A driver to 'bond' (merge) multiple subdevices from multiple devices together as one. Devices: Author: ds Updated: Mon, 10 Oct 00:18:25 -0500 Status: works This driver allows you to 'bond' (merge) multiple comedi subdevices (coming from possibly difference boards and/or drivers) together. For example, if you had a board with 2 different DIO subdevices, and another with 1 DIO subdevice, you could 'bond' them with this driver so that they look like one big fat DIO subdevice. This makes writing applications slightly easier as you don't have to worry about managing different subdevices in the application -- you just worry about indexing one linear array of channel id's. Right now only DIO subdevices are supported as that's the personal itch I am scratching with this driver. If you want to add support for AI and AO subdevs, go right on ahead and do so! Commands aren't supported -- although it would be cool if they were. Configuration Options: List of comedi-minors to bond. 
All subdevices of the same type within each minor will be concatenated together in the order given here. */ #include <linux/string.h> #include <linux/slab.h> #include "../comedi.h" #include "../comedilib.h" #include "../comedidev.h" /* The maxiumum number of channels per subdevice. */ #define MAX_CHANS 256 #define MODULE_NAME "comedi_bond" MODULE_LICENSE("GPL"); #ifndef STR # define STR1(x) #x # define STR(x) STR1(x) #endif static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "If true, print extra cryptic debugging output useful" "only to developers."); #define LOG_MSG(x...) printk(KERN_INFO MODULE_NAME": "x) #define DEBUG(x...) \ do { \ if (debug) \ printk(KERN_DEBUG MODULE_NAME": DEBUG: "x); \ } while (0) #define WARNING(x...) printk(KERN_WARNING MODULE_NAME ": WARNING: "x) #define ERROR(x...) printk(KERN_ERR MODULE_NAME ": INTERNAL ERROR: "x) MODULE_AUTHOR("Calin A. Culianu"); MODULE_DESCRIPTION(MODULE_NAME "A driver for COMEDI to bond multiple COMEDI " "devices together as one. In the words of John Lennon: " "'And the world will live as one...'"); /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. */ struct BondingBoard { const char *name; }; static const struct BondingBoard bondingBoards[] = { { .name = MODULE_NAME, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct BondingBoard *)dev->board_ptr) struct BondedDevice { struct comedi_device *dev; unsigned minor; unsigned subdev; unsigned subdev_type; unsigned nchans; unsigned chanid_offset; /* The offset into our unified linear channel-id's of chanid 0 on this subdevice. */ }; /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. 
*/ struct Private { # define MAX_BOARD_NAME 256 char name[MAX_BOARD_NAME]; struct BondedDevice **devs; unsigned ndevs; struct BondedDevice *chanIdDevMap[MAX_CHANS]; unsigned nchans; }; /* * most drivers define the following macro to make it easy to * access the private structure. */ #define devpriv ((struct Private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int bonding_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int bonding_detach(struct comedi_device *dev); /** Build Private array of all devices.. */ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it); static void doDevUnconfig(struct comedi_device *dev); /* Ugly implementation of realloc that always copies memory around -- I'm lazy, * what can I say? I like to do wasteful memcopies.. :) */ static void *Realloc(const void *ptr, size_t len, size_t old_len); static struct comedi_driver driver_bonding = { .driver_name = MODULE_NAME, .module = THIS_MODULE, .attach = bonding_attach, .detach = bonding_detach, /* It is not necessary to implement the following members if you are * writing a driver for a ISA PnP or PCI card */ /* Most drivers will support multiple types of boards by * having an array of board structures. These were defined * in skel_boards[] above. Note that the element 'name' * was first in the structure -- Comedi uses this fact to * extract the name of the board without knowing any details * about the structure except for its length. * When a device is attached (by comedi_config), the name * of the device is given to Comedi, and Comedi tries to * match it by going through the list of board names. If * there is a match, the address of the pointer is put * into dev->board_ptr and driver->attach() is called. 
* * Note that these are not necessary if you can determine * the type of board in software. ISA PnP, PCI, and PCMCIA * devices are such boards. */ .board_name = &bondingBoards[0].name, .offset = sizeof(struct BondingBoard), .num_names = ARRAY_SIZE(bondingBoards), }; static int bonding_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int bonding_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int bonding_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; LOG_MSG("comedi%d\n", dev->minor); /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct Private)) < 0) return -ENOMEM; /* * Setup our bonding from config params.. sets up our Private struct.. */ if (!doDevConfig(dev, it)) return -EINVAL; /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = devpriv->name; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. */ if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; s = dev->subdevices + 0; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = devpriv->nchans; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = bonding_dio_insn_bits; s->insn_config = bonding_dio_insn_config; LOG_MSG("attached with %u DIO channels coming from %u different " "subdevices all bonded together. " "John Lennon would be proud!\n", devpriv->nchans, devpriv->ndevs); return 1; } /* * _detach is called to deconfigure a device. 
It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int bonding_detach(struct comedi_device *dev) { LOG_MSG("comedi%d: remove\n", dev->minor); doDevUnconfig(dev); return 0; } /* DIO devices are slightly special. Although it is possible to * implement the insn_read/insn_write interface, it is much more * useful to applications if you implement the insn_bits interface. * This allows packed reading/writing of the DIO channels. The * comedi core can convert between insn_bits and insn_read/write */ static int bonding_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { #define LSAMPL_BITS (sizeof(unsigned int)*8) unsigned nchans = LSAMPL_BITS, num_done = 0, i; if (insn->n != 2) return -EINVAL; if (devpriv->nchans < nchans) nchans = devpriv->nchans; /* The insn data is a mask in data[0] and the new data * in data[1], each channel cooresponding to a bit. */ for (i = 0; num_done < nchans && i < devpriv->ndevs; ++i) { struct BondedDevice *bdev = devpriv->devs[i]; /* Grab the channel mask and data of only the bits corresponding to this subdevice.. need to shift them to zero position of course. */ /* Bits corresponding to this subdev. */ unsigned int subdevMask = ((1 << bdev->nchans) - 1); unsigned int writeMask, dataBits; /* Argh, we have >= LSAMPL_BITS chans.. 
take all bits */ if (bdev->nchans >= LSAMPL_BITS) subdevMask = (unsigned int)(-1); writeMask = (data[0] >> num_done) & subdevMask; dataBits = (data[1] >> num_done) & subdevMask; /* Read/Write the new digital lines */ if (comedi_dio_bitfield(bdev->dev, bdev->subdev, writeMask, &dataBits) != 2) return -EINVAL; /* Make room for the new bits in data[1], the return value */ data[1] &= ~(subdevMask << num_done); /* Put the bits in the return value */ data[1] |= (dataBits & subdevMask) << num_done; /* Save the new bits to the saved state.. */ s->state = data[1]; num_done += bdev->nchans; } return insn->n; } static int bonding_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec), ret, io_bits = s->io_bits; unsigned int io; struct BondedDevice *bdev; if (chan < 0 || chan >= devpriv->nchans) return -EINVAL; bdev = devpriv->chanIdDevMap[chan]; /* The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value COMEDI_INPUT or COMEDI_OUTPUT. */ switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: io = COMEDI_OUTPUT; /* is this really necessary? */ io_bits |= 1 << chan; break; case INSN_CONFIG_DIO_INPUT: io = COMEDI_INPUT; /* is this really necessary? */ io_bits &= ~(1 << chan); break; case INSN_CONFIG_DIO_QUERY: data[1] = (io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; break; } /* 'real' channel id for this subdev.. */ chan -= bdev->chanid_offset; ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, io); if (ret != 1) return -EINVAL; /* Finally, save the new io_bits values since we didn't get an error above. 
*/ s->io_bits = io_bits; return insn->n; } static void *Realloc(const void *oldmem, size_t newlen, size_t oldlen) { void *newmem = kmalloc(newlen, GFP_KERNEL); if (newmem && oldmem) memcpy(newmem, oldmem, min(oldlen, newlen)); kfree(oldmem); return newmem; } static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it) { int i; struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS]; memset(devs_opened, 0, sizeof(devs_opened)); devpriv->name[0] = 0; /* Loop through all comedi devices specified on the command-line, building our device list */ for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) { char file[] = "/dev/comediXXXXXX"; int minor = it->options[i]; struct comedi_device *d; int sdev = -1, nchans, tmp; struct BondedDevice *bdev = NULL; if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) { ERROR("Minor %d is invalid!\n", minor); return 0; } if (minor == dev->minor) { ERROR("Cannot bond this driver to itself!\n"); return 0; } if (devs_opened[minor]) { ERROR("Minor %d specified more than once!\n", minor); return 0; } snprintf(file, sizeof(file), "/dev/comedi%u", minor); file[sizeof(file) - 1] = 0; d = devs_opened[minor] = comedi_open(file); if (!d) { ERROR("Minor %u could not be opened\n", minor); return 0; } /* Do DIO, as that's all we support now.. */ while ((sdev = comedi_find_subdevice_by_type(d, COMEDI_SUBD_DIO, sdev + 1)) > -1) { nchans = comedi_get_n_channels(d, sdev); if (nchans <= 0) { ERROR("comedi_get_n_channels() returned %d " "on minor %u subdev %d!\n", nchans, minor, sdev); return 0; } bdev = kmalloc(sizeof(*bdev), GFP_KERNEL); if (!bdev) { ERROR("Out of memory.\n"); return 0; } bdev->dev = d; bdev->minor = minor; bdev->subdev = sdev; bdev->subdev_type = COMEDI_SUBD_DIO; bdev->nchans = nchans; bdev->chanid_offset = devpriv->nchans; /* map channel id's to BondedDevice * pointer.. */ while (nchans--) devpriv->chanIdDevMap[devpriv->nchans++] = bdev; /* Now put bdev pointer at end of devpriv->devs array * list.. 
*/ /* ergh.. ugly.. we need to realloc :( */ tmp = devpriv->ndevs * sizeof(bdev); devpriv->devs = Realloc(devpriv->devs, ++devpriv->ndevs * sizeof(bdev), tmp); if (!devpriv->devs) { ERROR("Could not allocate memory. " "Out of memory?"); return 0; } devpriv->devs[devpriv->ndevs - 1] = bdev; { /** Append dev:subdev to devpriv->name */ char buf[20]; int left = MAX_BOARD_NAME - strlen(devpriv->name) - 1; snprintf(buf, sizeof(buf), "%d:%d ", dev->minor, bdev->subdev); buf[sizeof(buf) - 1] = 0; strncat(devpriv->name, buf, left); } } } if (!devpriv->nchans) { ERROR("No channels found!\n"); return 0; } return 1; } static void doDevUnconfig(struct comedi_device *dev) { unsigned long devs_closed = 0; if (devpriv) { while (devpriv->ndevs-- && devpriv->devs) { struct BondedDevice *bdev; bdev = devpriv->devs[devpriv->ndevs]; if (!bdev) continue; if (!(devs_closed & (0x1 << bdev->minor))) { comedi_close(bdev->dev); devs_closed |= (0x1 << bdev->minor); } kfree(bdev); } kfree(devpriv->devs); devpriv->devs = NULL; kfree(devpriv); dev->private = NULL; } } static int __init init(void) { return comedi_driver_register(&driver_bonding); } static void __exit cleanup(void) { comedi_driver_unregister(&driver_bonding); } module_init(init); module_exit(cleanup);
gpl-2.0
chonix/trinity
drivers/hwmon/ibmpex.c
8098
15426
/* * A hwmon driver for the IBM PowerExecutive temperature/power sensors * Copyright (C) 2007 IBM * * Author: Darrick J. Wong <djwong@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ipmi.h> #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/slab.h> #define REFRESH_INTERVAL (2 * HZ) #define DRVNAME "ibmpex" #define PEX_GET_VERSION 1 #define PEX_GET_SENSOR_COUNT 2 #define PEX_GET_SENSOR_NAME 3 #define PEX_RESET_HIGH_LOW 4 #define PEX_GET_SENSOR_DATA 6 #define PEX_NET_FUNCTION 0x3A #define PEX_COMMAND 0x3C static inline u16 extract_value(const char *data, int offset) { return be16_to_cpup((__be16 *)&data[offset]); } #define TEMP_SENSOR 1 #define POWER_SENSOR 2 #define PEX_SENSOR_TYPE_LEN 3 static u8 const power_sensor_sig[] = {0x70, 0x77, 0x72}; static u8 const temp_sensor_sig[] = {0x74, 0x65, 0x6D}; #define PEX_MULT_LEN 2 static u8 const watt_sensor_sig[] = {0x41, 0x43}; #define PEX_NUM_SENSOR_FUNCS 3 static char const * const power_sensor_name_templates[] = { "%s%d_average", "%s%d_average_lowest", "%s%d_average_highest" }; static char const * const temp_sensor_name_templates[] = { "%s%d_input", "%s%d_input_lowest", "%s%d_input_highest" }; static void ibmpex_msg_handler(struct ipmi_recv_msg *msg, void 
*user_msg_data); static void ibmpex_register_bmc(int iface, struct device *dev); static void ibmpex_bmc_gone(int iface); struct ibmpex_sensor_data { int in_use; s16 values[PEX_NUM_SENSOR_FUNCS]; int multiplier; struct sensor_device_attribute_2 attr[PEX_NUM_SENSOR_FUNCS]; }; struct ibmpex_bmc_data { struct list_head list; struct device *hwmon_dev; struct device *bmc_device; struct mutex lock; char valid; unsigned long last_updated; /* In jiffies */ struct ipmi_addr address; struct completion read_complete; ipmi_user_t user; int interface; struct kernel_ipmi_msg tx_message; unsigned char tx_msg_data[IPMI_MAX_MSG_LENGTH]; long tx_msgid; unsigned char rx_msg_data[IPMI_MAX_MSG_LENGTH]; unsigned long rx_msg_len; unsigned char rx_result; int rx_recv_type; unsigned char sensor_major; unsigned char sensor_minor; unsigned char num_sensors; struct ibmpex_sensor_data *sensors; }; struct ibmpex_driver_data { struct list_head bmc_data; struct ipmi_smi_watcher bmc_events; struct ipmi_user_hndl ipmi_hndlrs; }; static struct ibmpex_driver_data driver_data = { .bmc_data = LIST_HEAD_INIT(driver_data.bmc_data), .bmc_events = { .owner = THIS_MODULE, .new_smi = ibmpex_register_bmc, .smi_gone = ibmpex_bmc_gone, }, .ipmi_hndlrs = { .ipmi_recv_hndl = ibmpex_msg_handler, }, }; static int ibmpex_send_message(struct ibmpex_bmc_data *data) { int err; err = ipmi_validate_addr(&data->address, sizeof(data->address)); if (err) goto out; data->tx_msgid++; err = ipmi_request_settime(data->user, &data->address, data->tx_msgid, &data->tx_message, data, 0, 0, 0); if (err) goto out1; return 0; out1: dev_err(data->bmc_device, "request_settime=%x\n", err); return err; out: dev_err(data->bmc_device, "validate_addr=%x\n", err); return err; } static int ibmpex_ver_check(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_VERSION; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 6) return -ENOENT; 
data->sensor_major = data->rx_msg_data[0]; data->sensor_minor = data->rx_msg_data[1]; dev_info(data->bmc_device, "Found BMC with sensor interface " "v%d.%d %d-%02d-%02d on interface %d\n", data->sensor_major, data->sensor_minor, extract_value(data->rx_msg_data, 2), data->rx_msg_data[4], data->rx_msg_data[5], data->interface); return 0; } static int ibmpex_query_sensor_count(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_SENSOR_COUNT; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 1) return -ENOENT; return data->rx_msg_data[0]; } static int ibmpex_query_sensor_name(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_NAME; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 1) return -ENOENT; return 0; } static int ibmpex_query_sensor_data(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_DATA; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 26) { dev_err(data->bmc_device, "Error reading sensor %d.\n", sensor); return -ENOENT; } return 0; } static int ibmpex_reset_high_low_data(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_RESET_HIGH_LOW; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); return 0; } static void ibmpex_update_device(struct ibmpex_bmc_data *data) { int i, err; mutex_lock(&data->lock); if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && data->valid) goto out; for (i = 0; i < data->num_sensors; i++) { if (!data->sensors[i].in_use) continue; err = ibmpex_query_sensor_data(data, i); if (err) continue; data->sensors[i].values[0] = extract_value(data->rx_msg_data, 
16); data->sensors[i].values[1] = extract_value(data->rx_msg_data, 18); data->sensors[i].values[2] = extract_value(data->rx_msg_data, 20); } data->last_updated = jiffies; data->valid = 1; out: mutex_unlock(&data->lock); } static struct ibmpex_bmc_data *get_bmc_data(int iface) { struct ibmpex_bmc_data *p, *next; list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) if (p->interface == iface) return p; return NULL; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", DRVNAME); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); static ssize_t ibmpex_show_sensor(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); struct ibmpex_bmc_data *data = dev_get_drvdata(dev); int mult = data->sensors[attr->index].multiplier; ibmpex_update_device(data); return sprintf(buf, "%d\n", data->sensors[attr->index].values[attr->nr] * mult); } static ssize_t ibmpex_reset_high_low(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct ibmpex_bmc_data *data = dev_get_drvdata(dev); ibmpex_reset_high_low_data(data); return count; } static SENSOR_DEVICE_ATTR(reset_high_low, S_IWUSR, NULL, ibmpex_reset_high_low, 0); static int is_power_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, power_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int is_temp_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, temp_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int power_sensor_multiplier(struct ibmpex_bmc_data *data, const char *sensor_id, int len) { int i; if (data->sensor_major == 2) return 1000000; for (i = PEX_SENSOR_TYPE_LEN; i < len - 1; i++) if (!memcmp(&sensor_id[i], watt_sensor_sig, PEX_MULT_LEN)) return 1000000; return 100000; } static int 
create_sensor(struct ibmpex_bmc_data *data, int type, int counter, int sensor, int func) { int err; char *n; n = kmalloc(32, GFP_KERNEL); if (!n) return -ENOMEM; if (type == TEMP_SENSOR) sprintf(n, temp_sensor_name_templates[func], "temp", counter); else if (type == POWER_SENSOR) sprintf(n, power_sensor_name_templates[func], "power", counter); sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr); data->sensors[sensor].attr[func].dev_attr.attr.name = n; data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; data->sensors[sensor].attr[func].index = sensor; data->sensors[sensor].attr[func].nr = func; err = device_create_file(data->bmc_device, &data->sensors[sensor].attr[func].dev_attr); if (err) { data->sensors[sensor].attr[func].dev_attr.attr.name = NULL; kfree(n); return err; } return 0; } static int ibmpex_find_sensors(struct ibmpex_bmc_data *data) { int i, j, err; int sensor_type; int sensor_counter; int num_power = 0; int num_temp = 0; err = ibmpex_query_sensor_count(data); if (err <= 0) return -ENOENT; data->num_sensors = err; data->sensors = kzalloc(data->num_sensors * sizeof(*data->sensors), GFP_KERNEL); if (!data->sensors) return -ENOMEM; for (i = 0; i < data->num_sensors; i++) { err = ibmpex_query_sensor_name(data, i); if (err) continue; if (is_power_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = POWER_SENSOR; num_power++; sensor_counter = num_power; data->sensors[i].multiplier = power_sensor_multiplier(data, data->rx_msg_data, data->rx_msg_len); } else if (is_temp_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = TEMP_SENSOR; num_temp++; sensor_counter = num_temp; data->sensors[i].multiplier = 1000; } else continue; data->sensors[i].in_use = 1; /* Create attributes */ for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { err = create_sensor(data, sensor_type, sensor_counter, i, j); if (err) goto exit_remove; } } err = device_create_file(data->bmc_device, 
&sensor_dev_attr_reset_high_low.dev_attr); if (err) goto exit_remove; err = device_create_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); if (err) goto exit_remove; return 0; exit_remove: device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } kfree(data->sensors); return err; } static void ibmpex_register_bmc(int iface, struct device *dev) { struct ibmpex_bmc_data *data; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(dev, "Insufficient memory for BMC interface.\n"); return; } data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; data->address.channel = IPMI_BMC_CHANNEL; data->address.data[0] = 0; data->interface = iface; data->bmc_device = dev; /* Create IPMI messaging interface user */ err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs, data, &data->user); if (err < 0) { dev_err(dev, "Unable to register user with IPMI " "interface %d\n", data->interface); goto out; } mutex_init(&data->lock); /* Initialize message */ data->tx_msgid = 0; init_completion(&data->read_complete); data->tx_message.netfn = PEX_NET_FUNCTION; data->tx_message.cmd = PEX_COMMAND; data->tx_message.data = data->tx_msg_data; /* Does this BMC support PowerExecutive? 
*/ err = ibmpex_ver_check(data); if (err) goto out_user; /* Register the BMC as a HWMON class device */ data->hwmon_dev = hwmon_device_register(data->bmc_device); if (IS_ERR(data->hwmon_dev)) { dev_err(data->bmc_device, "Unable to register hwmon " "device for IPMI interface %d\n", data->interface); goto out_user; } /* finally add the new bmc data to the bmc data list */ dev_set_drvdata(dev, data); list_add_tail(&data->list, &driver_data.bmc_data); /* Now go find all the sensors */ err = ibmpex_find_sensors(data); if (err) { dev_err(data->bmc_device, "Error %d finding sensors\n", err); goto out_register; } return; out_register: hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); out: kfree(data); } static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data) { int i, j; device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } list_del(&data->list); dev_set_drvdata(data->bmc_device, NULL); hwmon_device_unregister(data->hwmon_dev); ipmi_destroy_user(data->user); kfree(data->sensors); kfree(data); } static void ibmpex_bmc_gone(int iface) { struct ibmpex_bmc_data *data = get_bmc_data(iface); if (!data) return; ibmpex_bmc_delete(data); } static void ibmpex_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) { struct ibmpex_bmc_data *data = (struct ibmpex_bmc_data *)user_msg_data; if (msg->msgid != data->tx_msgid) { dev_err(data->bmc_device, "Mismatch between received msgid " "(%02x) and transmitted msgid (%02x)!\n", (int)msg->msgid, (int)data->tx_msgid); ipmi_free_recv_msg(msg); return; } data->rx_recv_type = msg->recv_type; if (msg->msg.data_len > 0) data->rx_result = 
msg->msg.data[0]; else data->rx_result = IPMI_UNKNOWN_ERR_COMPLETION_CODE; if (msg->msg.data_len > 1) { data->rx_msg_len = msg->msg.data_len - 1; memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len); } else data->rx_msg_len = 0; ipmi_free_recv_msg(msg); complete(&data->read_complete); } static int __init ibmpex_init(void) { return ipmi_smi_watcher_register(&driver_data.bmc_events); } static void __exit ibmpex_exit(void) { struct ibmpex_bmc_data *p, *next; ipmi_smi_watcher_unregister(&driver_data.bmc_events); list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) ibmpex_bmc_delete(p); } MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); MODULE_DESCRIPTION("IBM PowerExecutive power/temperature sensor driver"); MODULE_LICENSE("GPL"); module_init(ibmpex_init); module_exit(ibmpex_exit); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3350-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
gpl-2.0
Eliminater74/g3_kernel
arch/alpha/kernel/sys_eiger.c
8098
5516
/* * linux/arch/alpha/kernel/sys_eiger.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996, 1999 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * Copyright (C) 1999 Iain Grant * * Code supporting the EIGER (EV6+TSUNAMI). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pci.h> #include <asm/pgtable.h> #include <asm/core_tsunami.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note that this interrupt code is identical to TAKARA. */ /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; static inline void eiger_update_irq_hw(unsigned long irq, unsigned long mask) { int regaddr; mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); outl(mask & 0xffff0000UL, regaddr); } static inline void eiger_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); eiger_update_irq_hw(irq, mask); } static void eiger_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); eiger_update_irq_hw(irq, mask); } static struct irq_chip eiger_irq_type = { .name = "EIGER", .irq_unmask = eiger_enable_irq, .irq_mask = eiger_disable_irq, .irq_mask_ack = eiger_disable_irq, }; static void eiger_device_interrupt(unsigned long vector) { unsigned intstatus; /* * The PALcode will have passed us vectors 0x800 or 0x810, * which are fairly arbitrary values and serve only to tell * us whether an interrupt has come in on IRQ0 or IRQ1. 
If * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's * probably ISA, but PCI interrupts can come through IRQ0 * as well if the interrupt controller isn't in accelerated * mode. * * OTOH, the accelerator thing doesn't seem to be working * overly well, so what we'll do instead is try directly * examining the Master Interrupt Register to see if it's a * PCI interrupt, and if _not_ then we'll pass it on to the * ISA handler. */ intstatus = inw(0x500) & 15; if (intstatus) { /* * This is a PCI interrupt. Check each bit and * despatch an interrupt if it's set. */ if (intstatus & 8) handle_irq(16+3); if (intstatus & 4) handle_irq(16+2); if (intstatus & 2) handle_irq(16+1); if (intstatus & 1) handle_irq(16+0); } else { isa_device_interrupt(vector); } } static void eiger_srm_device_interrupt(unsigned long vector) { int irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init eiger_init_irq(void) { long i; outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = eiger_srm_device_interrupt; for (i = 16; i < 128; i += 16) eiger_update_irq_hw(i, -1); init_i8259a_irqs(); for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } static int __init eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u8 irq_orig; /* The SRM console has already calculated out the IRQ value's for option cards. As this works lets just read in the value already set and change it to a useable value by Linux. All the IRQ values generated by the console are greater than 90, so we subtract 80 because it is (90 - allocated ISA IRQ's). */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig); return irq_orig - 0x80; } static u8 __init eiger_swizzle(struct pci_dev *dev, u8 *pinp) { struct pci_controller *hose = dev->sysdata; int slot, pin = *pinp; int bridge_count = 0; /* Find the number of backplane bridges. 
*/ int backplane = inw(0x502) & 0x0f; switch (backplane) { case 0x00: bridge_count = 0; break; /* No bridges */ case 0x01: bridge_count = 1; break; /* 1 */ case 0x03: bridge_count = 2; break; /* 2 */ case 0x07: bridge_count = 3; break; /* 3 */ case 0x0f: bridge_count = 4; break; /* 4 */ }; slot = PCI_SLOT(dev->devfn); while (dev->bus->self) { /* Check for built-in bridges on hose 0. */ if (hose->index == 0 && (PCI_SLOT(dev->bus->self->devfn) > 20 - bridge_count)) { slot = PCI_SLOT(dev->devfn); break; } /* Must be a card-based bridge. */ pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; } *pinp = pin; return slot; } /* * The System Vectors */ struct alpha_machine_vector eiger_mv __initmv = { .vector_name = "Eiger", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 128, .device_interrupt = eiger_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = eiger_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = eiger_map_irq, .pci_swizzle = eiger_swizzle, }; ALIAS_MV(eiger)
gpl-2.0
garwynn/L900_MA7_Kernel
fs/proc/devices.c
10402
1430
#include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static int devinfo_show(struct seq_file *f, void *v) { int i = *(loff_t *) v; if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) seq_puts(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) seq_puts(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif return 0; } static void *devinfo_start(struct seq_file *f, loff_t *pos) { if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return pos; return NULL; } static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return NULL; return pos; } static void devinfo_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations devinfo_ops = { .start = devinfo_start, .next = devinfo_next, .stop = devinfo_stop, .show = devinfo_show }; static int devinfo_open(struct inode *inode, struct file *filp) { return seq_open(filp, &devinfo_ops); } static const struct file_operations proc_devinfo_operations = { .open = devinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_devices_init(void) { proc_create("devices", 0, NULL, &proc_devinfo_operations); return 0; } module_init(proc_devices_init);
gpl-2.0
oguz298/FTL-Kernel
drivers/external_drivers/camera/drivers/media/pci/atomisp2/css2401a0_legacy_v21/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
163
2995
/* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "assert_support.h" #include "ctc/ctc_1.0/ia_css_ctc.host.h" #include "ia_css_ctc1_5.host.h" static void ctc_gradient( int *dydx, int *shift, int y1, int y0, int x1, int x0) { int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16); int dy = y1 - y0; int dx = x1 - x0; int dydx_int; int dydx_frc; int sft; /* max_dydx = the maxinum gradient = the maximum y (gain) */ int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1; if (dx == 0) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() error, illegal division operation\n"); return; } else { dydx_int = dy / dx; dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx; } assert(y0 >= 0 && y0 <= max_dydx); assert(y1 >= 0 && y1 <= max_dydx); assert(x0 < x1); assert(dydx != NULL); assert(shift != NULL); ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n"); /* search "sft" which meets this condition: (1 << (IA_CSS_CTC_COEF_SHIFT - 1)) <= (((float)dy / (float)dx) * (1 << sft)) <= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1) */ for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) { int tmp_dydx = (dydx_int << sft) + (dydx_frc >> (frc_bits - sft)); if (tmp_dydx <= max_dydx) { *dydx = tmp_dydx; *shift = sft; } if (tmp_dydx >= max_dydx) break; } ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n"); } void ia_css_ctc_encode( struct sh_css_isp_ctc_params *to, const struct ia_css_ctc_config *from, 
unsigned size) { (void)size; to->y0 = from->y0; to->y1 = from->y1; to->y2 = from->y2; to->y3 = from->y3; to->y4 = from->y4; to->y5 = from->y5; to->ce_gain_exp = from->ce_gain_exp; to->x1 = from->x1; to->x2 = from->x2; to->x3 = from->x3; to->x4 = from->x4; ctc_gradient(&(to->dydx0), &(to->dydx0_shift), from->y1, from->y0, from->x1, 0); ctc_gradient(&(to->dydx1), &(to->dydx1_shift), from->y2, from->y1, from->x2, from->x1); ctc_gradient(&to->dydx2, &to->dydx2_shift, from->y3, from->y2, from->x3, from->x2); ctc_gradient(&to->dydx3, &to->dydx3_shift, from->y4, from->y3, from->x4, from->x3); ctc_gradient(&(to->dydx4), &(to->dydx4_shift), from->y5, from->y4, SH_CSS_BAYER_MAXVAL, from->x4); } void ia_css_ctc_dump( const struct sh_css_isp_ctc_params *ctc, unsigned level);
gpl-2.0
lab11/bluetooth-next
sound/soc/soc-utils.c
419
4764
/* * soc-util.c -- ALSA SoC Audio Layer utility functions * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * Liam Girdwood <lrg@slimlogic.co.uk> * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/platform_device.h> #include <linux/export.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots) { return sample_size * channels * tdm_slots; } EXPORT_SYMBOL_GPL(snd_soc_calc_frame_size); int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params) { int sample_size; sample_size = snd_pcm_format_width(params_format(params)); if (sample_size < 0) return sample_size; return snd_soc_calc_frame_size(sample_size, params_channels(params), 1); } EXPORT_SYMBOL_GPL(snd_soc_params_to_frame_size); int snd_soc_calc_bclk(int fs, int sample_size, int channels, int tdm_slots) { return fs * snd_soc_calc_frame_size(sample_size, channels, tdm_slots); } EXPORT_SYMBOL_GPL(snd_soc_calc_bclk); int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params) { int ret; ret = snd_soc_params_to_frame_size(params); if (ret > 0) return ret * params_rate(params); else return ret; } EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk); static const struct snd_pcm_hardware dummy_dma_hardware = { /* Random values to keep userspace happy when checking constraints */ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, .buffer_bytes_max = 128*1024, .period_bytes_min = PAGE_SIZE, .period_bytes_max = PAGE_SIZE*2, .periods_min = 2, .periods_max = 128, }; static int dummy_dma_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; /* BE's dont need dummy params 
*/ if (!rtd->dai_link->no_pcm) snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware); return 0; } static struct snd_pcm_ops dummy_dma_ops = { .open = dummy_dma_open, .ioctl = snd_pcm_lib_ioctl, }; static struct snd_soc_platform_driver dummy_platform = { .ops = &dummy_dma_ops, }; static struct snd_soc_codec_driver dummy_codec; #define STUB_RATES SNDRV_PCM_RATE_8000_192000 #define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_U8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_U16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_U24_LE | \ SNDRV_PCM_FMTBIT_S32_LE | \ SNDRV_PCM_FMTBIT_U32_LE | \ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) /* * The dummy CODEC is only meant to be used in situations where there is no * actual hardware. * * If there is actual hardware even if it does not have a control bus * the hardware will still have constraints like supported samplerates, etc. * which should be modelled. And the data flow graph also should be modelled * using DAPM. */ static struct snd_soc_dai_driver dummy_dai = { .name = "snd-soc-dummy-dai", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 384, .rates = STUB_RATES, .formats = STUB_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 384, .rates = STUB_RATES, .formats = STUB_FORMATS, }, }; int snd_soc_dai_is_dummy(struct snd_soc_dai *dai) { if (dai->driver == &dummy_dai) return 1; return 0; } static int snd_soc_dummy_probe(struct platform_device *pdev) { int ret; ret = snd_soc_register_codec(&pdev->dev, &dummy_codec, &dummy_dai, 1); if (ret < 0) return ret; ret = snd_soc_register_platform(&pdev->dev, &dummy_platform); if (ret < 0) { snd_soc_unregister_codec(&pdev->dev); return ret; } return ret; } static int snd_soc_dummy_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver soc_dummy_driver = { .driver = { .name = "snd-soc-dummy", }, .probe 
= snd_soc_dummy_probe, .remove = snd_soc_dummy_remove, }; static struct platform_device *soc_dummy_dev; int __init snd_soc_util_init(void) { int ret; soc_dummy_dev = platform_device_register_simple("snd-soc-dummy", -1, NULL, 0); if (IS_ERR(soc_dummy_dev)) return PTR_ERR(soc_dummy_dev); ret = platform_driver_register(&soc_dummy_driver); if (ret != 0) platform_device_unregister(soc_dummy_dev); return ret; } void __exit snd_soc_util_exit(void) { platform_device_unregister(soc_dummy_dev); platform_driver_unregister(&soc_dummy_driver); }
gpl-2.0
BOOTMGR/Kernel_JB_3.4
arch/arm/mach-msm/tz_log.c
419
12885
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/debugfs.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/uaccess.h> #define DEBUG_MAX_RW_BUF 4096 /* * Preprocessor Definitions and Constants */ #define TZBSP_CPU_COUNT 0x02 /* * Number of VMID Tables */ #define TZBSP_DIAG_NUM_OF_VMID 16 /* * VMID Description length */ #define TZBSP_DIAG_VMID_DESC_LEN 7 /* * Number of Interrupts */ #define TZBSP_DIAG_INT_NUM 32 /* * Length of descriptive name associated with Interrupt */ #define TZBSP_MAX_INT_DESC 16 /* * VMID Table */ struct tzdbg_vmid_t { uint8_t vmid; /* Virtual Machine Identifier */ uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */ }; /* * Boot Info Table */ struct tzdbg_boot_info_t { uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */ uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */ uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */ uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */ uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */ uint32_t spare; /* Reserved for future use. 
*/ }; /* * Reset Info Table */ struct tzdbg_reset_info_t { uint32_t reset_type; /* Reset Reason */ uint32_t reset_cnt; /* Number of resets occured/CPU */ }; /* * Interrupt Info Table */ struct tzdbg_int_t { /* * Type of Interrupt/exception */ uint16_t int_info; /* * Availability of the slot */ uint8_t avail; /* * Reserved for future use */ uint8_t spare; /* * Interrupt # for IRQ and FIQ */ uint32_t int_num; /* * ASCII text describing type of interrupt e.g: * Secure Timer, EBI XPU. This string is always null terminated, * supporting at most TZBSP_MAX_INT_DESC characters. * Any additional characters are truncated. */ uint8_t int_desc[TZBSP_MAX_INT_DESC]; uint64_t int_count[TZBSP_CPU_COUNT]; /* # of times seen per CPU */ }; /* * Diagnostic Table */ struct tzdbg_t { uint32_t magic_num; uint32_t version; /* * Number of CPU's */ uint32_t cpu_count; /* * Offset of VMID Table */ uint32_t vmid_info_off; /* * Offset of Boot Table */ uint32_t boot_info_off; /* * Offset of Reset info Table */ uint32_t reset_info_off; /* * Offset of Interrupt info Table */ uint32_t int_info_off; /* * Ring Buffer Offset */ uint32_t ring_off; /* * Ring Buffer Length */ uint32_t ring_len; /* * VMID to EE Mapping */ struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID]; /* * Boot Info */ struct tzdbg_boot_info_t boot_info[TZBSP_CPU_COUNT]; /* * Reset Info */ struct tzdbg_reset_info_t reset_info[TZBSP_CPU_COUNT]; uint32_t num_interrupts; struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM]; /* * We need at least 2K for the ring buffer */ uint8_t *ring_buffer; /* TZ Ring Buffer */ }; /* * Enumeration order for VMID's */ enum tzdbg_stats_type { TZDBG_BOOT = 0, TZDBG_RESET, TZDBG_INTERRUPT, TZDBG_VMID, TZDBG_GENERAL, TZDBG_LOG, TZDBG_STATS_MAX, }; struct tzdbg_stat { char *name; char *data; }; struct tzdbg { void __iomem *virt_iobase; struct tzdbg_t *diag_buf; char *disp_buf; int debug_tz[TZDBG_STATS_MAX]; struct tzdbg_stat stat[TZDBG_STATS_MAX]; }; static struct tzdbg tzdbg = { .stat[TZDBG_BOOT].name = 
"boot", .stat[TZDBG_RESET].name = "reset", .stat[TZDBG_INTERRUPT].name = "interrupt", .stat[TZDBG_VMID].name = "vmid", .stat[TZDBG_GENERAL].name = "general", .stat[TZDBG_LOG].name = "log", }; /* * Debugfs data structure and functions */ static int _disp_tz_general_stats(void) { int len = 0; len += snprintf(tzdbg.disp_buf + len, DEBUG_MAX_RW_BUF - 1, " Version : 0x%x\n" " Magic Number : 0x%x\n" " Number of CPU : %d\n", tzdbg.diag_buf->version, tzdbg.diag_buf->magic_num, tzdbg.diag_buf->cpu_count); tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf; return len; } static int _disp_tz_vmid_stats(void) { int i, num_vmid; int len = 0; struct tzdbg_vmid_t *ptr; ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->vmid_info_off); num_vmid = ((tzdbg.diag_buf->boot_info_off - tzdbg.diag_buf->vmid_info_off)/ (sizeof(struct tzdbg_vmid_t))); for (i = 0; i < num_vmid; i++) { if (ptr->vmid < 0xFF) { len += snprintf(tzdbg.disp_buf + len, (DEBUG_MAX_RW_BUF - 1) - len, " 0x%x %s\n", (uint32_t)ptr->vmid, (uint8_t *)ptr->desc); } if (len > (DEBUG_MAX_RW_BUF - 1)) { pr_warn("%s: Cannot fit all info into the buffer\n", __func__); break; } ptr++; } tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf; return len; } static int _disp_tz_boot_stats(void) { int i; int len = 0; struct tzdbg_boot_info_t *ptr; ptr = (struct tzdbg_boot_info_t *)((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off); for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) { len += snprintf(tzdbg.disp_buf + len, (DEBUG_MAX_RW_BUF - 1) - len, " CPU #: %d\n" " Warmboot jump address : 0x%x\n" " Warmboot entry CPU counter: 0x%x\n" " Warmboot exit CPU counter : 0x%x\n" " Power Collapse entry CPU counter: 0x%x\n" " Power Collapse exit CPU counter : 0x%x\n", i, ptr->warm_jmp_addr, ptr->wb_entry_cnt, ptr->wb_exit_cnt, ptr->pc_entry_cnt, ptr->pc_exit_cnt); if (len > (DEBUG_MAX_RW_BUF - 1)) { pr_warn("%s: Cannot fit all info into the buffer\n", __func__); break; } ptr++; } tzdbg.stat[TZDBG_BOOT].data = 
tzdbg.disp_buf; return len; } static int _disp_tz_reset_stats(void) { int i; int len = 0; struct tzdbg_reset_info_t *ptr; ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->reset_info_off); for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) { len += snprintf(tzdbg.disp_buf + len, (DEBUG_MAX_RW_BUF - 1) - len, " CPU #: %d\n" " Reset Type (reason) : 0x%x\n" " Reset counter : 0x%x\n", i, ptr->reset_type, ptr->reset_cnt); if (len > (DEBUG_MAX_RW_BUF - 1)) { pr_warn("%s: Cannot fit all info into the buffer\n", __func__); break; } ptr++; } tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf; return len; } static int _disp_tz_interrupt_stats(void) { int i, j, int_info_size; int len = 0; int *num_int; unsigned char *ptr; struct tzdbg_int_t *tzdbg_ptr; num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf + (tzdbg.diag_buf->int_info_off - sizeof(uint32_t))); ptr = ((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->int_info_off); int_info_size = ((tzdbg.diag_buf->ring_off - tzdbg.diag_buf->int_info_off)/(*num_int)); for (i = 0; i < (*num_int); i++) { tzdbg_ptr = (struct tzdbg_int_t *)ptr; len += snprintf(tzdbg.disp_buf + len, (DEBUG_MAX_RW_BUF - 1) - len, " Interrupt Number : 0x%x\n" " Type of Interrupt : 0x%x\n" " Description of interrupt : %s\n", tzdbg_ptr->int_num, (uint32_t)tzdbg_ptr->int_info, (uint8_t *)tzdbg_ptr->int_desc); for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) { len += snprintf(tzdbg.disp_buf + len, (DEBUG_MAX_RW_BUF - 1) - len, " int_count on CPU # %d : %u\n", (uint32_t)j, (uint32_t)tzdbg_ptr->int_count[j]); } len += snprintf(tzdbg.disp_buf + len, DEBUG_MAX_RW_BUF - 1, "\n"); if (len > (DEBUG_MAX_RW_BUF - 1)) { pr_warn("%s: Cannot fit all info into the buffer\n", __func__); break; } ptr += int_info_size; } tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf; return len; } static int _disp_tz_log_stats(void) { int len = 0; unsigned char *ptr; ptr = (unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->ring_off; len += 
snprintf(tzdbg.disp_buf, (DEBUG_MAX_RW_BUF - 1) - len, "%s\n", ptr); tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf; return len; } static ssize_t tzdbgfs_read(struct file *file, char __user *buf, size_t count, loff_t *offp) { int len = 0; int *tz_id = file->private_data; memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase, DEBUG_MAX_RW_BUF); switch (*tz_id) { case TZDBG_BOOT: len = _disp_tz_boot_stats(); break; case TZDBG_RESET: len = _disp_tz_reset_stats(); break; case TZDBG_INTERRUPT: len = _disp_tz_interrupt_stats(); break; case TZDBG_GENERAL: len = _disp_tz_general_stats(); break; case TZDBG_VMID: len = _disp_tz_vmid_stats(); break; case TZDBG_LOG: len = _disp_tz_log_stats(); break; default: break; } if (len > count) len = count; return simple_read_from_buffer(buf, len, offp, tzdbg.stat[(*tz_id)].data, len); } static int tzdbgfs_open(struct inode *inode, struct file *pfile) { pfile->private_data = inode->i_private; return 0; } const struct file_operations tzdbg_fops = { .owner = THIS_MODULE, .read = tzdbgfs_read, .open = tzdbgfs_open, }; static int tzdbgfs_init(struct platform_device *pdev) { int rc = 0; int i; struct dentry *dent_dir; struct dentry *dent; dent_dir = debugfs_create_dir("tzdbg", NULL); if (dent_dir == NULL) { dev_err(&pdev->dev, "tzdbg debugfs_create_dir failed\n"); return -ENOMEM; } for (i = 0; i < TZDBG_STATS_MAX; i++) { tzdbg.debug_tz[i] = i; dent = debugfs_create_file(tzdbg.stat[i].name, S_IRUGO, dent_dir, &tzdbg.debug_tz[i], &tzdbg_fops); if (dent == NULL) { dev_err(&pdev->dev, "TZ debugfs_create_file failed\n"); rc = -ENOMEM; goto err; } } tzdbg.disp_buf = kzalloc(DEBUG_MAX_RW_BUF, GFP_KERNEL); if (tzdbg.disp_buf == NULL) { pr_err("%s: Can't Allocate memory for tzdbg.disp_buf\n", __func__); goto err; } platform_set_drvdata(pdev, dent_dir); return 0; err: debugfs_remove_recursive(dent_dir); return rc; } static void tzdbgfs_exit(struct platform_device *pdev) { struct dentry *dent_dir; kzfree(tzdbg.disp_buf); dent_dir = 
platform_get_drvdata(pdev); debugfs_remove_recursive(dent_dir); } /* * Driver functions */ static int __devinit tz_log_probe(struct platform_device *pdev) { struct resource *resource; void __iomem *virt_iobase; uint32_t tzdiag_phy_iobase; uint32_t *ptr = NULL; /* * Get address that stores the physical location of 4KB * diagnostic data */ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { dev_err(&pdev->dev, "%s: ERROR Missing MEM resource\n", __func__); return -ENXIO; }; /* * Map address that stores the physical location of 4KB * diagnostic data */ virt_iobase = devm_ioremap_nocache(&pdev->dev, resource->start, resource->end - resource->start + 1); if (!virt_iobase) { dev_err(&pdev->dev, "%s: ERROR could not ioremap: start=%p, len=%u\n", __func__, (void *) resource->start, (resource->end - resource->start + 1)); return -ENXIO; } /* * Retrieve the address of 4KB diagnostic data */ tzdiag_phy_iobase = readl_relaxed(virt_iobase); /* * Map the 4KB diagnostic information area */ tzdbg.virt_iobase = devm_ioremap_nocache(&pdev->dev, tzdiag_phy_iobase, DEBUG_MAX_RW_BUF); if (!tzdbg.virt_iobase) { dev_err(&pdev->dev, "%s: ERROR could not ioremap: start=%p, len=%u\n", __func__, (void *) tzdiag_phy_iobase, DEBUG_MAX_RW_BUF); return -ENXIO; } ptr = kzalloc(DEBUG_MAX_RW_BUF, GFP_KERNEL); if (ptr == NULL) { pr_err("%s: Can't Allocate memory: ptr\n", __func__); return -ENXIO; } tzdbg.diag_buf = (struct tzdbg_t *)ptr; if (tzdbgfs_init(pdev)) goto err; return 0; err: kfree(tzdbg.diag_buf); return -ENXIO; } static int __devexit tz_log_remove(struct platform_device *pdev) { kzfree(tzdbg.diag_buf); tzdbgfs_exit(pdev); return 0; } static struct of_device_id tzlog_match[] = { { .compatible = "qcom,tz-log", }, {} }; static struct platform_driver tz_log_driver = { .probe = tz_log_probe, .remove = __devexit_p(tz_log_remove), .driver = { .name = "tz_log", .owner = THIS_MODULE, .of_match_table = tzlog_match, }, }; static int __init tz_log_init(void) { return 
platform_driver_register(&tz_log_driver); } static void __exit tz_log_exit(void) { platform_driver_unregister(&tz_log_driver); } module_init(tz_log_init); module_exit(tz_log_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("TZ Log driver"); MODULE_VERSION("1.1"); MODULE_ALIAS("platform:tz_log");
gpl-2.0
galaxyishere/samsung-kernel-latona
drivers/scsi/scsi_tgt_if.c
931
9293
/* * SCSI target kernel/user interface functions * * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org> * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/miscdevice.h> #include <linux/gfp.h> #include <linux/file.h> #include <linux/smp_lock.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tgt.h> #include <scsi/scsi_tgt_if.h> #include <asm/cacheflush.h> #include "scsi_tgt_priv.h" #if TGT_RING_SIZE < PAGE_SIZE # define TGT_RING_SIZE PAGE_SIZE #endif #define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT) #define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event)) #define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES) struct tgt_ring { u32 tr_idx; unsigned long tr_pages[TGT_RING_PAGES]; spinlock_t tr_lock; }; /* tx_ring : kernel->user, rx_ring : user->kernel */ static struct tgt_ring tx_ring, rx_ring; static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait); static inline void tgt_ring_idx_inc(struct tgt_ring *ring) { if (ring->tr_idx == TGT_MAX_EVENTS - 1) ring->tr_idx = 0; else ring->tr_idx++; } static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx) { u32 pidx, off; pidx = idx / TGT_EVENT_PER_PAGE; off = idx % TGT_EVENT_PER_PAGE; 
return (struct tgt_event *) (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off); } static int tgt_uspace_send_event(u32 type, struct tgt_event *p) { struct tgt_event *ev; struct tgt_ring *ring = &tx_ring; unsigned long flags; int err = 0; spin_lock_irqsave(&ring->tr_lock, flags); ev = tgt_head_event(ring, ring->tr_idx); if (!ev->hdr.status) tgt_ring_idx_inc(ring); else err = -BUSY; spin_unlock_irqrestore(&ring->tr_lock, flags); if (err) return err; memcpy(ev, p, sizeof(*ev)); ev->hdr.type = type; mb(); ev->hdr.status = 1; flush_dcache_page(virt_to_page(ev)); wake_up_interruptible(&tgt_poll_wait); return 0; } int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id, struct scsi_lun *lun, u64 tag) { struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); struct tgt_event ev; int err; memset(&ev, 0, sizeof(ev)); ev.p.cmd_req.host_no = shost->host_no; ev.p.cmd_req.itn_id = itn_id; ev.p.cmd_req.data_len = scsi_bufflen(cmd); memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb)); memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun)); ev.p.cmd_req.attribute = cmd->tag; ev.p.cmd_req.tag = tag; dprintk("%p %d %u %x %llx\n", cmd, shost->host_no, ev.p.cmd_req.data_len, cmd->tag, (unsigned long long) ev.p.cmd_req.tag); err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev); if (err) eprintk("tx buf is full, could not send\n"); return err; } int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 itn_id, u64 tag) { struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); struct tgt_event ev; int err; memset(&ev, 0, sizeof(ev)); ev.p.cmd_done.host_no = shost->host_no; ev.p.cmd_done.itn_id = itn_id; ev.p.cmd_done.tag = tag; ev.p.cmd_done.result = cmd->result; dprintk("%p %d %llu %u %x\n", cmd, shost->host_no, (unsigned long long) ev.p.cmd_req.tag, ev.p.cmd_req.data_len, cmd->tag); err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev); if (err) eprintk("tx buf is full, could not send\n"); return err; } int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 itn_id, int 
function, u64 tag, struct scsi_lun *scsilun, void *data) { struct tgt_event ev; int err; memset(&ev, 0, sizeof(ev)); ev.p.tsk_mgmt_req.host_no = host_no; ev.p.tsk_mgmt_req.itn_id = itn_id; ev.p.tsk_mgmt_req.function = function; ev.p.tsk_mgmt_req.tag = tag; memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun)); ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data; dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag, (unsigned long long) ev.p.tsk_mgmt_req.mid); err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev); if (err) eprintk("tx buf is full, could not send\n"); return err; } int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 itn_id, int function, char *initiator_id) { struct tgt_event ev; int err; memset(&ev, 0, sizeof(ev)); ev.p.it_nexus_req.host_no = host_no; ev.p.it_nexus_req.function = function; ev.p.it_nexus_req.itn_id = itn_id; if (initiator_id) strncpy(ev.p.it_nexus_req.initiator_id, initiator_id, sizeof(ev.p.it_nexus_req.initiator_id)); dprintk("%d %x %llx\n", host_no, function, (unsigned long long)itn_id); err = tgt_uspace_send_event(TGT_KEVENT_IT_NEXUS_REQ, &ev); if (err) eprintk("tx buf is full, could not send\n"); return err; } static int event_recv_msg(struct tgt_event *ev) { int err = 0; switch (ev->hdr.type) { case TGT_UEVENT_CMD_RSP: err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no, ev->p.cmd_rsp.itn_id, ev->p.cmd_rsp.result, ev->p.cmd_rsp.tag, ev->p.cmd_rsp.uaddr, ev->p.cmd_rsp.len, ev->p.cmd_rsp.sense_uaddr, ev->p.cmd_rsp.sense_len, ev->p.cmd_rsp.rw); break; case TGT_UEVENT_TSK_MGMT_RSP: err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no, ev->p.tsk_mgmt_rsp.itn_id, ev->p.tsk_mgmt_rsp.mid, ev->p.tsk_mgmt_rsp.result); break; case TGT_UEVENT_IT_NEXUS_RSP: err = scsi_tgt_kspace_it_nexus_rsp(ev->p.it_nexus_rsp.host_no, ev->p.it_nexus_rsp.itn_id, ev->p.it_nexus_rsp.result); break; default: eprintk("unknown type %d\n", ev->hdr.type); err = -EINVAL; } return err; } static ssize_t tgt_write(struct 
file *file, const char __user * buffer, size_t count, loff_t * ppos) { struct tgt_event *ev; struct tgt_ring *ring = &rx_ring; while (1) { ev = tgt_head_event(ring, ring->tr_idx); /* do we need this? */ flush_dcache_page(virt_to_page(ev)); if (!ev->hdr.status) break; tgt_ring_idx_inc(ring); event_recv_msg(ev); ev->hdr.status = 0; }; return count; } static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait) { struct tgt_event *ev; struct tgt_ring *ring = &tx_ring; unsigned long flags; unsigned int mask = 0; u32 idx; poll_wait(file, &tgt_poll_wait, wait); spin_lock_irqsave(&ring->tr_lock, flags); idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1; ev = tgt_head_event(ring, idx); if (ev->hdr.status) mask |= POLLIN | POLLRDNORM; spin_unlock_irqrestore(&ring->tr_lock, flags); return mask; } static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr, struct tgt_ring *ring) { int i, err; for (i = 0; i < TGT_RING_PAGES; i++) { struct page *page = virt_to_page(ring->tr_pages[i]); err = vm_insert_page(vma, addr, page); if (err) return err; addr += PAGE_SIZE; } return 0; } static int tgt_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long addr; int err; if (vma->vm_pgoff) return -EINVAL; if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) { eprintk("mmap size must be %lu, not %lu \n", TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start); return -EINVAL; } addr = vma->vm_start; err = uspace_ring_map(vma, addr, &tx_ring); if (err) return err; err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring); return err; } static int tgt_open(struct inode *inode, struct file *file) { tx_ring.tr_idx = rx_ring.tr_idx = 0; cycle_kernel_lock(); return 0; } static const struct file_operations tgt_fops = { .owner = THIS_MODULE, .open = tgt_open, .poll = tgt_poll, .write = tgt_write, .mmap = tgt_mmap, }; static struct miscdevice tgt_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "tgt", .fops = &tgt_fops, }; static void 
tgt_ring_exit(struct tgt_ring *ring) { int i; for (i = 0; i < TGT_RING_PAGES; i++) free_page(ring->tr_pages[i]); } static int tgt_ring_init(struct tgt_ring *ring) { int i; spin_lock_init(&ring->tr_lock); for (i = 0; i < TGT_RING_PAGES; i++) { ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL); if (!ring->tr_pages[i]) { eprintk("out of memory\n"); return -ENOMEM; } } return 0; } void scsi_tgt_if_exit(void) { tgt_ring_exit(&tx_ring); tgt_ring_exit(&rx_ring); misc_deregister(&tgt_miscdev); } int scsi_tgt_if_init(void) { int err; err = tgt_ring_init(&tx_ring); if (err) return err; err = tgt_ring_init(&rx_ring); if (err) goto free_tx_ring; err = misc_register(&tgt_miscdev); if (err) goto free_rx_ring; return 0; free_rx_ring: tgt_ring_exit(&rx_ring); free_tx_ring: tgt_ring_exit(&tx_ring); return err; }
gpl-2.0
mseskir/android_kernel_vestel_55g
drivers/input/misc/cm36283.c
1443
49490
/* drivers/input/misc/cm36283.c - cm36283 optical sensors driver * * Copyright (C) 2012 Capella Microsystems Inc. * Author: Frank Hsieh <pengyueh@gmail.com> * * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/sensors.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/wakelock.h> #include <linux/jiffies.h> #include <linux/cm36283.h> #include <linux/of_gpio.h> #include <asm/uaccess.h> #include <asm/mach-types.h> #include <asm/setup.h> #define I2C_RETRY_COUNT 10 #define NEAR_DELAY_TIME ((100 * HZ) / 1000) #define CONTROL_INT_ISR_REPORT 0x00 #define CONTROL_ALS 0x01 #define CONTROL_PS 0x02 /* POWER SUPPLY VOLTAGE RANGE */ #define CM36283_VDD_MIN_UV 2700000 #define CM36283_VDD_MAX_UV 3300000 #define CM36283_VI2C_MIN_UV 1750000 #define CM36283_VI2C_MAX_UV 1950000 /* cm36283 polling rate in ms */ #define CM36283_LS_MIN_POLL_DELAY 1 #define CM36283_LS_MAX_POLL_DELAY 1000 #define CM36283_LS_DEFAULT_POLL_DELAY 100 #define CM36283_PS_MIN_POLL_DELAY 1 #define CM36283_PS_MAX_POLL_DELAY 1000 #define CM36283_PS_DEFAULT_POLL_DELAY 100 static struct sensors_classdev sensors_light_cdev = { .name = "cm36283-light", .vendor = "Capella", .version = 1, .handle = 
SENSORS_LIGHT_HANDLE, .type = SENSOR_TYPE_LIGHT, .max_range = "6553", .resolution = "0.0125", .sensor_power = "0.15", .min_delay = 0, .fifo_reserved_event_count = 0, .fifo_max_event_count = 0, .enabled = 0, .delay_msec = CM36283_LS_DEFAULT_POLL_DELAY, .sensors_enable = NULL, .sensors_poll_delay = NULL, }; static struct sensors_classdev sensors_proximity_cdev = { .name = "cm36283-proximity", .vendor = "Capella", .version = 1, .handle = SENSORS_PROXIMITY_HANDLE, .type = SENSOR_TYPE_PROXIMITY, .max_range = "5.0", .resolution = "5.0", .sensor_power = "0.18", .min_delay = 0, .fifo_reserved_event_count = 0, .fifo_max_event_count = 0, .enabled = 0, .delay_msec = CM36283_PS_DEFAULT_POLL_DELAY, .sensors_enable = NULL, .sensors_poll_delay = NULL, }; static const int als_range[] = { [CM36283_ALS_IT0] = 6554, [CM36283_ALS_IT1] = 3277, [CM36283_ALS_IT2] = 1638, [CM36283_ALS_IT3] = 819, }; static const int als_sense[] = { [CM36283_ALS_IT0] = 10, [CM36283_ALS_IT1] = 20, [CM36283_ALS_IT2] = 40, [CM36283_ALS_IT3] = 80, }; static void sensor_irq_do_work(struct work_struct *work); static DECLARE_WORK(sensor_irq_work, sensor_irq_do_work); struct cm36283_info { struct class *cm36283_class; struct device *ls_dev; struct device *ps_dev; struct input_dev *ls_input_dev; struct input_dev *ps_input_dev; struct i2c_client *i2c_client; struct workqueue_struct *lp_wq; int intr_pin; int als_enable; int ps_enable; int ps_irq_flag; uint16_t *adc_table; uint16_t cali_table[10]; int irq; int ls_calibrate; int (*power)(int, uint8_t); /* power to the chip */ uint32_t als_kadc; uint32_t als_gadc; uint16_t golden_adc; struct wake_lock ps_wake_lock; int psensor_opened; int lightsensor_opened; uint8_t slave_addr; uint8_t ps_close_thd_set; uint8_t ps_away_thd_set; int current_level; uint16_t current_adc; uint16_t ps_conf1_val; uint16_t ps_conf3_val; uint16_t ls_cmd; uint8_t record_clear_int_fail; bool polling; atomic_t ls_poll_delay; atomic_t ps_poll_delay; struct regulator *vdd; struct regulator *vio; 
struct delayed_work ldwork; struct delayed_work pdwork; struct sensors_classdev als_cdev; struct sensors_classdev ps_cdev; }; struct cm36283_info *lp_info; int fLevel=-1; static struct mutex als_enable_mutex, als_disable_mutex, als_get_adc_mutex; static struct mutex ps_enable_mutex, ps_disable_mutex, ps_get_adc_mutex; static struct mutex CM36283_control_mutex; static struct mutex wq_lock; static int lightsensor_enable(struct cm36283_info *lpi); static int lightsensor_disable(struct cm36283_info *lpi); static int initial_cm36283(struct cm36283_info *lpi); static void psensor_initial_cmd(struct cm36283_info *lpi); static int cm36283_power_set(struct cm36283_info *info, bool on); int32_t als_kadc; static int control_and_report(struct cm36283_info *lpi, uint8_t mode, uint16_t param, int report); static int I2C_RxData(uint16_t slaveAddr, uint8_t cmd, uint8_t *rxData, int length) { uint8_t loop_i; struct cm36283_info *lpi = lp_info; uint8_t subaddr[1]; struct i2c_msg msgs[] = { { .addr = slaveAddr, .flags = 0, .len = 1, .buf = subaddr, }, { .addr = slaveAddr, .flags = I2C_M_RD, .len = length, .buf = rxData, }, }; subaddr[0] = cmd; for (loop_i = 0; loop_i < I2C_RETRY_COUNT; loop_i++) { if (i2c_transfer(lp_info->i2c_client->adapter, msgs, 2) > 0) break; dev_err(&lpi->i2c_client->dev, "%s: I2C error(%d). Retrying.\n", __func__, cmd); msleep(10); } if (loop_i >= I2C_RETRY_COUNT) { dev_err(&lpi->i2c_client->dev, "%s: Retry count exceeds %d.", __func__, I2C_RETRY_COUNT); return -EIO; } return 0; } static int I2C_TxData(uint16_t slaveAddr, uint8_t *txData, int length) { uint8_t loop_i; struct cm36283_info *lpi = lp_info; struct i2c_msg msg[] = { { .addr = slaveAddr, .flags = 0, .len = length, .buf = txData, }, }; for (loop_i = 0; loop_i < I2C_RETRY_COUNT; loop_i++) { if (i2c_transfer(lp_info->i2c_client->adapter, msg, 1) > 0) break; pr_err("%s: I2C error. 
Retrying...\n", __func__); msleep(10); } if (loop_i >= I2C_RETRY_COUNT) { dev_err(&lpi->i2c_client->dev, "%s: Retry count exceeds %d.", __func__, I2C_RETRY_COUNT); return -EIO; } return 0; } static int _cm36283_I2C_Read_Word(uint16_t slaveAddr, uint8_t cmd, uint16_t *pdata) { uint8_t buffer[2]; int ret = 0; if (pdata == NULL) return -EFAULT; ret = I2C_RxData(slaveAddr, cmd, buffer, 2); if (ret < 0) { pr_err("%s: I2C RxData fail(%d).\n", __func__, cmd); return ret; } *pdata = (buffer[1]<<8)|buffer[0]; return ret; } static int _cm36283_I2C_Write_Word(uint16_t SlaveAddress, uint8_t cmd, uint16_t data) { char buffer[3]; int ret = 0; buffer[0] = cmd; buffer[1] = (uint8_t)(data&0xff); buffer[2] = (uint8_t)((data&0xff00)>>8); ret = I2C_TxData(SlaveAddress, buffer, 3); if (ret < 0) { pr_err("%s: I2C_TxData failed.\n", __func__); return -EIO; } return ret; } static int get_ls_adc_value(uint16_t *als_step, bool resume) { struct cm36283_info *lpi = lp_info; uint32_t tmp; int ret = 0; if (als_step == NULL) return -EFAULT; /* Read ALS data: */ ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ALS_DATA, als_step); if (ret < 0) { dev_err(&lpi->i2c_client->dev, "%s: I2C read word failed.\n", __func__); return -EIO; } if (!lpi->ls_calibrate) { tmp = (uint32_t)(*als_step) * lpi->als_gadc / lpi->als_kadc; if (tmp > 0xFFFF) *als_step = 0xFFFF; else *als_step = tmp; } dev_dbg(&lpi->i2c_client->dev, "raw adc = 0x%x\n", *als_step); return ret; } static int set_lsensor_range(uint16_t low_thd, uint16_t high_thd) { int ret = 0; struct cm36283_info *lpi = lp_info; _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_THDH, high_thd); _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_THDL, low_thd); return ret; } static int get_ps_adc_value(uint16_t *data) { int ret = 0; struct cm36283_info *lpi = lp_info; if (data == NULL) return -EFAULT; ret = _cm36283_I2C_Read_Word(lpi->slave_addr, PS_DATA, data); if (ret < 0) return ret; (*data) &= 0xFF; return ret; } static uint16_t mid_value(uint16_t value[], uint8_t 
size) { int i = 0, j = 0; uint16_t temp = 0; if (size < 3) return 0; for (i = 0; i < (size - 1); i++) for (j = (i + 1); j < size; j++) if (value[i] > value[j]) { temp = value[i]; value[i] = value[j]; value[j] = temp; } return value[((size - 1) / 2)]; } static int get_stable_ps_adc_value(uint16_t *ps_adc) { uint16_t value[3] = {0, 0, 0}, mid_val = 0; int ret = 0; int i = 0; int wait_count = 0; struct cm36283_info *lpi = lp_info; for (i = 0; i < 3; i++) { /*wait interrupt GPIO high*/ while (gpio_get_value(lpi->intr_pin) == 0) { msleep(10); wait_count++; if (wait_count > 12) { dev_err(&lpi->i2c_client->dev, "%s: interrupt GPIO low\n", __func__); return -EIO; } } ret = get_ps_adc_value(&value[i]); if (ret < 0) { dev_err(&lpi->i2c_client->dev, "%s: error get ps value\n", __func__); return -EIO; } if (wait_count < 60/10) {/*wait gpio less than 60ms*/ msleep(60 - (10*wait_count)); } wait_count = 0; } mid_val = mid_value(value, 3); dev_dbg(&lpi->i2c_client->dev, "Sta_ps: After sort, value[0, 1, 2] = [0x%x, 0x%x, 0x%x]", value[0], value[1], value[2]); *ps_adc = (mid_val & 0xFF); return 0; } static void sensor_irq_do_work(struct work_struct *work) { struct cm36283_info *lpi = lp_info; uint16_t intFlag; _cm36283_I2C_Read_Word(lpi->slave_addr, INT_FLAG, &intFlag); control_and_report(lpi, CONTROL_INT_ISR_REPORT, intFlag, 1); enable_irq(lpi->irq); } static int get_als_range(void) { uint16_t ls_conf; int ret = 0; int index = 0; struct cm36283_info *lpi = lp_info; ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ALS_CONF, &ls_conf); if (ret) { dev_err(&lpi->i2c_client->dev, "read ALS_CONF from i2c error. %d\n", ret); return -EIO; } index = (ls_conf & 0xC0) >> 0x06; return als_range[index]; } static int get_als_sense(void) { uint16_t ls_conf; int ret = 0; int index = 0; struct cm36283_info *lpi = lp_info; ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ALS_CONF, &ls_conf); if (ret) { dev_err(&lpi->i2c_client->dev, "read ALS_CONF from i2c error. 
%d\n", ret); return -EIO; } index = (ls_conf & 0xC0) >> 0x06; return als_sense[index]; } static void psensor_delay_work_handler(struct work_struct *work) { struct cm36283_info *lpi = lp_info; uint16_t adc_value = 0; int ret; mutex_lock(&wq_lock); ret = get_ps_adc_value(&adc_value); mutex_unlock(&wq_lock); if (ret >= 0) { input_report_abs(lpi->ps_input_dev, ABS_DISTANCE, adc_value > lpi->ps_close_thd_set ? 0 : 1); input_sync(lpi->ps_input_dev); } schedule_delayed_work(&lpi->pdwork, msecs_to_jiffies(atomic_read(&lpi->ps_poll_delay))); } static void lsensor_delay_work_handler(struct work_struct *work) { struct cm36283_info *lpi = lp_info; uint16_t adc_value = 0; int sense; mutex_lock(&wq_lock); get_ls_adc_value(&adc_value, 0); sense = get_als_sense(); mutex_unlock(&wq_lock); if (sense > 0) { lpi->current_adc = adc_value; input_report_abs(lpi->ls_input_dev, ABS_MISC, adc_value/sense); input_sync(lpi->ls_input_dev); } schedule_delayed_work(&lpi->ldwork, msecs_to_jiffies(atomic_read(&lpi->ls_poll_delay))); } static irqreturn_t cm36283_irq_handler(int irq, void *data) { struct cm36283_info *lpi = data; disable_irq_nosync(lpi->irq); queue_work(lpi->lp_wq, &sensor_irq_work); return IRQ_HANDLED; } static int als_power(int enable) { struct cm36283_info *lpi = lp_info; if (lpi->power) lpi->power(LS_PWR_ON, 1); return 0; } static void ls_initial_cmd(struct cm36283_info *lpi) { /*must disable l-sensor interrupt befrore IST create*//*disable ALS func*/ lpi->ls_cmd &= CM36283_ALS_INT_MASK; lpi->ls_cmd |= CM36283_ALS_SD; _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd); } static void psensor_initial_cmd(struct cm36283_info *lpi) { /*must disable p-sensor interrupt befrore IST create*/ lpi->ps_conf1_val |= CM36283_PS_SD; lpi->ps_conf1_val &= CM36283_PS_INT_MASK; _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val); _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF3, lpi->ps_conf3_val); _cm36283_I2C_Write_Word(lpi->slave_addr, PS_THD, 
(lpi->ps_close_thd_set << 8) | lpi->ps_away_thd_set); dev_dbg(&lpi->i2c_client->dev, "%s:send psensor initial command finished\n", __func__); } static int psensor_enable(struct cm36283_info *lpi) { int ret = -EIO; unsigned int delay; mutex_lock(&ps_enable_mutex); dev_dbg(&lpi->i2c_client->dev, "psensor enable!\n"); if (lpi->ps_enable) { dev_err(&lpi->i2c_client->dev, "already enabled\n"); ret = 0; } else { ret = control_and_report(lpi, CONTROL_PS, 1, 0); } mutex_unlock(&ps_enable_mutex); delay = atomic_read(&lpi->ps_poll_delay); if (lpi->polling) schedule_delayed_work(&lpi->pdwork, msecs_to_jiffies(delay)); return ret; } static int psensor_disable(struct cm36283_info *lpi) { int ret = -EIO; if (lpi->polling) cancel_delayed_work_sync(&lpi->pdwork); mutex_lock(&ps_disable_mutex); dev_dbg(&lpi->i2c_client->dev, "psensor disable!\n"); if (lpi->ps_enable == 0) { dev_err(&lpi->i2c_client->dev, "already disabled\n"); ret = 0; } else { ret = control_and_report(lpi, CONTROL_PS, 0, 0); } mutex_unlock(&ps_disable_mutex); return ret; } static int psensor_open(struct inode *inode, struct file *file) { struct cm36283_info *lpi = lp_info; dev_dbg(&lpi->i2c_client->dev, "psensor open!"); if (lpi->psensor_opened) return -EBUSY; lpi->psensor_opened = 1; return 0; } static int psensor_release(struct inode *inode, struct file *file) { struct cm36283_info *lpi = lp_info; dev_dbg(&lpi->i2c_client->dev, "psensor release!"); lpi->psensor_opened = 0; return psensor_disable(lpi); //return 0; } static long psensor_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int val; struct cm36283_info *lpi = lp_info; dev_dbg(&lpi->i2c_client->dev, "%s cmd %d\n", __func__, _IOC_NR(cmd)); switch (cmd) { case CAPELLA_CM3602_IOCTL_ENABLE: if (get_user(val, (unsigned long __user *)arg)) return -EFAULT; if (val) return psensor_enable(lpi); else return psensor_disable(lpi); break; case CAPELLA_CM3602_IOCTL_GET_ENABLED: return put_user(lpi->ps_enable, (unsigned long __user *)arg); break; 
default: dev_err(&lpi->i2c_client->dev, "%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); return -EINVAL; } } static const struct file_operations psensor_fops = { .owner = THIS_MODULE, .open = psensor_open, .release = psensor_release, .unlocked_ioctl = psensor_ioctl }; void lightsensor_set_kvalue(struct cm36283_info *lpi) { if (!lpi) { pr_err("%s: ls_info is empty\n", __func__); return; } dev_dbg(&lpi->i2c_client->dev, "%s: ALS calibrated als_kadc=0x%x\n", __func__, als_kadc); if (als_kadc >> 16 == ALS_CALIBRATED) lpi->als_kadc = als_kadc & 0xFFFF; else { lpi->als_kadc = 0; dev_dbg(&lpi->i2c_client->dev, "%s: no ALS calibrated\n", __func__); } if (lpi->als_kadc && lpi->golden_adc > 0) { lpi->als_kadc = (lpi->als_kadc > 0 && lpi->als_kadc < 0x1000) ? lpi->als_kadc : lpi->golden_adc; lpi->als_gadc = lpi->golden_adc; } else { lpi->als_kadc = 1; lpi->als_gadc = 1; } dev_dbg(&lpi->i2c_client->dev, "%s: als_kadc=0x%x, als_gadc=0x%x\n", __func__, lpi->als_kadc, lpi->als_gadc); } static int lightsensor_update_table(struct cm36283_info *lpi) { uint32_t tmp_data[10]; int i; for (i = 0; i < 10; i++) { tmp_data[i] = (uint32_t)(*(lpi->adc_table + i)) * lpi->als_kadc / lpi->als_gadc; if (tmp_data[i] <= 0xFFFF) lpi->cali_table[i] = (uint16_t) tmp_data[i]; else lpi->cali_table[i] = 0xFFFF; dev_dbg(&lpi->i2c_client->dev, "%s: Calibrated adc_table: data[%d], %x\n", __func__, i, lpi->cali_table[i]); } return 0; } static int lightsensor_enable(struct cm36283_info *lpi) { int ret = -EIO; unsigned int delay; mutex_lock(&als_enable_mutex); ret = control_and_report(lpi, CONTROL_ALS, 1, 0); mutex_unlock(&als_enable_mutex); delay = atomic_read(&lpi->ls_poll_delay); if (lpi->polling) schedule_delayed_work(&lpi->ldwork, msecs_to_jiffies(delay)); return ret; } static int lightsensor_disable(struct cm36283_info *lpi) { int ret = -EIO; mutex_lock(&als_disable_mutex); dev_dbg(&lpi->i2c_client->dev, "disable lightsensor\n"); if (lpi->polling) cancel_delayed_work_sync(&lpi->ldwork); if ( 
lpi->als_enable == 0 ) { dev_err(&lpi->i2c_client->dev, "already disabled\n"); ret = 0; } else { ret = control_and_report(lpi, CONTROL_ALS, 0, 0); } mutex_unlock(&als_disable_mutex); return ret; } static int lightsensor_open(struct inode *inode, struct file *file) { struct cm36283_info *lpi = lp_info; int rc = 0; dev_dbg(&lpi->i2c_client->dev, "%s\n", __func__); if (lpi->lightsensor_opened) { dev_err(&lpi->i2c_client->dev, "%s: already opened\n", __func__); rc = -EBUSY; } lpi->lightsensor_opened = 1; return rc; } static int lightsensor_release(struct inode *inode, struct file *file) { struct cm36283_info *lpi = lp_info; dev_dbg(&lpi->i2c_client->dev, "%s\n", __func__); lpi->lightsensor_opened = 0; return 0; } static long lightsensor_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc, val; struct cm36283_info *lpi = lp_info; switch (cmd) { case LIGHTSENSOR_IOCTL_ENABLE: if (get_user(val, (unsigned long __user *)arg)) { rc = -EFAULT; break; } rc = val ? lightsensor_enable(lpi) : lightsensor_disable(lpi); break; case LIGHTSENSOR_IOCTL_GET_ENABLED: val = lpi->als_enable; rc = put_user(val, (unsigned long __user *)arg); break; default: pr_err("[LS][CM36283 error]%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); rc = -EINVAL; } return rc; } static const struct file_operations lightsensor_fops = { .owner = THIS_MODULE, .open = lightsensor_open, .release = lightsensor_release, .unlocked_ioctl = lightsensor_ioctl }; static ssize_t ps_adc_show(struct device *dev, struct device_attribute *attr, char *buf) { uint16_t value; int ret; struct cm36283_info *lpi = lp_info; int intr_val = -1; get_ps_adc_value(&value); if (gpio_is_valid(lpi->intr_pin)) intr_val = gpio_get_value(lpi->intr_pin); ret = snprintf(buf, PAGE_SIZE, "ADC[0x%04X], ENABLE=%d intr_pin=%d\n", value, lpi->ps_enable, intr_val); return ret; } static int ps_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enable) { struct cm36283_info *lpi = container_of(sensors_cdev, struct 
cm36283_info, ps_cdev); int ret; if (enable) ret = psensor_enable(lpi); else ret = psensor_disable(lpi); return ret; } static ssize_t ps_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ps_en; struct cm36283_info *lpi = lp_info; ps_en = -1; sscanf(buf, "%d", &ps_en); if (ps_en != 0 && ps_en != 1 && ps_en != 10 && ps_en != 13 && ps_en != 16) return -EINVAL; dev_dbg(&lpi->i2c_client->dev, "%s: ps_en=%d\n", __func__, ps_en); if (ps_en) psensor_enable(lpi); else psensor_disable(lpi); return count; } static ssize_t ps_parameters_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct cm36283_info *lpi = lp_info; ret = snprintf(buf, PAGE_SIZE, "PS_close_thd_set = 0x%x, PS_away_thd_set = 0x%x\n", lpi->ps_close_thd_set, lpi->ps_away_thd_set); return ret; } static ssize_t ps_parameters_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; char *token[10]; int i; unsigned long tmp; for (i = 0; i < 3; i++) token[i] = strsep((char **)&buf, " "); if (kstrtoul(token[0], 16, &tmp)) return -EINVAL; lpi->ps_close_thd_set = tmp; if (kstrtoul(token[1], 16, &tmp)) return -EINVAL; lpi->ps_away_thd_set = tmp; dev_dbg(&lpi->i2c_client->dev, "ps_close_thd_set:0x%x\n", lpi->ps_close_thd_set); dev_dbg(&lpi->i2c_client->dev, "ps_away_thd_set:0x%x\n", lpi->ps_away_thd_set); return count; } static ssize_t ps_conf_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info *lpi = lp_info; return sprintf(buf, "PS_CONF1 = 0x%x, PS_CONF3 = 0x%x\n", lpi->ps_conf1_val, lpi->ps_conf3_val); } static ssize_t ps_conf_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int code1, code2; struct cm36283_info *lpi = lp_info; sscanf(buf, "0x%x 0x%x", &code1, &code2); dev_dbg(&lpi->i2c_client->dev, "PS_CONF1:0x%x PS_CONF3:0x%x\n", code1, code2); lpi->ps_conf1_val = code1; lpi->ps_conf3_val = 
code2; _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF3, lpi->ps_conf3_val); _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val); return count; } static ssize_t ps_thd_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct cm36283_info *lpi = lp_info; ret = sprintf(buf, "%s ps_close_thd_set = 0x%x, ps_away_thd_set = 0x%x\n", __func__, lpi->ps_close_thd_set, lpi->ps_away_thd_set); return ret; } static ssize_t ps_thd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int code; struct cm36283_info *lpi = lp_info; sscanf(buf, "0x%x", &code); lpi->ps_away_thd_set = code &0xFF; lpi->ps_close_thd_set = (code & 0xFF00)>>8; dev_dbg(&lpi->i2c_client->dev, "ps_away_thd_set:0x%x\n", lpi->ps_away_thd_set); dev_dbg(&lpi->i2c_client->dev, "ps_close_thd_set:0x%x\n", lpi->ps_close_thd_set); return count; } static ssize_t ps_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret = 0; struct cm36283_info *lpi = lp_info; ret = sprintf(buf, "PS1: reg = 0x%x, PS3: reg = 0x%x, ps_close_thd_set = 0x%x, ps_away_thd_set = 0x%x\n", lpi->ps_conf1_val, lpi->ps_conf3_val, lpi->ps_close_thd_set, lpi->ps_away_thd_set); return ret; } static ssize_t ps_hw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int code; sscanf(buf, "0x%x", &code); return count; } static ssize_t ls_adc_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct cm36283_info *lpi = lp_info; ret = sprintf(buf, "ADC[0x%04X] => level %d\n", lpi->current_adc, lpi->current_level); return ret; } static int ls_enable_set(struct sensors_classdev *sensors_cdev, unsigned int enable) { struct cm36283_info *lpi = container_of(sensors_cdev, struct cm36283_info, als_cdev); int ret; if (enable) ret = lightsensor_enable(lpi); else ret = lightsensor_disable(lpi); if (ret < 0) { dev_err(&lpi->i2c_client->dev, "%s: set auto light sensor fail\n", __func__); return 
-EIO; } return 0; } static ssize_t ls_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret = 0; struct cm36283_info *lpi = lp_info; ret = sprintf(buf, "Light sensor Auto Enable = %d\n", lpi->als_enable); return ret; } static ssize_t ls_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = 0; int ls_auto; struct cm36283_info *lpi = lp_info; ls_auto = -1; sscanf(buf, "%d", &ls_auto); if (ls_auto != 0 && ls_auto != 1 && ls_auto != 147) return -EINVAL; if (ls_auto) { lpi->ls_calibrate = (ls_auto == 147) ? 1 : 0; ret = lightsensor_enable(lpi); } else { lpi->ls_calibrate = 0; ret = lightsensor_disable(lpi); } dev_dbg(&lpi->i2c_client->dev, "als_enable:0x%x\n", lpi->als_enable); dev_dbg(&lpi->i2c_client->dev, "ls_calibrate:0x%x\n", lpi->ls_calibrate); dev_dbg(&lpi->i2c_client->dev, "ls_auto:0x%x\n", ls_auto); if (ret < 0) { dev_err(&lpi->i2c_client->dev, "%s: set auto light sensor fail\n", __func__); return ret; } return count; } static ssize_t ls_kadc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info *lpi = lp_info; int ret; ret = sprintf(buf, "kadc = 0x%x", lpi->als_kadc); return ret; } static ssize_t ls_kadc_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; int kadc_temp = 0; sscanf(buf, "%d", &kadc_temp); mutex_lock(&als_get_adc_mutex); if (kadc_temp != 0) { lpi->als_kadc = kadc_temp; if (lpi->als_gadc != 0) { if (lightsensor_update_table(lpi) < 0) dev_err(&lpi->i2c_client->dev, "%s: update ls table fail\n", __func__); else dev_dbg(&lpi->i2c_client->dev, "%s: als_gadc =0x%x wait to be set\n", __func__, lpi->als_gadc); } } else { dev_err(&lpi->i2c_client->dev, "%s: als_kadc can't be set to zero\n", __func__); } mutex_unlock(&als_get_adc_mutex); return count; } static ssize_t ls_gadc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info 
*lpi = lp_info; int ret; ret = sprintf(buf, "gadc = 0x%x\n", lpi->als_gadc); return ret; } static ssize_t ls_gadc_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; int gadc_temp = 0; sscanf(buf, "%d", &gadc_temp); mutex_lock(&als_get_adc_mutex); if (gadc_temp != 0) { lpi->als_gadc = gadc_temp; if (lpi->als_kadc != 0) { if (lightsensor_update_table(lpi) < 0) dev_err(&lpi->i2c_client->dev, "%s: update ls table fail\n", __func__); } else { dev_dbg(&lpi->i2c_client->dev, "als_kadc =0x%x wait to be set\n", lpi->als_kadc); } } else { dev_err(&lpi->i2c_client->dev, "als_gadc can't be set to zero\n"); } mutex_unlock(&als_get_adc_mutex); return count; } static ssize_t ls_adc_table_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned length = 0; int i; for (i = 0; i < 10; i++) { length += sprintf(buf + length, "[CM36283]Get adc_table[%d] = 0x%x ; %d, Get cali_table[%d] = 0x%x ; %d, \n", i, *(lp_info->adc_table + i), *(lp_info->adc_table + i), i, *(lp_info->cali_table + i), *(lp_info->cali_table + i)); } return length; } static ssize_t ls_adc_table_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; char *token[10]; uint16_t tempdata[10]; int i; for (i = 0; i < 10; i++) { token[i] = strsep((char **)&buf, " "); tempdata[i] = simple_strtoul(token[i], NULL, 16); if (tempdata[i] < 1 || tempdata[i] > 0xffff) { dev_err(&lpi->i2c_client->dev, "adc_table[%d] = 0x%x error\n", i, tempdata[i]); return count; } } mutex_lock(&als_get_adc_mutex); for (i = 0; i < 10; i++) lpi->adc_table[i] = tempdata[i]; if (lightsensor_update_table(lpi) < 0) dev_err(&lpi->i2c_client->dev, "%s: update ls table fail\n", __func__); mutex_unlock(&als_get_adc_mutex); return count; } static ssize_t ls_conf_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info *lpi = lp_info; return sprintf(buf, "ALS_CONF = 
%x\n", lpi->ls_cmd); } static ssize_t ls_conf_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; int value = 0; sscanf(buf, "0x%x", &value); lpi->ls_cmd = value; dev_dbg(&lpi->i2c_client->dev, "ALS_CONF:0x%x\n", lpi->ls_cmd); _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd); return count; } static ssize_t ls_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info *lpi = lp_info; return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&lpi->ls_poll_delay)); } static ssize_t ls_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; unsigned long interval_ms; if (kstrtoul(buf, 10, &interval_ms)) return -EINVAL; if ((interval_ms < CM36283_LS_MIN_POLL_DELAY) || (interval_ms > CM36283_LS_MAX_POLL_DELAY)) return -EINVAL; atomic_set(&lpi->ls_poll_delay, (unsigned int) interval_ms); return count; } static int ls_poll_delay_set(struct sensors_classdev *sensors_cdev, unsigned int delay_msec) { struct cm36283_info *lpi = container_of(sensors_cdev, struct cm36283_info, als_cdev); if ((delay_msec < CM36283_LS_MIN_POLL_DELAY) || (delay_msec > CM36283_LS_MAX_POLL_DELAY)) return -EINVAL; atomic_set(&lpi->ls_poll_delay, delay_msec); return 0; } static ssize_t ps_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cm36283_info *lpi = lp_info; return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&lpi->ps_poll_delay)); } static ssize_t ps_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; unsigned long interval_ms; if (kstrtoul(buf, 10, &interval_ms)) return -EINVAL; if ((interval_ms < CM36283_PS_MIN_POLL_DELAY) || (interval_ms > CM36283_PS_MAX_POLL_DELAY)) return -EINVAL; atomic_set(&lpi->ps_poll_delay, (unsigned int) interval_ms); return count; } static int 
ps_poll_delay_set(struct sensors_classdev *sensors_cdev, unsigned int delay_msec) { struct cm36283_info *lpi = container_of(sensors_cdev, struct cm36283_info, als_cdev); if ((delay_msec < CM36283_PS_MIN_POLL_DELAY) || (delay_msec > CM36283_PS_MAX_POLL_DELAY)) return -EINVAL; atomic_set(&lpi->ps_poll_delay, delay_msec); return 0; } static ssize_t ls_fLevel_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "fLevel = %d\n", fLevel); } static ssize_t ls_fLevel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cm36283_info *lpi = lp_info; int value=0; sscanf(buf, "%d", &value); (value>=0)?(value=min(value,10)):(value=max(value,-1)); fLevel=value; input_report_abs(lpi->ls_input_dev, ABS_MISC, fLevel); input_sync(lpi->ls_input_dev); msleep(1000); fLevel=-1; return count; } static int lightsensor_setup(struct cm36283_info *lpi) { int ret; int range; lpi->ls_input_dev = input_allocate_device(); if (!lpi->ls_input_dev) { pr_err( "[LS][CM36283 error]%s: could not allocate ls input device\n", __func__); return -ENOMEM; } lpi->ls_input_dev->name = "cm36283-ls"; lpi->ls_input_dev->id.bustype = BUS_I2C; set_bit(EV_ABS, lpi->ls_input_dev->evbit); range = get_als_range(); input_set_abs_params(lpi->ls_input_dev, ABS_MISC, 0, range, 0, 0); ret = input_register_device(lpi->ls_input_dev); if (ret < 0) { pr_err("[LS][CM36283 error]%s: can not register ls input device\n", __func__); goto err_free_ls_input_device; } return ret; err_free_ls_input_device: input_free_device(lpi->ls_input_dev); return ret; } static int psensor_setup(struct cm36283_info *lpi) { int ret; lpi->ps_input_dev = input_allocate_device(); if (!lpi->ps_input_dev) { pr_err( "[PS][CM36283 error]%s: could not allocate ps input device\n", __func__); return -ENOMEM; } lpi->ps_input_dev->name = "cm36283-ps"; lpi->ps_input_dev->id.bustype = BUS_I2C; set_bit(EV_ABS, lpi->ps_input_dev->evbit); input_set_abs_params(lpi->ps_input_dev, 
ABS_DISTANCE, 0, 1, 0, 0); ret = input_register_device(lpi->ps_input_dev); if (ret < 0) { pr_err( "[PS][CM36283 error]%s: could not register ps input device\n", __func__); goto err_free_ps_input_device; } return ret; err_free_ps_input_device: input_free_device(lpi->ps_input_dev); return ret; } static int initial_cm36283(struct cm36283_info *lpi) { int val, ret; uint16_t idReg; val = gpio_get_value(lpi->intr_pin); dev_dbg(&lpi->i2c_client->dev, "%s, INTERRUPT GPIO val = %d\n", __func__, val); ret = _cm36283_I2C_Read_Word(lpi->slave_addr, ID_REG, &idReg); return ret; } static int cm36283_setup(struct cm36283_info *lpi) { int ret = 0; als_power(1); msleep(5); ret = gpio_request(lpi->intr_pin, "gpio_cm36283_intr"); if (ret < 0) { pr_err("[PS][CM36283 error]%s: gpio %d request failed (%d)\n", __func__, lpi->intr_pin, ret); return ret; } ret = gpio_direction_input(lpi->intr_pin); if (ret < 0) { pr_err( "[PS][CM36283 error]%s: fail to set gpio %d as input (%d)\n", __func__, lpi->intr_pin, ret); goto fail_free_intr_pin; } ret = initial_cm36283(lpi); if (ret < 0) { pr_err( "[PS_ERR][CM36283 error]%s: fail to initial cm36283 (%d)\n", __func__, ret); goto fail_free_intr_pin; } /*Default disable P sensor and L sensor*/ ls_initial_cmd(lpi); psensor_initial_cmd(lpi); if (!lpi->polling) ret = request_any_context_irq(lpi->irq, cm36283_irq_handler, IRQF_TRIGGER_LOW, "cm36283", lpi); if (ret < 0) { pr_err( "[PS][CM36283 error]%s: req_irq(%d) fail for gpio %d (%d)\n", __func__, lpi->irq, lpi->intr_pin, ret); goto fail_free_intr_pin; } return ret; fail_free_intr_pin: gpio_free(lpi->intr_pin); return ret; } static int cm36283_parse_dt(struct device *dev, struct cm36283_platform_data *pdata) { struct device_node *np = dev->of_node; u32 levels[CM36283_LEVELS_SIZE], i; u32 temp_val; int rc; rc = of_get_named_gpio_flags(np, "capella,interrupt-gpio", 0, NULL); if (rc < 0) { dev_err(dev, "Unable to read interrupt pin number\n"); return rc; } else { pdata->intr = rc; } rc = 
of_property_read_u32_array(np, "capella,levels", levels, CM36283_LEVELS_SIZE); if (rc) { dev_err(dev, "Unable to read levels data\n"); return rc; } else { for (i = 0; i < CM36283_LEVELS_SIZE; i++) pdata->levels[i] = levels[i]; } rc = of_property_read_u32(np, "capella,ps_close_thd_set", &temp_val); if (rc) { dev_err(dev, "Unable to read ps_close_thd_set\n"); return rc; } else { pdata->ps_close_thd_set = (u8)temp_val; } rc = of_property_read_u32(np, "capella,ps_away_thd_set", &temp_val); if (rc) { dev_err(dev, "Unable to read ps_away_thd_set\n"); return rc; } else { pdata->ps_away_thd_set = (u8)temp_val; } rc = of_property_read_u32(np, "capella,ls_cmd", &temp_val); if (rc) { dev_err(dev, "Unable to read ls_cmd\n"); return rc; } else { pdata->ls_cmd = (u16)temp_val; } rc = of_property_read_u32(np, "capella,ps_conf1_val", &temp_val); if (rc) { dev_err(dev, "Unable to read ps_conf1_val\n"); return rc; } else { pdata->ps_conf1_val = (u16)temp_val; } rc = of_property_read_u32(np, "capella,ps_conf3_val", &temp_val); if (rc) { dev_err(dev, "Unable to read ps_conf3_val\n"); return rc; } else { pdata->ps_conf3_val = (u16)temp_val; } pdata->polling = of_property_read_bool(np, "capella,use-polling"); return 0; } static int create_sysfs_interfaces(struct device *dev, struct device_attribute *attributes, int len) { int i; int err; for (i = 0; i < len; i++) { err = device_create_file(dev, attributes + i); if (err) goto error; } return 0; error: for (; i >= 0; i--) device_remove_file(dev, attributes + i); dev_err(dev, "%s:Unable to create interface\n", __func__); return err; } static int remove_sysfs_interfaces(struct device *dev, struct device_attribute *attributes, int len) { int i; for (i = 0; i < len; i++) device_remove_file(dev, attributes + i); return 0; } static struct device_attribute light_attr[] = { __ATTR(ls_adc, 0664, ls_adc_show, NULL), __ATTR(ls_kadc, 0664, ls_kadc_show, ls_kadc_store), __ATTR(ls_gadc, 0664, ls_gadc_show, ls_gadc_store), __ATTR(ls_conf, 0664, 
ls_conf_show, ls_conf_store), __ATTR(ls_adc_table, 0664, ls_adc_table_show, ls_adc_table_store), __ATTR(poll_delay, 0664, ls_poll_delay_show, ls_poll_delay_store), __ATTR(enable, 0664, ls_enable_show, ls_enable_store), }; static struct device_attribute proximity_attr[] = { __ATTR(enable, 0664, ps_adc_show, ps_enable_store), __ATTR(ps_parameters, 0664, ps_parameters_show, ps_parameters_store), __ATTR(ps_conf, 0664, ps_conf_show, ps_conf_store), __ATTR(ps_hw, 0664, ps_hw_show, ps_hw_store), __ATTR(ps_thd, 0664, ps_thd_show, ps_thd_store), __ATTR(poll_delay, 0664, ps_poll_delay_show, ps_poll_delay_store), __ATTR(ls_flevel, 0664, ls_fLevel_show, ls_fLevel_store), }; static int cm36283_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; struct cm36283_info *lpi; struct cm36283_platform_data *pdata; lpi = kzalloc(sizeof(struct cm36283_info), GFP_KERNEL); if (!lpi) return -ENOMEM; lpi->i2c_client = client; if (client->dev.of_node) { pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&client->dev, "Failed to allocate memory for pdata\n"); ret = -ENOMEM; goto err_platform_data_null; } ret = cm36283_parse_dt(&client->dev, pdata); pdata->slave_addr = client->addr; if (ret) { dev_err(&client->dev, "Failed to get pdata from device tree\n"); goto err_parse_dt; } } else { pdata = client->dev.platform_data; if (!pdata) { dev_err(&client->dev, "%s: Assign platform_data error!!\n", __func__); ret = -EBUSY; goto err_platform_data_null; } } lpi->irq = client->irq; i2c_set_clientdata(client, lpi); lpi->intr_pin = pdata->intr; lpi->adc_table = pdata->levels; lpi->power = pdata->power; lpi->slave_addr = pdata->slave_addr; lpi->ps_away_thd_set = pdata->ps_away_thd_set; lpi->ps_close_thd_set = pdata->ps_close_thd_set; lpi->ps_conf1_val = pdata->ps_conf1_val; lpi->ps_conf3_val = pdata->ps_conf3_val; lpi->polling = pdata->polling; atomic_set(&lpi->ls_poll_delay, (unsigned int) CM36283_LS_DEFAULT_POLL_DELAY); 
atomic_set(&lpi->ps_poll_delay, (unsigned int) CM36283_PS_DEFAULT_POLL_DELAY); lpi->ls_cmd = pdata->ls_cmd; lpi->record_clear_int_fail=0; dev_dbg(&lpi->i2c_client->dev, "[PS][CM36283] %s: ls_cmd 0x%x\n", __func__, lpi->ls_cmd); if (pdata->ls_cmd == 0) { lpi->ls_cmd = CM36283_ALS_IT_80ms | CM36283_ALS_GAIN_2; } lp_info = lpi; mutex_init(&CM36283_control_mutex); mutex_init(&als_enable_mutex); mutex_init(&als_disable_mutex); mutex_init(&als_get_adc_mutex); mutex_init(&ps_enable_mutex); mutex_init(&ps_disable_mutex); mutex_init(&ps_get_adc_mutex); /* * SET LUX STEP FACTOR HERE * if adc raw value one step = 5/100 = 1/20 = 0.05 lux * the following will set the factor 0.05 = 1/20 * and lpi->golden_adc = 1; * set als_kadc = (ALS_CALIBRATED << 16) | 20; */ als_kadc = (ALS_CALIBRATED << 16) | 10; lpi->golden_adc = 100; lpi->ls_calibrate = 0; lightsensor_set_kvalue(lpi); ret = lightsensor_update_table(lpi); if (ret < 0) { pr_err("[LS][CM36283 error]%s: update ls table fail\n", __func__); goto err_lightsensor_update_table; } lpi->lp_wq = create_singlethread_workqueue("cm36283_wq"); if (!lpi->lp_wq) { pr_err("[PS][CM36283 error]%s: can't create workqueue\n", __func__); ret = -ENOMEM; goto err_create_singlethread_workqueue; } wake_lock_init(&(lpi->ps_wake_lock), WAKE_LOCK_SUSPEND, "proximity"); ret = cm36283_power_set(lpi, true); if (ret < 0) { dev_err(&client->dev, "%s:cm36283 power on error!\n", __func__); goto err_cm36283_power_on; } ret = cm36283_setup(lpi); if (ret < 0) { pr_err("[PS_ERR][CM36283 error]%s: cm36283_setup error!\n", __func__); goto err_cm36283_setup; } ret = lightsensor_setup(lpi); if (ret < 0) { pr_err("[LS][CM36283 error]%s: lightsensor_setup error!!\n", __func__); goto err_lightsensor_setup; } ret = psensor_setup(lpi); if (ret < 0) { pr_err("[PS][CM36283 error]%s: psensor_setup error!!\n", __func__); goto err_psensor_setup; } ret = create_sysfs_interfaces(&lpi->ls_input_dev->dev, light_attr, ARRAY_SIZE(light_attr)); if (ret < 0) { dev_err(&client->dev, 
"failed to create sysfs\n"); goto err_input_cleanup; } ret = create_sysfs_interfaces(&lpi->ps_input_dev->dev, proximity_attr, ARRAY_SIZE(proximity_attr)); if (ret < 0) { dev_err(&client->dev, "failed to create sysfs\n"); goto err_light_sysfs_cleanup; } lpi->als_cdev = sensors_light_cdev; lpi->als_cdev.sensors_enable = ls_enable_set; lpi->als_cdev.sensors_poll_delay = ls_poll_delay_set; lpi->als_cdev.min_delay = CM36283_LS_MIN_POLL_DELAY * 1000; lpi->ps_cdev = sensors_proximity_cdev; lpi->ps_cdev.sensors_enable = ps_enable_set; lpi->ps_cdev.sensors_poll_delay = ps_poll_delay_set; lpi->ps_cdev.min_delay = CM36283_PS_MIN_POLL_DELAY * 1000; ret = sensors_classdev_register(&client->dev, &lpi->als_cdev); if (ret) goto err_proximity_sysfs_cleanup; ret = sensors_classdev_register(&client->dev, &lpi->ps_cdev); if (ret) goto err_create_class_sysfs; mutex_init(&wq_lock); INIT_DELAYED_WORK(&lpi->ldwork, lsensor_delay_work_handler); INIT_DELAYED_WORK(&lpi->pdwork, psensor_delay_work_handler); dev_dbg(&lpi->i2c_client->dev, "%s: Probe success!\n", __func__); return ret; err_create_class_sysfs: sensors_classdev_unregister(&lpi->als_cdev); err_proximity_sysfs_cleanup: remove_sysfs_interfaces(&lpi->ps_input_dev->dev, proximity_attr, ARRAY_SIZE(proximity_attr)); err_light_sysfs_cleanup: remove_sysfs_interfaces(&lpi->ls_input_dev->dev, light_attr, ARRAY_SIZE(light_attr)); err_input_cleanup: input_unregister_device(lpi->ps_input_dev); input_free_device(lpi->ps_input_dev); err_psensor_setup: input_unregister_device(lpi->ls_input_dev); input_free_device(lpi->ls_input_dev); err_lightsensor_setup: err_cm36283_setup: cm36283_power_set(lpi, false); err_cm36283_power_on: wake_lock_destroy(&(lpi->ps_wake_lock)); destroy_workqueue(lpi->lp_wq); err_create_singlethread_workqueue: err_lightsensor_update_table: mutex_destroy(&CM36283_control_mutex); mutex_destroy(&als_enable_mutex); mutex_destroy(&als_disable_mutex); mutex_destroy(&als_get_adc_mutex); mutex_destroy(&ps_enable_mutex); 
mutex_destroy(&ps_disable_mutex); mutex_destroy(&ps_get_adc_mutex); err_parse_dt: if (client->dev.of_node && (pdata != NULL)) devm_kfree(&client->dev, pdata); err_platform_data_null: kfree(lpi); dev_err(&client->dev, "%s:error exit! ret = %d\n", __func__, ret); return ret; } static int control_and_report(struct cm36283_info *lpi, uint8_t mode, uint16_t param, int report) { int ret = 0; uint16_t adc_value = 0; uint16_t ps_data = 0; int level = 0, i, val; mutex_lock(&CM36283_control_mutex); if( mode == CONTROL_ALS ){ if(param){ lpi->ls_cmd &= CM36283_ALS_SD_MASK; } else { lpi->ls_cmd |= CM36283_ALS_SD; } _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd); lpi->als_enable=param; } else if( mode == CONTROL_PS ){ if(param){ lpi->ps_conf1_val &= CM36283_PS_SD_MASK; lpi->ps_conf1_val |= CM36283_PS_INT_IN_AND_OUT; } else { lpi->ps_conf1_val |= CM36283_PS_SD; lpi->ps_conf1_val &= CM36283_PS_INT_MASK; } _cm36283_I2C_Write_Word(lpi->slave_addr, PS_CONF1, lpi->ps_conf1_val); lpi->ps_enable=param; } if((mode == CONTROL_ALS)||(mode == CONTROL_PS)){ if( param==1 ){ msleep(100); } } if(lpi->als_enable){ if( mode == CONTROL_ALS || ( mode == CONTROL_INT_ISR_REPORT && ((param&INT_FLAG_ALS_IF_L)||(param&INT_FLAG_ALS_IF_H)))){ lpi->ls_cmd &= CM36283_ALS_INT_MASK; ret = _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd); get_ls_adc_value(&adc_value, 0); if( lpi->ls_calibrate ) { for (i = 0; i < 10; i++) { if (adc_value <= (*(lpi->cali_table + i))) { level = i; if (*(lpi->cali_table + i)) break; } if ( i == 9) {/*avoid i = 10, because 'cali_table' of size is 10 */ level = i; break; } } } else { for (i = 0; i < 10; i++) { if (adc_value <= (*(lpi->adc_table + i))) { level = i; if (*(lpi->adc_table + i)) break; } if ( i == 9) {/*avoid i = 10, because 'cali_table' of size is 10 */ level = i; break; } } } if (!lpi->polling) { ret = set_lsensor_range(((i == 0) || (adc_value == 0)) ? 
0 : *(lpi->cali_table + (i - 1)) + 1, *(lpi->cali_table + i)); lpi->ls_cmd |= CM36283_ALS_INT_EN; } ret = _cm36283_I2C_Write_Word(lpi->slave_addr, ALS_CONF, lpi->ls_cmd); if (report) { lpi->current_level = level; lpi->current_adc = adc_value; input_report_abs(lpi->ls_input_dev, ABS_MISC, level); input_sync(lpi->ls_input_dev); } } } #define PS_CLOSE 1 #define PS_AWAY (1<<1) #define PS_CLOSE_AND_AWAY PS_CLOSE+PS_AWAY if (report && (lpi->ps_enable)) { int ps_status = 0; if (mode == CONTROL_PS) ps_status = PS_CLOSE_AND_AWAY; else if (mode == CONTROL_INT_ISR_REPORT) { if (param & INT_FLAG_PS_IF_CLOSE) ps_status |= PS_CLOSE; if (param & INT_FLAG_PS_IF_AWAY) ps_status |= PS_AWAY; } if (ps_status != 0) { switch (ps_status) { case PS_CLOSE_AND_AWAY: get_stable_ps_adc_value(&ps_data); val = (ps_data >= lpi->ps_close_thd_set) ? 0 : 1; break; case PS_AWAY: val = 1; break; case PS_CLOSE: val = 0; break; }; input_report_abs(lpi->ps_input_dev, ABS_DISTANCE, val); input_sync(lpi->ps_input_dev); } } mutex_unlock(&CM36283_control_mutex); return ret; } static int cm36283_power_set(struct cm36283_info *info, bool on) { int rc; if (on) { info->vdd = regulator_get(&info->i2c_client->dev, "vdd"); if (IS_ERR(info->vdd)) { rc = PTR_ERR(info->vdd); dev_err(&info->i2c_client->dev, "Regulator get failed vdd rc=%d\n", rc); goto err_vdd_get; } if (regulator_count_voltages(info->vdd) > 0) { rc = regulator_set_voltage(info->vdd, CM36283_VDD_MIN_UV, CM36283_VDD_MAX_UV); if (rc) { dev_err(&info->i2c_client->dev, "Regulator set failed vdd rc=%d\n", rc); goto err_vdd_set_vtg; } } info->vio = regulator_get(&info->i2c_client->dev, "vio"); if (IS_ERR(info->vio)) { rc = PTR_ERR(info->vio); dev_err(&info->i2c_client->dev, "Regulator get failed vio rc=%d\n", rc); goto err_vio_get; } if (regulator_count_voltages(info->vio) > 0) { rc = regulator_set_voltage(info->vio, CM36283_VI2C_MIN_UV, CM36283_VI2C_MAX_UV); if (rc) { dev_err(&info->i2c_client->dev, "Regulator set failed vio rc=%d\n", rc); goto 
err_vio_set_vtg; } } rc = regulator_enable(info->vdd); if (rc) { dev_err(&info->i2c_client->dev, "Regulator vdd enable failed rc=%d\n", rc); goto err_vdd_ena; } rc = regulator_enable(info->vio); if (rc) { dev_err(&info->i2c_client->dev, "Regulator vio enable failed rc=%d\n", rc); goto err_vio_ena; } } else { rc = regulator_disable(info->vdd); if (rc) { dev_err(&info->i2c_client->dev, "Regulator vdd disable failed rc=%d\n", rc); return rc; } if (regulator_count_voltages(info->vdd) > 0) regulator_set_voltage(info->vdd, 0, CM36283_VDD_MAX_UV); regulator_put(info->vdd); rc = regulator_disable(info->vio); if (rc) { dev_err(&info->i2c_client->dev, "Regulator vio disable failed rc=%d\n", rc); return rc; } if (regulator_count_voltages(info->vio) > 0) regulator_set_voltage(info->vio, 0, CM36283_VI2C_MAX_UV); regulator_put(info->vio); } return 0; err_vio_ena: regulator_disable(info->vdd); err_vdd_ena: if (regulator_count_voltages(info->vio) > 0) regulator_set_voltage(info->vio, 0, CM36283_VI2C_MAX_UV); err_vio_set_vtg: regulator_put(info->vio); err_vio_get: if (regulator_count_voltages(info->vdd) > 0) regulator_set_voltage(info->vdd, 0, CM36283_VDD_MAX_UV); err_vdd_set_vtg: regulator_put(info->vdd); err_vdd_get: return rc; } #ifdef CONFIG_PM_SLEEP static int cm36283_suspend(struct device *dev) { struct cm36283_info *lpi = lp_info; if (lpi->als_enable) { if (lightsensor_disable(lpi)) goto out; lpi->als_enable = 1; } if (cm36283_power_set(lpi, 0)) goto out; return 0; out: dev_err(&lpi->i2c_client->dev, "%s:failed during resume operation.\n", __func__); return -EIO; } static int cm36283_resume(struct device *dev) { struct cm36283_info *lpi = lp_info; if (cm36283_power_set(lpi, 1)) goto out; if (lpi->als_enable) { ls_initial_cmd(lpi); psensor_initial_cmd(lpi); if (lightsensor_enable(lpi)) goto out; } return 0; out: dev_err(&lpi->i2c_client->dev, "%s:failed during resume operation.\n", __func__); return -EIO; } #endif static UNIVERSAL_DEV_PM_OPS(cm36283_pm, cm36283_suspend, 
cm36283_resume, NULL); static const struct i2c_device_id cm36283_i2c_id[] = { {CM36283_I2C_NAME, 0}, {} }; static struct of_device_id cm36283_match_table[] = { { .compatible = "capella,cm36283",}, { }, }; static struct i2c_driver cm36283_driver = { .id_table = cm36283_i2c_id, .probe = cm36283_probe, .driver = { .name = CM36283_I2C_NAME, .owner = THIS_MODULE, .pm = &cm36283_pm, .of_match_table = cm36283_match_table, }, }; static int __init cm36283_init(void) { return i2c_add_driver(&cm36283_driver); } static void __exit cm36283_exit(void) { i2c_del_driver(&cm36283_driver); } module_init(cm36283_init); module_exit(cm36283_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CM36283 Driver"); MODULE_AUTHOR("Frank Hsieh <pengyueh@gmail.com>");
gpl-2.0
phjanderson/Kernel-3188
net/x25/af_x25.c
1699
41029
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * X.25 002 Jonathan Naylor Centralised disconnect handling. * New timer architecture. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of * facilities negotiation and increased * the throughput upper limit. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups * 2000-09-04 Henner Eisen Set sock->state in x25_accept(). * Fixed x25_output() related skb leakage. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN * 2002-10-06 Arnaldo C. 
Melo Get rid of cli/sti, move proc stuff to * x25_proc.c, using seq_file * 2005-04-02 Shaun Pereira Selective sub address matching * with call user data * 2005-04-15 Shaun Pereira Fast select with no restriction on * response */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/notifier.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ctype.h> #include <net/x25.h> #include <net/compat.h> int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20; int sysctl_x25_call_request_timeout = X25_DEFAULT_T21; int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; int sysctl_x25_forward = 0; HLIST_HEAD(x25_list); DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; static struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { char device[200-sizeof(compat_ulong_t)]; compat_ulong_t global_facil_mask; compat_uint_t extended; }; #endif int x25_parse_address_block(struct sk_buff *skb, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned char len; int needed; int rc; if (skb->len < 1) { /* packet has no address block */ rc = 0; goto empty; } len = *skb->data; needed = 1 + (len >> 4) + (len & 0x0f); if (skb->len < needed) { /* packet is too short to hold the addresses it claims to hold */ rc = -1; goto empty; } return x25_addr_ntoa(skb->data, called_addr, calling_addr); empty: *called_addr->x25_addr = 0; *calling_addr->x25_addr = 0; return rc; 
} int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; unsigned int i; called_len = (*p >> 0) & 0x0F; calling_len = (*p >> 4) & 0x0F; called = called_addr->x25_addr; calling = calling_addr->x25_addr; p++; for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *called++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *called++ = ((*p >> 4) & 0x0F) + '0'; } } else { if (i % 2 != 0) { *calling++ = ((*p >> 0) & 0x0F) + '0'; p++; } else { *calling++ = ((*p >> 4) & 0x0F) + '0'; } } } *called = *calling = '\0'; return 1 + (called_len + calling_len + 1) / 2; } int x25_addr_aton(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { unsigned int called_len, calling_len; char *called, *calling; int i; called = called_addr->x25_addr; calling = calling_addr->x25_addr; called_len = strlen(called); calling_len = strlen(calling); *p++ = (calling_len << 4) | (called_len << 0); for (i = 0; i < (called_len + calling_len); i++) { if (i < called_len) { if (i % 2 != 0) { *p |= (*called++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*called++ - '0') << 4; } } else { if (i % 2 != 0) { *p |= (*calling++ - '0') << 0; p++; } else { *p = 0x00; *p |= (*calling++ - '0') << 4; } } } return 1 + (called_len + calling_len + 1) / 2; } /* * Socket removal during an interrupt is now safe. */ static void x25_remove_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_del_node_init(sk); write_unlock_bh(&x25_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void x25_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; write_lock_bh(&x25_list_lock); sk_for_each(s, node, &x25_list) if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); } /* * Handle device status changes. 
*/ static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type == ARPHRD_X25 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) || dev->type == ARPHRD_ETHER #endif ) { switch (event) { case NETDEV_UP: x25_link_device_up(dev); break; case NETDEV_GOING_DOWN: nb = x25_get_neigh(dev); if (nb) { x25_terminate_link(nb); x25_neigh_put(nb); } break; case NETDEV_DOWN: x25_kill_by_device(dev); x25_route_device_down(dev); x25_link_device_down(dev); break; } } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ static void x25_insert_socket(struct sock *sk) { write_lock_bh(&x25_list_lock); sk_add_node(sk, &x25_list); write_unlock_bh(&x25_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. Check the full list for an address/cud match. * If no cuds match return the next_best thing, an address match. * Note: if a listening socket has cud set it must only get calls * with matching cud. */ static struct sock *x25_find_listener(struct x25_address *addr, struct sk_buff *skb) { struct sock *s; struct sock *next_best; struct hlist_node *node; read_lock_bh(&x25_list_lock); next_best = NULL; sk_for_each(s, node, &x25_list) if ((!strcmp(addr->x25_addr, x25_sk(s)->source_addr.x25_addr) || !strcmp(addr->x25_addr, null_x25_address.x25_addr)) && s->sk_state == TCP_LISTEN) { /* * Found a listening socket, now check the incoming * call user data vs this sockets call user data */ if (x25_sk(s)->cudmatchlength > 0 && skb->len >= x25_sk(s)->cudmatchlength) { if((memcmp(x25_sk(s)->calluserdata.cuddata, skb->data, x25_sk(s)->cudmatchlength)) == 0) { sock_hold(s); goto found; } } else next_best = s; } if (next_best) { s = next_best; sock_hold(s); goto found; } s = NULL; found: read_unlock_bh(&x25_list_lock); return s; } /* * Find a connected X.25 socket given my LCI and neighbour. 
*/ static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; struct hlist_node *node; sk_for_each(s, node, &x25_list) if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) { sock_hold(s); goto found; } s = NULL; found: return s; } struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb) { struct sock *s; read_lock_bh(&x25_list_lock); s = __x25_find_socket(lci, nb); read_unlock_bh(&x25_list_lock); return s; } /* * Find a unique LCI for a given device. */ static unsigned int x25_new_lci(struct x25_neigh *nb) { unsigned int lci = 1; struct sock *sk; read_lock_bh(&x25_list_lock); while ((sk = __x25_find_socket(lci, nb)) != NULL) { sock_put(sk); if (++lci == 4096) { lci = 0; break; } } read_unlock_bh(&x25_list_lock); return lci; } /* * Deferred destroy. */ static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(unsigned long data) { x25_destroy_socket_from_timer((struct sock *)data); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. 
* Not static as it's used by the timer */ static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; x25_stop_heartbeat(sk); x25_stop_timer(sk); x25_remove_socket(sk); x25_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* * Queue the unaccepted socket for death */ skb->sk->sk_state = TCP_LISTEN; sock_set_flag(skb->sk, SOCK_DEAD); x25_start_heartbeat(skb->sk); x25_sk(skb->sk)->state = X25_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; sk->sk_timer.data = (unsigned long)sk; add_timer(&sk->sk_timer); } else { /* drop last reference so sock_put will free */ __sock_put(sk); } } void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * X.25 socket object. 
*/ static int x25_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; int rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EINVAL; if (optlen < sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (int __user *)optval)) goto out; if (opt) set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); else clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = 0; out: return rc; } static int x25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val, len, rc = -ENOPROTOOPT; if (level != SOL_X25 || optname != X25_QBITINCL) goto out; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if (len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen)) goto out; val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; out: return rc; } static int x25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; rc = 0; } release_sock(sk); return rc; } static struct proto x25_proto = { .name = "X25", .owner = THIS_MODULE, .obj_size = sizeof(struct x25_sock), }; static struct sock *x25_alloc_socket(struct net *net) { struct x25_sock *x25; struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto); if (!sk) goto out; sock_init_data(NULL, sk); x25 = x25_sk(sk); skb_queue_head_init(&x25->ack_queue); skb_queue_head_init(&x25->fragment_queue); skb_queue_head_init(&x25->interrupt_in_queue); skb_queue_head_init(&x25->interrupt_out_queue); out: return sk; } static int x25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct x25_sock *x25; int rc = -EAFNOSUPPORT; if 
(!net_eq(net, &init_net)) goto out; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_SEQPACKET) goto out; rc = -EINVAL; if (protocol) goto out; rc = -ENOBUFS; if ((sk = x25_alloc_socket(net)) == NULL) goto out; x25 = x25_sk(sk); sock_init_data(sock, sk); x25_init_timers(sk); sock->ops = &x25_proto_ops; sk->sk_protocol = protocol; sk->sk_backlog_rcv = x25_backlog_rcv; x25->t21 = sysctl_x25_call_request_timeout; x25->t22 = sysctl_x25_reset_request_timeout; x25->t23 = sysctl_x25_clear_request_timeout; x25->t2 = sysctl_x25_ack_holdback_timeout; x25->state = X25_STATE_0; x25->cudmatchlength = 0; set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */ /* on call accept */ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE; x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; x25->facilities.throughput = 0; /* by default don't negotiate throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; memset(x25->dte_facilities.called_ae, '\0', sizeof(x25->dte_facilities.called_ae)); memset(x25->dte_facilities.calling_ae, '\0', sizeof(x25->dte_facilities.calling_ae)); rc = 0; out: return rc; } static struct sock *x25_make_new(struct sock *osk) { struct sock *sk = NULL; struct x25_sock *x25, *ox25; if (osk->sk_type != SOCK_SEQPACKET) goto out; if ((sk = x25_alloc_socket(sock_net(osk))) == NULL) goto out; x25 = x25_sk(sk); sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sk->sk_backlog_rcv = osk->sk_backlog_rcv; sock_copy_flags(sk, osk); ox25 = x25_sk(osk); x25->t21 = ox25->t21; x25->t22 = ox25->t22; x25->t23 = ox25->t23; x25->t2 = ox25->t2; x25->flags = ox25->flags; x25->facilities = ox25->facilities; x25->dte_facilities = ox25->dte_facilities; 
x25->cudmatchlength = ox25->cudmatchlength; clear_bit(X25_INTERRUPT_FLAG, &x25->flags); x25_init_timers(sk); out: return sk; } static int x25_release(struct socket *sock) { struct sock *sk = sock->sk; struct x25_sock *x25; if (!sk) return 0; x25 = x25_sk(sk); sock_hold(sk); lock_sock(sk); switch (x25->state) { case X25_STATE_0: case X25_STATE_2: x25_disconnect(sk, 0, 0, 0); __x25_destroy_socket(sk); goto out; case X25_STATE_1: case X25_STATE_3: case X25_STATE_4: x25_clear_queues(sk); x25_write_internal(sk, X25_CLEAR_REQUEST); x25_start_t23timer(sk); x25->state = X25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; } sock_orphan(sk); out: release_sock(sk); sock_put(sk); return 0; } static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; } len = strlen(addr->sx25_addr.x25_addr); for (i = 0; i < len; i++) { if (!isdigit(addr->sx25_addr.x25_addr[i])) { rc = -EINVAL; goto out; } } lock_sock(sk); x25_sk(sk)->source_addr = addr->sx25_addr; x25_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: return rc; } static int x25_wait_for_connection_establishment(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int rc; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = sock_error(sk); if (rc) { sk->sk_socket->state = SS_UNCONNECTED; break; } rc = 0; if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); schedule(); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return 
rc; } static int x25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; struct x25_route *rt; int rc = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out; /* Connect completed during a ERESTARTSYS event */ } rc = -ECONNREFUSED; if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; goto out; } rc = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) goto out; rc = -ENETUNREACH; rt = x25_get_route(&addr->sx25_addr); if (!rt) goto out; x25->neighbour = x25_get_neigh(rt->dev); if (!x25->neighbour) goto out_put_route; x25_limit_facilities(&x25->facilities, x25->neighbour); x25->lci = x25_new_lci(x25->neighbour); if (!x25->lci) goto out_put_neigh; rc = -EINVAL; if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ goto out_put_neigh; if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr)) memset(&x25->source_addr, '\0', X25_ADDR_LEN); x25->dest_addr = addr->sx25_addr; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; x25->state = X25_STATE_1; x25_write_internal(sk, X25_CALL_REQUEST); x25_start_heartbeat(sk); x25_start_t21timer(sk); /* Now the loop */ rc = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out_put_neigh; rc = x25_wait_for_connection_establishment(sk); if (rc) goto out_put_neigh; sock->state = SS_CONNECTED; rc = 0; out_put_neigh: if (rc) x25_neigh_put(x25->neighbour); out_put_route: x25_route_put(rt); out: release_sock(sk); return rc; } static int 
x25_wait_for_data(struct sock *sk, long timeout) { DECLARE_WAITQUEUE(wait, current); int rc = 0; add_wait_queue_exclusive(sk_sleep(sk), &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; if (skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } else break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int x25_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sock *newsk; struct sk_buff *skb; int rc = -EINVAL; if (!sk) goto out; rc = -EOPNOTSUPP; if (sk->sk_type != SOCK_SEQPACKET) goto out; lock_sock(sk); rc = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out2; rc = x25_wait_for_data(sk, sk->sk_rcvtimeo); if (rc) goto out2; skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto out2; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; rc = 0; out2: release_sock(sk); out: return rc; } static int x25_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); int rc = 0; if (peer) { if (sk->sk_state != TCP_ESTABLISHED) { rc = -ENOTCONN; goto out; } sx25->sx25_addr = x25->dest_addr; } else sx25->sx25_addr = x25->source_addr; sx25->sx25_family = AF_X25; *uaddr_len = sizeof(*sx25); out: return rc; } int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, unsigned int lci) { struct sock *sk; struct sock *make; struct x25_sock *makex25; struct x25_address source_addr, dest_addr; struct x25_facilities facilities; struct x25_dte_facilities dte_facilities; int len, addr_len, rc; /* * Remove 
the LCI and frame type. */ skb_pull(skb, X25_STD_MIN_LEN); /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. * * Address block is mandatory in call request packets */ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); if (addr_len <= 0) goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener * * Facilities length is mandatory in call request packets */ if (skb->len < 1) goto out_clear_request; len = skb->data[0] + 1; if (skb->len < len) goto out_clear_request; skb_pull(skb,len); /* * Find a listener for the particular address/cud pair. */ sk = x25_find_listener(&source_addr,skb); skb_push(skb,len); if (sk != NULL && sk_acceptq_is_full(sk)) { goto out_sock_put; } /* * We dont have any listeners for this incoming call. * Try forwarding it. */ if (sk == NULL) { skb_push(skb, addr_len + X25_STD_MIN_LEN); if (sysctl_x25_forward && x25_forward_call(&dest_addr, nb, skb, lci) > 0) { /* Call was forwarded, dont process it any more */ kfree_skb(skb); rc = 1; goto out; } else { /* No listeners, can't forward, clear the call */ goto out_clear_request; } } /* * Try to reach a compromise on the requested facilities. */ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities); if (len == -1) goto out_sock_put; /* * current neighbour/link might impose additional limits * on certain facilties */ x25_limit_facilities(&facilities, nb); /* * Try to create a new socket. 
*/ make = x25_make_new(sk); if (!make) goto out_sock_put; /* * Remove the facilities */ skb_pull(skb, len); skb->sk = make; make->sk_state = TCP_ESTABLISHED; makex25 = x25_sk(make); makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask; /* ensure no reverse facil on accept */ makex25->vc_facil_mask &= ~X25_MASK_REVERSE; /* ensure no calling address extension on accept */ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE; makex25->cudmatchlength = x25_sk(sk)->cudmatchlength; /* Normally all calls are accepted immediately */ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) { x25_write_internal(make, X25_CALL_ACCEPTED); makex25->state = X25_STATE_3; } /* * Incoming Call User Data. */ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; sk->sk_ack_backlog++; x25_insert_socket(make); skb_queue_head(&sk->sk_receive_queue, skb); x25_start_heartbeat(make); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); rc = 1; sock_put(sk); out: return rc; out_sock_put: sock_put(sk); out_clear_request: rc = 0; x25_transmit_clear_request(nb, lci, 0x01); goto out; } static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name; struct sockaddr_x25 sx25; struct sk_buff *skb; unsigned char *asmptr; int noblock = msg->msg_flags & MSG_DONTWAIT; size_t size; int qbit = 0, rc = -EINVAL; lock_sock(sk); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) goto out; /* we currently don't support segmented records at the user interface */ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB))) goto out; rc = -EADDRNOTAVAIL; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 
-EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } rc = -ENETUNREACH; if (!x25->neighbour) goto out; if (usx25) { rc = -EINVAL; if (msg->msg_namelen < sizeof(sx25)) goto out; memcpy(&sx25, usx25, sizeof(sx25)); rc = -EISCONN; if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr)) goto out; rc = -EINVAL; if (sx25.sx25_family != AF_X25) goto out; } else { /* * FIXME 1003.1g - if the socket is like this because * it has become closed (not started closed) we ought * to SIGPIPE, EPIPE; */ rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; sx25.sx25_family = AF_X25; sx25.sx25_addr = x25->dest_addr; } /* Sanity check the packet size */ if (len > 65535) { rc = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n"); if ((msg->msg_flags & MSG_OOB) && len > 32) len = 32; size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto out; X25_SKB_CB(skb)->flags = msg->msg_flags; skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN); /* * Put the data on the end */ SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n"); skb_reset_transport_header(skb); skb_put(skb, len); rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (rc) goto out_kfree_skb; /* * If the Q BIT Include socket option is in force, the first * byte of the user data is the logical value of the Q Bit. 
*/ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { qbit = skb->data[0]; skb_pull(skb, 1); } /* * Push down the X.25 header */ SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n"); if (msg->msg_flags & MSG_OOB) { if (x25->neighbour->extended) { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } else { asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_INTERRUPT; } } else { if (x25->neighbour->extended) { /* Build an Extended X.25 header */ asmptr = skb_push(skb, X25_EXT_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; *asmptr++ = X25_DATA; } else { /* Build an Standard X.25 header */ asmptr = skb_push(skb, X25_STD_MIN_LEN); *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ; *asmptr++ = (x25->lci >> 0) & 0xFF; *asmptr++ = X25_DATA; } if (qbit) skb->data[0] |= X25_Q_BIT; } SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n"); SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n"); rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out_kfree_skb; if (msg->msg_flags & MSG_OOB) skb_queue_tail(&x25->interrupt_out_queue, skb); else { rc = x25_output(sk, skb); len = rc; if (rc < 0) kfree_skb(skb); else if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) len++; } x25_kick(sk); rc = len; out: release_sock(sk); return rc; out_kfree_skb: kfree_skb(skb); goto out; } static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; size_t copied; int qbit; struct sk_buff *skb; unsigned char *asmptr; int rc = -ENOTCONN; lock_sock(sk); /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) goto out; if (flags & MSG_OOB) { rc = -EINVAL; if (sock_flag(sk, SOCK_URGINLINE) || !skb_peek(&x25->interrupt_in_queue)) goto out; skb = skb_dequeue(&x25->interrupt_in_queue); skb_pull(skb, X25_STD_MIN_LEN); /* * No Q bit information on Interrupt data. */ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = 0x00; } msg->msg_flags |= MSG_OOB; } else { /* Now we can treat all alike */ release_sock(sk); skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); lock_sock(sk); if (!skb) goto out; qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; skb_pull(skb, x25->neighbour->extended ? X25_EXT_MIN_LEN : X25_STD_MIN_LEN); if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { asmptr = skb_push(skb, 1); *asmptr = qbit; } } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (rc) goto out_free_dgram; if (sx25) { sx25->sx25_family = AF_X25; sx25->sx25_addr = x25->dest_addr; } msg->msg_namelen = sizeof(struct sockaddr_x25); x25_check_rbuf(sk); rc = copied; out_free_dgram: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct x25_sock *x25 = x25_sk(sk); void __user *argp = (void __user *)arg; int rc; switch (cmd) { case TIOCOUTQ: { int amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; int amount = 0; /* * These two are safe on a single CPU system as * only user tasks fiddle here */ lock_sock(sk); if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); rc = 
put_user(amount, (unsigned int __user *)argp); break; } case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = sock_get_timestamp(sk, (struct timeval __user *)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = sock_get_timestampns(sk, (struct timespec __user *)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->facilities, sizeof(x25->facilities)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SFACILITIES: { struct x25_facilities facilities; rc = -EFAULT; if (copy_from_user(&facilities, argp, sizeof(facilities))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_fac_release; if (facilities.pacsize_in < X25_PS16 || facilities.pacsize_in > X25_PS4096) goto out_fac_release; if (facilities.pacsize_out < X25_PS16 || facilities.pacsize_out > X25_PS4096) goto out_fac_release; if (facilities.winsize_in < 1 || facilities.winsize_in > 127) goto out_fac_release; if (facilities.throughput) { int out = facilities.throughput & 0xf0; int in = facilities.throughput & 0x0f; if (!out) facilities.throughput |= X25_DEFAULT_THROUGHPUT << 4; else if (out < 0x30 || out > 0xD0) goto out_fac_release; if (!in) facilities.throughput |= X25_DEFAULT_THROUGHPUT; else if (in < 0x03 || in > 0x0D) goto out_fac_release; } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) goto out_fac_release; x25->facilities = facilities; rc = 0; out_fac_release: release_sock(sk); break; } case 
SIOCX25GDTEFACILITIES: { lock_sock(sk); rc = copy_to_user(argp, &x25->dte_facilities, sizeof(x25->dte_facilities)); release_sock(sk); if (rc) rc = -EFAULT; break; } case SIOCX25SDTEFACILITIES: { struct x25_dte_facilities dtefacs; rc = -EFAULT; if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) break; rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE) goto out_dtefac_release; if (dtefacs.calling_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.calling_ae == NULL) goto out_dtefac_release; if (dtefacs.called_len > X25_MAX_AE_LEN) goto out_dtefac_release; if (dtefacs.called_ae == NULL) goto out_dtefac_release; x25->dte_facilities = dtefacs; rc = 0; out_dtefac_release: release_sock(sk); break; } case SIOCX25GCALLUSERDATA: { lock_sock(sk); rc = copy_to_user(argp, &x25->calluserdata, sizeof(x25->calluserdata)) ? -EFAULT : 0; release_sock(sk); break; } case SIOCX25SCALLUSERDATA: { struct x25_calluserdata calluserdata; rc = -EFAULT; if (copy_from_user(&calluserdata, argp, sizeof(calluserdata))) break; rc = -EINVAL; if (calluserdata.cudlength > X25_MAX_CUD_LEN) break; lock_sock(sk); x25->calluserdata = calluserdata; release_sock(sk); rc = 0; break; } case SIOCX25GCAUSEDIAG: { lock_sock(sk); rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag)) ? 
-EFAULT : 0; release_sock(sk); break; } case SIOCX25SCAUSEDIAG: { struct x25_causediag causediag; rc = -EFAULT; if (copy_from_user(&causediag, argp, sizeof(causediag))) break; lock_sock(sk); x25->causediag = causediag; release_sock(sk); rc = 0; break; } case SIOCX25SCUDMATCHLEN: { struct x25_subaddr sub_addr; rc = -EINVAL; lock_sock(sk); if(sk->sk_state != TCP_CLOSE) goto out_cud_release; rc = -EFAULT; if (copy_from_user(&sub_addr, argp, sizeof(sub_addr))) goto out_cud_release; rc = -EINVAL; if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN) goto out_cud_release; x25->cudmatchlength = sub_addr.cudmatchlength; rc = 0; out_cud_release: release_sock(sk); break; } case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_CLOSE) break; clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); release_sock(sk); rc = 0; break; } case SIOCX25SENDCALLACCPT: { rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) break; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) break; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; release_sock(sk); rc = 0; break; } default: rc = -ENOIOCTLCMD; break; } return rc; } static const struct net_proto_family x25_family_ops = { .family = AF_X25, .create = x25_create, .owner = THIS_MODULE, }; #ifdef CONFIG_COMPAT static int compat_x25_subscr_ioctl(unsigned int cmd, struct compat_x25_subscrip_struct __user *x25_subscr32) { struct compat_x25_subscrip_struct x25_subscr; struct x25_neigh *nb; struct net_device *dev; int rc = -EINVAL; rc = -EFAULT; if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32))) goto out; rc = -EINVAL; dev = x25_dev_get(x25_subscr.device); if (dev == NULL) goto out; nb = x25_get_neigh(dev); if (nb == NULL) goto out_dev_put; dev_put(dev); if (cmd == SIOCX25GSUBSCRIP) { read_lock_bh(&x25_neigh_list_lock); x25_subscr.extended = nb->extended; x25_subscr.global_facil_mask = nb->global_facil_mask; read_unlock_bh(&x25_neigh_list_lock); rc = 
copy_to_user(x25_subscr32, &x25_subscr, sizeof(*x25_subscr32)) ? -EFAULT : 0; } else { rc = -EINVAL; if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { rc = 0; write_lock_bh(&x25_neigh_list_lock); nb->extended = x25_subscr.extended; nb->global_facil_mask = x25_subscr.global_facil_mask; write_unlock_bh(&x25_neigh_list_lock); } } x25_neigh_put(nb); out: return rc; out_dev_put: dev_put(dev); goto out; } static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; int rc = -ENOIOCTLCMD; switch(cmd) { case TIOCOUTQ: case TIOCINQ: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = compat_sock_get_timestamp(sk, (struct timeval __user*)argp); break; case SIOCGSTAMPNS: rc = -EINVAL; if (sk) rc = compat_sock_get_timestampns(sk, (struct timespec __user*)argp); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: rc = -EINVAL; break; case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = x25_route_ioctl(cmd, argp); break; case SIOCX25GSUBSCRIP: rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25SSUBSCRIP: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = compat_x25_subscr_ioctl(cmd, argp); break; case SIOCX25GFACILITIES: case SIOCX25SFACILITIES: case SIOCX25GDTEFACILITIES: case SIOCX25SDTEFACILITIES: case SIOCX25GCALLUSERDATA: case SIOCX25SCALLUSERDATA: case SIOCX25GCAUSEDIAG: case SIOCX25SCAUSEDIAG: case SIOCX25SCUDMATCHLEN: case SIOCX25CALLACCPTAPPRV: case SIOCX25SENDCALLACCPT: rc = x25_ioctl(sock, cmd, (unsigned long)argp); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #endif static const struct proto_ops x25_proto_ops = { .family = AF_X25, .owner = THIS_MODULE, .release = x25_release, .bind = x25_bind, .connect = 
x25_connect, .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, #endif .listen = x25_listen, .shutdown = sock_no_shutdown, .setsockopt = x25_setsockopt, .getsockopt = x25_getsockopt, .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type x25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, }; static struct notifier_block x25_dev_notifier = { .notifier_call = x25_device_event, }; void x25_kill_by_neigh(struct x25_neigh *nb) { struct sock *s; struct hlist_node *node; write_lock_bh(&x25_list_lock); sk_for_each(s, node, &x25_list) if (x25_sk(s)->neighbour == nb) x25_disconnect(s, ENETUNREACH, 0, 0); write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ x25_clear_forward_by_dev(nb->dev); } static int __init x25_init(void) { int rc = proto_register(&x25_proto, 0); if (rc != 0) goto out; rc = sock_register(&x25_family_ops); if (rc != 0) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); if (rc != 0) goto out_sock; printk(KERN_INFO "X.25 for Linux Version 0.2\n"); x25_register_sysctl(); rc = x25_proc_init(); if (rc != 0) goto out_dev; out: return rc; out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); goto out; } module_init(x25_init); static void __exit x25_exit(void) { x25_proc_exit(); x25_link_free(); x25_route_free(); x25_unregister_sysctl(); unregister_netdevice_notifier(&x25_dev_notifier); dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); proto_unregister(&x25_proto); } module_exit(x25_exit); MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_X25);
gpl-2.0
lilbowza1985/s6eng2
arch/sh/kernel/traps_64.c
2211
21709
/* * arch/sh/kernel/traps_64.c * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003, 2004 Paul Mundt * Copyright (C) 2003, 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/alignment.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/fpu.h> static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode) { int get_user_error; unsigned long aligned_pc; insn_size_t opcode; if ((pc & 3) == 1) { /* SHmedia */ aligned_pc = pc & ~3; if (from_user_mode) { if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) { get_user_error = -EFAULT; } else { get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc); *result_opcode = opcode; } return get_user_error; } else { /* If the fault was in the kernel, we can either read * this directly, or if not, we fault. */ *result_opcode = *(insn_size_t *)aligned_pc; return 0; } } else if ((pc & 1) == 0) { /* SHcompact */ /* TODO : provide handling for this. We don't really support user-mode SHcompact yet, and for a kernel fault, this would have to come from a module built for SHcompact. */ return -EFAULT; } else { /* misaligned */ return -EFAULT; } } static int address_is_sign_extended(__u64 a) { __u64 b; #if (NEFF == 32) b = (__u64)(__s64)(__s32)(a & 0xffffffffUL); return (b == a) ? 
1 : 0; #else #error "Sign extend check only works for NEFF==32" #endif } /* return -1 for fault, 0 for OK */ static int generate_and_check_address(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift, __u64 *address) { __u64 base_address, addr; int basereg; switch (1 << width_shift) { case 1: inc_unaligned_byte_access(); break; case 2: inc_unaligned_word_access(); break; case 4: inc_unaligned_dword_access(); break; case 8: inc_unaligned_multi_access(); break; } basereg = (opcode >> 20) & 0x3f; base_address = regs->regs[basereg]; if (displacement_not_indexed) { __s64 displacement; displacement = (opcode >> 10) & 0x3ff; displacement = ((displacement << 54) >> 54); /* sign extend */ addr = (__u64)((__s64)base_address + (displacement << width_shift)); } else { __u64 offset; int offsetreg; offsetreg = (opcode >> 10) & 0x3f; offset = regs->regs[offsetreg]; addr = base_address + offset; } /* Check sign extended */ if (!address_is_sign_extended(addr)) return -1; /* Check accessible. For misaligned access in the kernel, assume the address is always accessible (and if not, just fault when the load/store gets done.) 
*/ if (user_mode(regs)) { inc_unaligned_user_access(); if (addr >= TASK_SIZE) return -1; } else inc_unaligned_kernel_access(); *address = addr; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr); unaligned_fixups_notify(current, opcode, regs); return 0; } static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) { unsigned short x; unsigned char *p, *q; p = (unsigned char *) (int) address; q = (unsigned char *) &x; q[0] = p[0]; q[1] = p[1]; if (do_sign_extend) { *result = (__u64)(__s64) *(short *) &x; } else { *result = (__u64) x; } } static void misaligned_kernel_word_store(__u64 address, __u64 value) { unsigned short x; unsigned char *p, *q; p = (unsigned char *) (int) address; q = (unsigned char *) &x; x = (__u16) value; p[0] = q[0]; p[1] = q[1]; } static int misaligned_load(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift, int do_sign_extend) { /* Return -1 for a fault, 0 for OK */ int error; int destreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) return error; destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { return -1; } if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) { return -1; /* fault */ } switch (width_shift) { case 1: if (do_sign_extend) { regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer; } else { regs->regs[destreg] = (__u64) *(__u16 *) &buffer; } break; case 2: regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer; break; case 3: regs->regs[destreg] = buffer; break; default: printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } } else { /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ __u64 lo, hi; switch (width_shift) { case 1: misaligned_kernel_word_load(address, 
do_sign_extend, &regs->regs[destreg]); break; case 2: asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address)); asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address)); regs->regs[destreg] = lo | hi; break; case 3: asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address)); asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address)); regs->regs[destreg] = lo | hi; break; default: printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } } return 0; } static int misaligned_store(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift) { /* Return -1 for a fault, 0 for OK */ int error; int srcreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) return error; srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { return -1; } switch (width_shift) { case 1: *(__u16 *) &buffer = (__u16) regs->regs[srcreg]; break; case 2: *(__u32 *) &buffer = (__u32) regs->regs[srcreg]; break; case 3: buffer = regs->regs[srcreg]; break; default: printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { return -1; /* fault */ } } else { /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ __u64 val = regs->regs[srcreg]; switch (width_shift) { case 1: misaligned_kernel_word_store(address, val); break; case 2: asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address)); asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address)); break; case 3: asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address)); asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address)); break; default: printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } } return 0; } /* Never need 
to fix up misaligned FPU accesses within the kernel since that's a real error. */ static int misaligned_fpu_load(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift, int do_paired_load) { /* Return -1 for a fault, 0 for OK */ int error; int destreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) return error; destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; __u32 buflo, bufhi; if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) { return -1; } if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) { return -1; /* fault */ } /* 'current' may be the current owner of the FPU state, so context switch the registers into memory so they can be indexed by register number. */ if (last_task_used_math == current) { enable_fpu(); save_fpu(current); disable_fpu(); last_task_used_math = NULL; regs->sr |= SR_FD; } buflo = *(__u32*) &buffer; bufhi = *(1 + (__u32*) &buffer); switch (width_shift) { case 2: current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; break; case 3: if (do_paired_load) { current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; } else { #if defined(CONFIG_CPU_LITTLE_ENDIAN) current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi; current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo; #else current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; #endif } break; default: printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } return 0; } else { die ("Misaligned FPU load inside kernel", regs, 0); return -1; } } static int misaligned_fpu_store(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift, int do_paired_load) { /* Return -1 for a fault, 0 for OK */ int 
error; int srcreg; __u64 address; error = generate_and_check_address(regs, opcode, displacement_not_indexed, width_shift, &address); if (error < 0) return error; srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { __u64 buffer; /* Initialise these to NaNs. */ __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) { return -1; } /* 'current' may be the current owner of the FPU state, so context switch the registers into memory so they can be indexed by register number. */ if (last_task_used_math == current) { enable_fpu(); save_fpu(current); disable_fpu(); last_task_used_math = NULL; regs->sr |= SR_FD; } switch (width_shift) { case 2: buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; break; case 3: if (do_paired_load) { buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; } else { #if defined(CONFIG_CPU_LITTLE_ENDIAN) bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #else buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; #endif } break; default: printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", width_shift, (unsigned long) regs->pc); break; } *(__u32*) &buffer = buflo; *(1 + (__u32*) &buffer) = bufhi; if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { return -1; /* fault */ } return 0; } else { die ("Misaligned FPU load inside kernel", regs, 0); return -1; } } static int misaligned_fixup(struct pt_regs *regs) { insn_size_t opcode; int error; int major, minor; unsigned int user_action; user_action = unaligned_user_action(); if (!(user_action & UM_FIXUP)) return -1; error = read_opcode(regs->pc, &opcode, user_mode(regs)); if (error < 0) { return error; } major = (opcode >> 26) & 0x3f; minor = (opcode >> 16) & 0xf; switch (major) { case (0x84>>2): /* LD.W */ 
error = misaligned_load(regs, opcode, 1, 1, 1); break; case (0xb0>>2): /* LD.UW */ error = misaligned_load(regs, opcode, 1, 1, 0); break; case (0x88>>2): /* LD.L */ error = misaligned_load(regs, opcode, 1, 2, 1); break; case (0x8c>>2): /* LD.Q */ error = misaligned_load(regs, opcode, 1, 3, 0); break; case (0xa4>>2): /* ST.W */ error = misaligned_store(regs, opcode, 1, 1); break; case (0xa8>>2): /* ST.L */ error = misaligned_store(regs, opcode, 1, 2); break; case (0xac>>2): /* ST.Q */ error = misaligned_store(regs, opcode, 1, 3); break; case (0x40>>2): /* indexed loads */ switch (minor) { case 0x1: /* LDX.W */ error = misaligned_load(regs, opcode, 0, 1, 1); break; case 0x5: /* LDX.UW */ error = misaligned_load(regs, opcode, 0, 1, 0); break; case 0x2: /* LDX.L */ error = misaligned_load(regs, opcode, 0, 2, 1); break; case 0x3: /* LDX.Q */ error = misaligned_load(regs, opcode, 0, 3, 0); break; default: error = -1; break; } break; case (0x60>>2): /* indexed stores */ switch (minor) { case 0x1: /* STX.W */ error = misaligned_store(regs, opcode, 0, 1); break; case 0x2: /* STX.L */ error = misaligned_store(regs, opcode, 0, 2); break; case 0x3: /* STX.Q */ error = misaligned_store(regs, opcode, 0, 3); break; default: error = -1; break; } break; case (0x94>>2): /* FLD.S */ error = misaligned_fpu_load(regs, opcode, 1, 2, 0); break; case (0x98>>2): /* FLD.P */ error = misaligned_fpu_load(regs, opcode, 1, 3, 1); break; case (0x9c>>2): /* FLD.D */ error = misaligned_fpu_load(regs, opcode, 1, 3, 0); break; case (0x1c>>2): /* floating indexed loads */ switch (minor) { case 0x8: /* FLDX.S */ error = misaligned_fpu_load(regs, opcode, 0, 2, 0); break; case 0xd: /* FLDX.P */ error = misaligned_fpu_load(regs, opcode, 0, 3, 1); break; case 0x9: /* FLDX.D */ error = misaligned_fpu_load(regs, opcode, 0, 3, 0); break; default: error = -1; break; } break; case (0xb4>>2): /* FLD.S */ error = misaligned_fpu_store(regs, opcode, 1, 2, 0); break; case (0xb8>>2): /* FLD.P */ error = 
misaligned_fpu_store(regs, opcode, 1, 3, 1); break; case (0xbc>>2): /* FLD.D */ error = misaligned_fpu_store(regs, opcode, 1, 3, 0); break; case (0x3c>>2): /* floating indexed stores */ switch (minor) { case 0x8: /* FSTX.S */ error = misaligned_fpu_store(regs, opcode, 0, 2, 0); break; case 0xd: /* FSTX.P */ error = misaligned_fpu_store(regs, opcode, 0, 3, 1); break; case 0x9: /* FSTX.D */ error = misaligned_fpu_store(regs, opcode, 0, 3, 0); break; default: error = -1; break; } break; default: /* Fault */ error = -1; break; } if (error < 0) { return error; } else { regs->pc += 4; /* Skip the instruction that's just been emulated */ return 0; } } static void do_unhandled_exception(int signr, char *str, unsigned long error, struct pt_regs *regs) { if (user_mode(regs)) force_sig(signr, current); die_if_no_fixup(str, regs, error); } #define DO_ERROR(signr, str, name) \ asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ { \ do_unhandled_exception(signr, str, error_code, regs); \ } DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst) DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec) #if defined(CONFIG_SH64_ID2815_WORKAROUND) #define OPCODE_INVALID 0 #define OPCODE_USER_VALID 1 #define OPCODE_PRIV_VALID 2 /* getcon/putcon - requires checking which control register is referenced. */ #define OPCODE_CTRL_REG 3 /* Table of valid opcodes for SHmedia mode. Form a 10-bit value by concatenating the major/minor opcodes i.e. opcode[31:26,20:16]. The 6 MSBs of this value index into the following array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to LSBs==4'b0000 etc). 
*/ static unsigned long shmedia_opcode_table[64] = { 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015, 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000, 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000, 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000, 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000 }; /* Workaround SH5-101 cut2 silicon defect #2815 : in some situations, inter-mode branches from SHcompact -> SHmedia which should take ITLBMISS or EXECPROT exceptions at the target falsely take RESINST at the target instead. */ void do_reserved_inst(unsigned long error_code, struct pt_regs *regs) { insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */ unsigned long pc, aligned_pc; unsigned long index, shift; unsigned long major, minor, combined; unsigned long reserved_field; int opcode_state; int get_user_error; int signr = SIGILL; char *exception_name = "reserved_instruction"; pc = regs->pc; /* SHcompact is not handled */ if (unlikely((pc & 3) == 0)) goto out; /* SHmedia : check for defect. This requires executable vmas to be readable too. */ aligned_pc = pc & ~3; if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) get_user_error = -EFAULT; else get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc); if (get_user_error < 0) { /* * Error trying to read opcode. This typically means a * real fault, not a RESINST any more. So change the * codes. 
*/ exception_name = "address error (exec)"; signr = SIGSEGV; goto out; } /* These bits are currently reserved as zero in all valid opcodes */ reserved_field = opcode & 0xf; if (unlikely(reserved_field)) goto out; /* invalid opcode */ major = (opcode >> 26) & 0x3f; minor = (opcode >> 16) & 0xf; combined = (major << 4) | minor; index = major; shift = minor << 1; opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3; switch (opcode_state) { case OPCODE_INVALID: /* Trap. */ break; case OPCODE_USER_VALID: /* * Restart the instruction: the branch to the instruction * will now be from an RTE not from SHcompact so the * silicon defect won't be triggered. */ return; case OPCODE_PRIV_VALID: if (!user_mode(regs)) { /* * Should only ever get here if a module has * SHcompact code inside it. If so, the same fix * up is needed. */ return; /* same reason */ } /* * Otherwise, user mode trying to execute a privileged * instruction - fall through to trap. */ break; case OPCODE_CTRL_REG: /* If in privileged mode, return as above. */ if (!user_mode(regs)) return; /* In user mode ... */ if (combined == 0x9f) { /* GETCON */ unsigned long regno = (opcode >> 20) & 0x3f; if (regno >= 62) return; /* reserved/privileged control register => trap */ } else if (combined == 0x1bf) { /* PUTCON */ unsigned long regno = (opcode >> 4) & 0x3f; if (regno >= 62) return; /* reserved/privileged control register => trap */ } break; default: /* Fall through to trap. 
*/ break; } out: do_unhandled_exception(signr, exception_name, error_code, regs); } #else /* CONFIG_SH64_ID2815_WORKAROUND */ /* If the workaround isn't needed, this is just a straightforward reserved instruction */ DO_ERROR(SIGILL, "reserved instruction", reserved_inst) #endif /* CONFIG_SH64_ID2815_WORKAROUND */ /* Called with interrupts disabled */ asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs) { die_if_kernel("exception", regs, ex); } asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs) { /* Syscall debug */ printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId); die_if_kernel("unknown trapa", regs, scId); return -ENOSYS; } /* Implement misaligned load/store handling for kernel (and optionally for user mode too). Limitation : only SHmedia mode code is handled - there is no handling at all for misaligned accesses occurring in SHcompact code yet. */ asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs) { if (misaligned_fixup(regs) < 0) do_unhandled_exception(SIGSEGV, "address error(load)", error_code, regs); } asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs) { if (misaligned_fixup(regs) < 0) do_unhandled_exception(SIGSEGV, "address error(store)", error_code, regs); } asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) { u64 peek_real_address_q(u64 addr); u64 poke_real_address_q(u64 addr, u64 val); unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010; unsigned long long exp_cause; /* It's not worth ioremapping the debug module registers for the amount of access we make to them - just go direct to their physical addresses. 
*/ exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY); if (exp_cause & ~4) printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n", (unsigned long)(exp_cause & 0xffffffff)); show_state(); /* Clear all DEBUGINT causes */ poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); } void __cpuinit per_cpu_trap_init(void) { /* Nothing to do for now, VBR initialization later. */ }
gpl-2.0
sawdoctor/Note-4-AEL-Kernel
drivers/dma/sh/shdma.c
2211
24442
/* * Renesas SuperH DMA Engine support * * base is drivers/dma/flsdma.c * * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * - DMA of SuperH does not have Hardware DMA chain mode. * - MAX DMA size is 16MB. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sh_dma.h> #include <linux/notifier.h> #include <linux/kdebug.h> #include <linux/spinlock.h> #include <linux/rculist.h> #include "../dmaengine.h" #include "shdma.h" #define SH_DMAE_DRV_NAME "sh-dma-engine" /* Default MEMCPY transfer size = 2^2 = 4 bytes */ #define LOG2_DEFAULT_XFER_SIZE 2 #define SH_DMA_SLAVE_NUMBER 256 #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) /* * Used for write-side mutual exclusion for the global device list, * read-side synchronization by way of RCU, and per-controller data. 
*/ static DEFINE_SPINLOCK(sh_dmae_lock); static LIST_HEAD(sh_dmae_devices); static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); __raw_writel(data, shdev->chan_reg + shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); } static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { __raw_writel(data, sh_dc->base + reg / sizeof(u32)); } static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) { return __raw_readl(sh_dc->base + reg / sizeof(u32)); } static u16 dmaor_read(struct sh_dmae_device *shdev) { u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); if (shdev->pdata->dmaor_is_32bit) return __raw_readl(addr); else return __raw_readw(addr); } static void dmaor_write(struct sh_dmae_device *shdev, u16 data) { u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); if (shdev->pdata->dmaor_is_32bit) __raw_writel(data, addr); else __raw_writew(data, addr); } static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); } static u32 chcr_read(struct sh_dmae_chan *sh_dc) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); } /* * Reset DMA controller * * SH7780 has two DMAOR register */ static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev); dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); spin_unlock_irqrestore(&sh_dmae_lock, flags); } static int sh_dmae_rst(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); if (shdev->pdata->chclr_present) { int i; for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = 
shdev->chan[i]; if (sh_chan) chclr_write(sh_chan, 0); } } dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); dmaor = dmaor_read(shdev); spin_unlock_irqrestore(&sh_dmae_lock, flags); if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); return -EIO; } if (shdev->pdata->dmaor_init & ~dmaor) dev_warn(shdev->shdma_dev.dma_dev.dev, "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", dmaor, shdev->pdata->dmaor_init); return 0; } static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { u32 chcr = chcr_read(sh_chan); if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) return true; /* working */ return false; /* waiting */ } static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); if (cnt >= pdata->ts_shift_num) cnt = 0; return pdata->ts_shift[cnt]; } static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; for (i = 0; i < pdata->ts_shift_num; i++) if (pdata->ts_shift[i] == l2size) break; if (i == pdata->ts_shift_num) i = 0; return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); } static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) { sh_dmae_writel(sh_chan, hw->sar, SAR); sh_dmae_writel(sh_chan, hw->dar, DAR); sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); } static void dmae_start(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); if (shdev->pdata->needs_tend_set) sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); chcr |= CHCR_DE | shdev->chcr_ie_bit; chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_init(struct 
sh_dmae_chan *sh_chan) { /* * Default configuration for dual address memory-memory transfer. * 0x400 represents auto-request. */ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); chcr_write(sh_chan, chcr); } static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) { /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ if (dmae_is_busy(sh_chan)) return -EBUSY; sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); chcr_write(sh_chan, val); return 0; } static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; u16 __iomem *addr = shdev->dmars; unsigned int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; if (pdata->no_dmars) return 0; /* in the case of a missing DMARS resource use first memory window */ if (!addr) addr = (u16 __iomem *)shdev->chan_reg; addr += chan_pdata->dmars / sizeof(u16); __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), addr); return 0; } static void sh_dmae_start_xfer(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", sdesc->async_tx.cookie, sh_chan->shdma_chan.id, sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); /* Get the ld start address from ld_queue */ dmae_set_reg(sh_chan, &sh_desc->hw); dmae_start(sh_chan); } static bool sh_dmae_channel_busy(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); return dmae_is_busy(sh_chan); } static void sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) 
{ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); if (slave_id >= 0) { const struct sh_dmae_slave_config *cfg = sh_chan->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } } static const struct sh_dmae_slave_config *dmae_find_slave( struct sh_dmae_chan *sh_chan, int slave_id) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_slave_config *cfg; int i; if (slave_id >= SH_DMA_SLAVE_NUMBER) return NULL; for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) if (cfg->slave_id == slave_id) return cfg; return NULL; } static int sh_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); if (!cfg) return -ENXIO; if (!try) sh_chan->config = cfg; return 0; } static void dmae_halt(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); chcr_write(sh_chan, chcr); } static int sh_dmae_desc_setup(struct shdma_chan *schan, struct shdma_desc *sdesc, dma_addr_t src, dma_addr_t dst, size_t *len) { struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); if (*len > schan->max_xfer_len) *len = schan->max_xfer_len; sh_desc->hw.sar = src; sh_desc->hw.dar = dst; sh_desc->hw.tcr = *len; return 0; } static void sh_dmae_halt(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); dmae_halt(sh_chan); } static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); if (!(chcr_read(sh_chan) & CHCR_TE)) return false; /* DMA stop */ dmae_halt(sh_chan); return true; } static 
size_t sh_dmae_get_partial(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << sh_chan->xmit_shift; } /* Called from error IRQ or NMI */ static bool sh_dmae_reset(struct sh_dmae_device *shdev) { bool ret; /* halt the dma controller */ sh_dmae_ctl_stop(shdev); /* We cannot detect, which channel caused the error, have to reset all */ ret = shdma_reset(&shdev->shdma_dev); sh_dmae_rst(shdev); return ret; } static irqreturn_t sh_dmae_err(int irq, void *data) { struct sh_dmae_device *shdev = data; if (!(dmaor_read(shdev) & DMAOR_AE)) return IRQ_NONE; sh_dmae_reset(shdev); return IRQ_HANDLED; } static bool sh_dmae_desc_completed(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); u32 sar_buf = sh_dmae_readl(sh_chan, SAR); u32 dar_buf = sh_dmae_readl(sh_chan, DAR); return (sdesc->direction == DMA_DEV_TO_MEM && (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || (sdesc->direction != DMA_DEV_TO_MEM && (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); } static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) { /* Fast path out if NMIF is not asserted for this controller */ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) return false; return sh_dmae_reset(shdev); } static int sh_dmae_nmi_handler(struct notifier_block *self, unsigned long cmd, void *data) { struct sh_dmae_device *shdev; int ret = NOTIFY_DONE; bool triggered; /* * Only concern ourselves with NMI events. * * Normally we would check the die chain value, but as this needs * to be architecture independent, check for NMI context instead. 
*/ if (!in_nmi()) return NOTIFY_DONE; rcu_read_lock(); list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { /* * Only stop if one of the controllers has NMIF asserted, * we do not want to interfere with regular address error * handling or NMI events that don't concern the DMACs. */ triggered = sh_dmae_nmi_notify(shdev); if (triggered == true) ret = NOTIFY_OK; } rcu_read_unlock(); return ret; } static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { .notifier_call = sh_dmae_nmi_handler, /* Run before NMI debug handler and KGDB */ .priority = 1, }; static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, int irq, unsigned long flags) { const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; struct shdma_dev *sdev = &shdev->shdma_dev; struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); struct sh_dmae_chan *sh_chan; struct shdma_chan *schan; int err; sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); if (!sh_chan) { dev_err(sdev->dma_dev.dev, "No free memory for allocating dma channels!\n"); return -ENOMEM; } schan = &sh_chan->shdma_chan; schan->max_xfer_len = SH_DMA_TCR_MAX + 1; shdma_chan_probe(sdev, schan, id); sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); /* set up channel irq */ if (pdev->id >= 0) snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), "sh-dmae%d.%d", pdev->id, id); else snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), "sh-dma%d", id); err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); if (err) { dev_err(sdev->dma_dev.dev, "DMA channel %d request_irq error %d\n", id, err); goto err_no_irq; } shdev->chan[id] = sh_chan; return 0; err_no_irq: /* remove from dmaengine device node */ shdma_chan_remove(schan); kfree(sh_chan); return err; } static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) { struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; struct shdma_chan *schan; int i; shdma_for_each_chan(schan, &shdev->shdma_dev, i) { struct 
sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); BUG_ON(!schan); shdma_free_irq(&sh_chan->shdma_chan); shdma_chan_remove(schan); kfree(sh_chan); } dma_dev->chancnt = 0; } static void sh_dmae_shutdown(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); sh_dmae_ctl_stop(shdev); } static int sh_dmae_runtime_suspend(struct device *dev) { return 0; } static int sh_dmae_runtime_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); return sh_dmae_rst(shdev); } #ifdef CONFIG_PM static int sh_dmae_suspend(struct device *dev) { return 0; } static int sh_dmae_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); int i, ret; ret = sh_dmae_rst(shdev); if (ret < 0) dev_err(dev, "Failed to reset!\n"); for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (!sh_chan->shdma_chan.desc_num) continue; if (sh_chan->shdma_chan.slave_id >= 0) { const struct sh_dmae_slave_config *cfg = sh_chan->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } } return 0; } #else #define sh_dmae_suspend NULL #define sh_dmae_resume NULL #endif const struct dev_pm_ops sh_dmae_pm = { .suspend = sh_dmae_suspend, .resume = sh_dmae_resume, .runtime_suspend = sh_dmae_runtime_suspend, .runtime_resume = sh_dmae_runtime_resume, }; static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); /* * Implicit BUG_ON(!sh_chan->config) * This is an exclusive slave DMA operation, may only be called after a * successful slave configuration. 
*/ return sh_chan->config->addr; } static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) { return &((struct sh_dmae_desc *)buf)[i].shdma_desc; } static const struct shdma_ops sh_dmae_shdma_ops = { .desc_completed = sh_dmae_desc_completed, .halt_channel = sh_dmae_halt, .channel_busy = sh_dmae_channel_busy, .slave_addr = sh_dmae_slave_addr, .desc_setup = sh_dmae_desc_setup, .set_slave = sh_dmae_set_slave, .setup_xfer = sh_dmae_setup_xfer, .start_xfer = sh_dmae_start_xfer, .embedded_desc = sh_dmae_embedded_desc, .chan_irq = sh_dmae_chan_irq, .get_partial = sh_dmae_get_partial, }; static int sh_dmae_probe(struct platform_device *pdev) { struct sh_dmae_pdata *pdata = pdev->dev.platform_data; unsigned long irqflags = IRQF_DISABLED, chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; struct sh_dmae_device *shdev; struct dma_device *dma_dev; struct resource *chan, *dmars, *errirq_res, *chanirq_res; /* get platform data */ if (!pdata || !pdata->channel_num) return -ENODEV; chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* DMARS area is optional */ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* * IRQ resources: * 1. there always must be at least one IRQ IO-resource. On SH4 it is * the error IRQ, in which case it is the only IRQ in this resource: * start == end. If it is the only IRQ resource, all channels also * use the same IRQ. * 2. DMA channel IRQ resources can be specified one per resource or in * ranges (start != end) * 3. iff all events (channels and, optionally, error) on this * controller use the same IRQ, only one IRQ resource can be * specified, otherwise there must be one IRQ per channel, even if * some of them are equal * 4. 
if all IRQs on this controller are equal or if some specific IRQs * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be * requested with the IRQF_SHARED flag */ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!chan || !errirq_res) return -ENODEV; if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { dev_err(&pdev->dev, "DMAC register region already claimed\n"); return -EBUSY; } if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); err = -EBUSY; goto ermrdmars; } err = -ENOMEM; shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); if (!shdev) { dev_err(&pdev->dev, "Not enough memory\n"); goto ealloc; } dma_dev = &shdev->shdma_dev.dma_dev; shdev->chan_reg = ioremap(chan->start, resource_size(chan)); if (!shdev->chan_reg) goto emapchan; if (dmars) { shdev->dmars = ioremap(dmars->start, resource_size(dmars)); if (!shdev->dmars) goto emapdmars; } if (!pdata->slave_only) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); if (pdata->slave && pdata->slave_num) dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); /* Default transfer size of 32 bytes requires 32-byte alignment */ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; shdev->shdma_dev.ops = &sh_dmae_shdma_ops; shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); err = shdma_init(&pdev->dev, &shdev->shdma_dev, pdata->channel_num); if (err < 0) goto eshdma; /* platform data */ shdev->pdata = pdev->dev.platform_data; if (pdata->chcr_offset) shdev->chcr_offset = pdata->chcr_offset; else shdev->chcr_offset = CHCR; if (pdata->chcr_ie_bit) shdev->chcr_ie_bit = pdata->chcr_ie_bit; else shdev->chcr_ie_bit = CHCR_IE; platform_set_drvdata(pdev, shdev); pm_runtime_enable(&pdev->dev); err = pm_runtime_get_sync(&pdev->dev); if (err < 0) dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); spin_lock_irq(&sh_dmae_lock); list_add_tail_rcu(&shdev->node, &sh_dmae_devices); 
spin_unlock_irq(&sh_dmae_lock); /* reset dma controller - only needed as a test */ err = sh_dmae_rst(shdev); if (err) goto rst_err; #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!chanirq_res) chanirq_res = errirq_res; else irqres++; if (chanirq_res == errirq_res || (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) irqflags = IRQF_SHARED; errirq = errirq_res->start; err = request_irq(errirq, sh_dmae_err, irqflags, "DMAC Address Error", shdev); if (err) { dev_err(&pdev->dev, "DMA failed requesting irq #%d, error %d\n", errirq, err); goto eirq_err; } #else chanirq_res = errirq_res; #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ if (chanirq_res->start == chanirq_res->end && !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { /* Special case - all multiplexed */ for (; irq_cnt < pdata->channel_num; irq_cnt++) { if (irq_cnt < SH_DMAE_MAX_CHANNELS) { chan_irq[irq_cnt] = chanirq_res->start; chan_flag[irq_cnt] = IRQF_SHARED; } else { irq_cap = 1; break; } } } else { do { for (i = chanirq_res->start; i <= chanirq_res->end; i++) { if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { irq_cap = 1; break; } if ((errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else chan_flag[irq_cnt] = IRQF_DISABLED; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); chan_irq[irq_cnt++] = i; } if (irq_cnt >= SH_DMAE_MAX_CHANNELS) break; chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, ++irqres); } while (irq_cnt < pdata->channel_num && chanirq_res); } /* Create DMA Channel */ for (i = 0; i < irq_cnt; i++) { err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err; } if (irq_cap) dev_notice(&pdev->dev, "Attempting to register %d DMA " "channels when a maximum of %d are supported.\n", pdata->channel_num, SH_DMAE_MAX_CHANNELS); pm_runtime_put(&pdev->dev); err = 
dma_async_device_register(&shdev->shdma_dev.dma_dev); if (err < 0) goto edmadevreg; return err; edmadevreg: pm_runtime_get(&pdev->dev); chan_probe_err: sh_dmae_chan_remove(shdev); #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) free_irq(errirq, shdev); eirq_err: #endif rst_err: spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); shdma_cleanup(&shdev->shdma_dev); eshdma: if (dmars) iounmap(shdev->dmars); emapdmars: iounmap(shdev->chan_reg); synchronize_rcu(); emapchan: kfree(shdev); ealloc: if (dmars) release_mem_region(dmars->start, resource_size(dmars)); ermrdmars: release_mem_region(chan->start, resource_size(chan)); return err; } static int sh_dmae_remove(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; struct resource *res; int errirq = platform_get_irq(pdev, 0); dma_async_device_unregister(dma_dev); if (errirq > 0) free_irq(errirq, shdev); spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); pm_runtime_disable(&pdev->dev); sh_dmae_chan_remove(shdev); shdma_cleanup(&shdev->shdma_dev); if (shdev->dmars) iounmap(shdev->dmars); iounmap(shdev->chan_reg); platform_set_drvdata(pdev, NULL); synchronize_rcu(); kfree(shdev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) release_mem_region(res->start, resource_size(res)); return 0; } static struct platform_driver sh_dmae_driver = { .driver = { .owner = THIS_MODULE, .pm = &sh_dmae_pm, .name = SH_DMAE_DRV_NAME, }, .remove = sh_dmae_remove, .shutdown = sh_dmae_shutdown, }; static int __init sh_dmae_init(void) { /* Wire up NMI handling */ int err = register_die_notifier(&sh_dmae_nmi_notifier); if (err) return err; return 
platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); } module_init(sh_dmae_init); static void __exit sh_dmae_exit(void) { platform_driver_unregister(&sh_dmae_driver); unregister_die_notifier(&sh_dmae_nmi_notifier); } module_exit(sh_dmae_exit); MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
gpl-2.0
bekriebel/android_kernel_omap
drivers/input/touchscreen/usbtouchscreen.c
3235
40467
/****************************************************************************** * usbtouchscreen.c * Driver for USB Touchscreens, supporting those devices: * - eGalax Touchkit * includes eTurboTouch CT-410/510/700 * - 3M/Microtouch EX II series * - ITM * - PanJit TouchSet * - eTurboTouch * - Gunze AHL61 * - DMC TSC-10/25 * - IRTOUCHSYSTEMS/UNITOP * - IdealTEK URTC1000 * - General Touch * - GoTop Super_Q2/GogoPen/PenPower tablets * - JASTEC USB touch controller/DigiTech DTR-02U * - Zytronic capacitive touchscreen * - NEXIO/iNexio * * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch> * Copyright (C) by Todd E. Johnson (mtouchusb.c) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Driver is based on touchkitusb.c * - ITM parts are from itmtouch.c * - 3M parts are from mtouchusb.c * - PanJit parts are from an unmerged driver by Lanslott Gish * - DMC TSC 10/25 are from Holger Schurig, with ideas from an unmerged * driver from Marius Vollmer * *****************************************************************************/ //#define DEBUG #include <linux/kernel.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/init.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <linux/hid.h> #define DRIVER_VERSION "v0.6" #define DRIVER_AUTHOR "Daniel Ritz <daniel.ritz@gmx.ch>" #define DRIVER_DESC "USB Touchscreen Driver" static int swap_xy; module_param(swap_xy, bool, 0644); MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped."); static int hwcalib_xy; module_param(hwcalib_xy, bool, 0644); MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available"); /* device specifc data/functions */ struct usbtouch_usb; struct usbtouch_device_info { int min_xc, max_xc; int min_yc, max_yc; int min_press, max_press; int rept_size; /* * Always service the USB devices irq not just when the input device is * open. This is useful when devices have a watchdog which prevents us * from periodically polling the device. Leave this unset unless your * touchscreen device requires it, as it does consume more of the USB * bandwidth. */ bool irq_always; void (*process_pkt) (struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); /* * used to get the packet len. 
possible return values: * > 0: packet len * = 0: skip one byte * < 0: -return value more bytes needed */ int (*get_pkt_len) (unsigned char *pkt, int len); int (*read_data) (struct usbtouch_usb *usbtouch, unsigned char *pkt); int (*alloc) (struct usbtouch_usb *usbtouch); int (*init) (struct usbtouch_usb *usbtouch); void (*exit) (struct usbtouch_usb *usbtouch); }; /* a usbtouch device */ struct usbtouch_usb { unsigned char *data; dma_addr_t data_dma; unsigned char *buffer; int buf_len; struct urb *irq; struct usb_interface *interface; struct input_dev *input; struct usbtouch_device_info *type; char name[128]; char phys[64]; void *priv; int x, y; int touch, press; }; /* device types */ enum { DEVTYPE_IGNORE = -1, DEVTYPE_EGALAX, DEVTYPE_PANJIT, DEVTYPE_3M, DEVTYPE_ITM, DEVTYPE_ETURBO, DEVTYPE_GUNZE, DEVTYPE_DMC_TSC10, DEVTYPE_IRTOUCH, DEVTYPE_IDEALTEK, DEVTYPE_GENERAL_TOUCH, DEVTYPE_GOTOP, DEVTYPE_JASTEC, DEVTYPE_E2I, DEVTYPE_ZYTRONIC, DEVTYPE_TC45USB, DEVTYPE_NEXIO, }; #define USB_DEVICE_HID_CLASS(vend, prod) \ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ | USB_DEVICE_ID_MATCH_INT_PROTOCOL \ | USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX /* ignore the HID capable devices, handled by usbhid */ {USB_DEVICE_HID_CLASS(0x0eef, 0x0001), .driver_info = DEVTYPE_IGNORE}, {USB_DEVICE_HID_CLASS(0x0eef, 0x0002), .driver_info = DEVTYPE_IGNORE}, /* normal device IDs */ {USB_DEVICE(0x3823, 0x0001), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x3823, 0x0002), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x0123, 0x0001), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x0eef, 0x0001), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x0eef, 0x0002), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x1234, 0x0001), .driver_info = DEVTYPE_EGALAX}, {USB_DEVICE(0x1234, 0x0002), 
.driver_info = DEVTYPE_EGALAX}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT {USB_DEVICE(0x134c, 0x0001), .driver_info = DEVTYPE_PANJIT}, {USB_DEVICE(0x134c, 0x0002), .driver_info = DEVTYPE_PANJIT}, {USB_DEVICE(0x134c, 0x0003), .driver_info = DEVTYPE_PANJIT}, {USB_DEVICE(0x134c, 0x0004), .driver_info = DEVTYPE_PANJIT}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_3M {USB_DEVICE(0x0596, 0x0001), .driver_info = DEVTYPE_3M}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ITM {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM}, {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO {USB_DEVICE(0x1234, 0x5678), .driver_info = DEVTYPE_ETURBO}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GUNZE {USB_DEVICE(0x0637, 0x0001), .driver_info = DEVTYPE_GUNZE}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 {USB_DEVICE(0x0afa, 0x03e8), .driver_info = DEVTYPE_DMC_TSC10}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK {USB_DEVICE(0x1391, 0x1000), .driver_info = DEVTYPE_IDEALTEK}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GOTOP {USB_DEVICE(0x08f2, 0x007f), .driver_info = DEVTYPE_GOTOP}, {USB_DEVICE(0x08f2, 0x00ce), .driver_info = DEVTYPE_GOTOP}, {USB_DEVICE(0x08f2, 0x00f4), .driver_info = DEVTYPE_GOTOP}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_JASTEC {USB_DEVICE(0x0f92, 0x0001), .driver_info = DEVTYPE_JASTEC}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_E2I {USB_DEVICE(0x1ac7, 0x0001), .driver_info = DEVTYPE_E2I}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC {USB_DEVICE(0x14c8, 0x0003), .driver_info = DEVTYPE_ZYTRONIC}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB /* TC5UH */ {USB_DEVICE(0x0664, 0x0309), .driver_info = DEVTYPE_TC45USB}, /* TC4UM */ {USB_DEVICE(0x0664, 0x0306), 
	/* tail of the usbtouch_devices id table (table opens earlier in file) */
	.driver_info = DEVTYPE_TC45USB},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
	/* data interface only */
	{USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00),
		.driver_info = DEVTYPE_NEXIO},
	{USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00),
		.driver_info = DEVTYPE_NEXIO},
#endif

	{}
};


/*****************************************************************************
 * e2i Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_E2I
/* Send the vendor-specific reset request (0x01/0x02) to an e2i device. */
static int e2i_init(struct usbtouch_usb *usbtouch)
{
	int ret;
	struct usb_device *udev = interface_to_usbdev(usbtouch->interface);

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
	                      0x01, 0x02, 0x0000, 0x0081,
	                      NULL, 0, USB_CTRL_SET_TIMEOUT);
	dbg("%s - usb_control_msg - E2I_RESET - bytes|err: %d",
	    __func__, ret);
	return ret;
}

/*
 * Decode one e2i report: 16-bit big-endian x/y; the leading 16-bit word
 * encodes pressure biased by 0xA000 (values above the bias mean "touching").
 */
static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	int tmp = (pkt[0] << 8) | pkt[1];
	dev->x = (pkt[2] << 8) | pkt[3];
	dev->y = (pkt[4] << 8) | pkt[5];

	tmp = tmp - 0xA000;
	dev->touch = (tmp > 0);
	/* clamp pressure at 0 when not touching */
	dev->press = (tmp > 0 ? tmp : 0);

	return 1;
}
#endif


/*****************************************************************************
 * eGalax part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
#ifndef MULTI_PACKET
#define MULTI_PACKET
#endif
#define EGALAX_PKT_TYPE_MASK		0xFE
#define EGALAX_PKT_TYPE_REPT		0x80
#define EGALAX_PKT_TYPE_DIAG		0x0A

/* Decode one 5-byte eGalax report: 12-bit x/y split in 7-bit fields. */
static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	/* only report packets carry coordinates; diag packets are skipped */
	if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT)
		return 0;

	dev->x = ((pkt[3] & 0x0F) << 7) | (pkt[4] & 0x7F);
	dev->y = ((pkt[1] & 0x0F) << 7) | (pkt[2] & 0x7F);
	dev->touch = pkt[0] & 0x01;

	return 1;
}

/*
 * Packet-length callback for the multi-packet framer: 5 bytes for reports,
 * length-prefixed for diag packets; -1 means "need more bytes", 0 = drop.
 */
static int egalax_get_pkt_len(unsigned char *buf, int len)
{
	switch (buf[0] & EGALAX_PKT_TYPE_MASK) {
	case EGALAX_PKT_TYPE_REPT:
		return 5;

	case EGALAX_PKT_TYPE_DIAG:
		if (len < 2)
			return -1;

		return buf[1] + 2;
	}

	return 0;
}
#endif


/*****************************************************************************
 * PanJit Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
/* Decode one PanJit report: 12-bit little-endian x/y, bit 0 = touch. */
static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1];
	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3];
	dev->touch = pkt[0] & 0x01;

	return 1;
}
#endif


/*****************************************************************************
 * 3M/Microtouch Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_3M

#define MTOUCHUSB_ASYNC_REPORT		1
#define MTOUCHUSB_RESET			7
#define MTOUCHUSB_REQ_CTRLLR_ID		10

/*
 * Decode one 3M/Microtouch report.  With the hwcalib_xy module option the
 * hardware-calibrated fields (bytes 3-6) are used and Y is inverted;
 * otherwise the raw fields (bytes 7-10) are reported.
 */
static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	if (hwcalib_xy) {
		dev->x = (pkt[4] << 8) | pkt[3];
		dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]);
	} else {
		dev->x = (pkt[8] << 8) | pkt[7];
		dev->y = (pkt[10] << 8) | pkt[9];
	}
	dev->touch = (pkt[2] & 0x40) ? 1 : 0;

	return 1;
}

/*
 * Reset the controller, then enable async reporting (retried up to 3 times,
 * tolerating -EPIPE stalls).  When hw-calibration is active the axis ranges
 * are widened to the full 16-bit range.
 */
static int mtouch_init(struct usbtouch_usb *usbtouch)
{
	int ret, i;
	struct usb_device *udev = interface_to_usbdev(usbtouch->interface);

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
	                      MTOUCHUSB_RESET,
	                      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	                      1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	dbg("%s - usb_control_msg - MTOUCHUSB_RESET - bytes|err: %d",
	    __func__, ret);
	if (ret < 0)
		return ret;
	/* give the controller time to come back after the reset */
	msleep(150);

	for (i = 0; i < 3; i++) {
		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      MTOUCHUSB_ASYNC_REPORT,
				      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT);
		dbg("%s - usb_control_msg - MTOUCHUSB_ASYNC_REPORT - bytes|err: %d",
		    __func__, ret);
		if (ret >= 0)
			break;
		if (ret != -EPIPE)
			return ret;
	}

	/* Default min/max xy are the raw values, override if using hw-calib */
	if (hwcalib_xy) {
		input_set_abs_params(usbtouch->input, ABS_X, 0, 0xffff, 0, 0);
		input_set_abs_params(usbtouch->input, ABS_Y, 0, 0xffff, 0, 0);
	}

	return 0;
}
#endif


/*****************************************************************************
 * ITM Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_ITM
static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	int touch;
	/*
	 * ITM devices report invalid x/y data if not touched.
	 * if the screen was touched before but is not touched any more
	 * report touch as 0 with the last valid x/y data once. then stop
	 * reporting data until touched again.
	 */
	dev->press = ((pkt[2] & 0x01) << 7) | (pkt[5] & 0x7F);

	touch = ~pkt[7] & 0x20;
	if (!touch) {
		if (dev->touch) {
			/* release event with the last valid coordinates */
			dev->touch = 0;
			return 1;
		}

		return 0;
	}

	dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F);
	dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
	dev->touch = touch;

	return 1;
}
#endif


/*****************************************************************************
 * eTurboTouch part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
#ifndef MULTI_PACKET
#define MULTI_PACKET
#endif
/*
 * Decode one eTurboTouch report.  Byte 0 bits 0-1 select the resolution
 * (the 14-bit raw value is right-shifted accordingly); bit 4 is touch.
 */
static int eturbo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	unsigned int shift;

	/* packets should start with sync */
	if (!(pkt[0] & 0x80))
		return 0;

	shift = (6 - (pkt[0] & 0x03));
	dev->x = ((pkt[3] << 7) | pkt[4]) >> shift;
	dev->y = ((pkt[1] << 7) | pkt[2]) >> shift;
	dev->touch = (pkt[0] & 0x10) ? 1 : 0;

	return 1;
}

/* Framer callback: 5-byte sync packets, 3-byte 0x01 packets, else drop. */
static int eturbo_get_pkt_len(unsigned char *buf, int len)
{
	if (buf[0] & 0x80)
		return 5;
	if (buf[0] == 0x01)
		return 3;
	return 0;
}
#endif


/*****************************************************************************
 * Gunze part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
/* Decode one 4-byte Gunze report; reject packets with bad framing bits. */
static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	/* byte 0 must have the sync bit set, bytes 1-3 must not */
	if (!(pkt[0] & 0x80) || ((pkt[1] | pkt[2] | pkt[3]) & 0x80))
		return 0;

	dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[2] & 0x7F);
	dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[3] & 0x7F);
	dev->touch = pkt[0] & 0x20;

	return 1;
}
#endif

/*****************************************************************************
 * DMC TSC-10/25 Part
 *
 * Documentation about the controller and it's protocol can be found at
 *   http://www.dmccoltd.com/files/controler/tsc10usb_pi_e.pdf
 *   http://www.dmccoltd.com/files/controler/tsc25_usb_e.pdf
 */
#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10

/* supported data rates; dmc_tsc10_init below selects TSC10_RATE_150 */
#define TSC10_RATE_POINT	0x50
#define TSC10_RATE_30		0x40
#define TSC10_RATE_50		0x41
#define TSC10_RATE_80		0x42
#define TSC10_RATE_100		0x43
#define TSC10_RATE_130		0x44
#define TSC10_RATE_150		0x45

/* commands */
#define TSC10_CMD_RESET		0x55
#define TSC10_CMD_RATE		0x05
#define TSC10_CMD_DATA1		0x01

/*
 * Bring up a TSC-10/25: reset, set the coordinate output rate, then start
 * the data stream.  Each command's 2-byte status reply is checked for the
 * 0x06 (ACK) byte; 0x15/0x01 is also accepted for the rate command.
 */
static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
{
	struct usb_device *dev = interface_to_usbdev(usbtouch->interface);
	int ret = -ENOMEM;
	unsigned char *buf;

	buf = kmalloc(2, GFP_NOIO);
	if (!buf)
		goto err_nobuf;
	/* reset */
	buf[0] = buf[1] = 0xFF;
	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
	                      TSC10_CMD_RESET,
	                      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	                      0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
	if (ret < 0)
		goto err_out;
	if (buf[0] != 0x06) {
		ret = -ENODEV;
		goto err_out;
	}

	/* set coordinate output rate */
	buf[0] = buf[1] = 0xFF;
	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
	                      TSC10_CMD_RATE,
	                      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	                      TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
	if (ret < 0)
		goto err_out;
	if ((buf[0] != 0x06) && (buf[0] != 0x15 || buf[1] != 0x01)) {
		ret = -ENODEV;
		goto err_out;
	}

	/* start sending data */
	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
	                      TSC10_CMD_DATA1,
	                      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
	                      0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
err_out:
	kfree(buf);
err_nobuf:
	return ret;
}


/* Decode one 5-byte TSC-10/25 report: 10-bit x/y, bit 0 = touch. */
static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = ((pkt[2] & 0x03) << 8) | pkt[1];
	dev->y = ((pkt[4] & 0x03) << 8) | pkt[3];
	dev->touch = pkt[0] & 0x01;

	return 1;
}
#endif


/*****************************************************************************
 * IRTOUCH Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
/* Decode one IRTOUCH report: 16-bit little-endian x/y. */
static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = (pkt[3] << 8) | pkt[2];
	dev->y = (pkt[5] << 8) | pkt[4];
	dev->touch = (pkt[1] & 0x03) ? 1 : 0;

	return 1;
}
#endif

/*****************************************************************************
 * ET&T TC5UH/TC4UM part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
/* Decode one ET&T TC5UH/TC4UM report: 12-bit little-endian x/y. */
static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1];
	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3];
	dev->touch = pkt[0] & 0x01;

	return 1;
}
#endif

/*****************************************************************************
 * IdealTEK URTC1000 Part
 */

#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
#ifndef MULTI_PACKET
#define MULTI_PACKET
#endif
/* Framer callback: 5-byte sync packets; 0x01 packets consume the rest. */
static int idealtek_get_pkt_len(unsigned char *buf, int len)
{
	if (buf[0] & 0x80)
		return 5;
	if (buf[0] == 0x01)
		return len;
	return 0;
}

/*
 * Decode one IdealTEK report.  Bits 3/4 of byte 0 select the protocol
 * variant; the x/y byte order is swapped between the two modes.
 */
static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	switch (pkt[0] & 0x98) {
	case 0x88:
		/* touch data in IdealTEK mode */
		dev->x = (pkt[1] << 5) | (pkt[2] >> 2);
		dev->y = (pkt[3] << 5) | (pkt[4] >> 2);
		dev->touch = (pkt[0] & 0x40) ? 1 : 0;
		return 1;

	case 0x98:
		/* touch data in MT emulation mode */
		dev->x = (pkt[2] << 5) | (pkt[1] >> 2);
		dev->y = (pkt[4] << 5) | (pkt[3] >> 2);
		dev->touch = (pkt[0] & 0x40) ? 1 : 0;
		return 1;

	default:
		return 0;
	}
}
#endif

/*****************************************************************************
 * General Touch Part
 */
#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
/* Decode one General Touch report: 16-bit x/y plus an 8-bit pressure. */
static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = (pkt[2] << 8) | pkt[1];
	dev->y = (pkt[4] << 8) | pkt[3];
	dev->press = pkt[5] & 0xff;
	dev->touch = pkt[0] & 0x01;

	return 1;
}
#endif

/*****************************************************************************
 * GoTop Part
 */
#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
/* Decode one GoTop report: 10/11-bit x/y packed into byte 1's bit fields. */
static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = ((pkt[1] & 0x38) << 4) | pkt[2];
	dev->y = ((pkt[1] & 0x07) << 7) | pkt[3];
	dev->touch = pkt[0] & 0x01;

	return 1;
}
#endif

/*****************************************************************************
 * JASTEC Part
 */
#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
/* Decode one JASTEC report: 12-bit x/y from two 6-bit fields each. */
static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	dev->x = ((pkt[0] & 0x3f) << 6) | (pkt[2] & 0x3f);
	dev->y = ((pkt[1] & 0x3f) << 6) | (pkt[3] & 0x3f);
	dev->touch = (pkt[0] & 0x40) >> 6;

	return 1;
}
#endif

/*****************************************************************************
 * Zytronic Part
 */
#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
/*
 * Decode one Zytronic packet.  Byte 0 is a packet-type tag: 0xC0 = finger
 * down, 0x80 = finger up (both carry 10-bit x/y), 0x3A = command response.
 */
static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
	switch (pkt[0]) {
	case 0x3A: /* command response */
		dbg("%s: Command response %d", __func__, pkt[1]);
		break;

	case 0xC0: /* down */
		dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7);
		dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7);
		dev->touch = 1;
		dbg("%s: down %d,%d", __func__, dev->x, dev->y);
		return 1;

	case 0x80: /* up */
		dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7);
		dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7);
		dev->touch = 0;
		dbg("%s: up %d,%d", __func__, dev->x, dev->y);
		return 1;

	default:
		dbg("%s: Unknown return %d", __func__, pkt[0]);
		break;
	}

	return 0;
}
#endif
/*****************************************************************************
 * NEXIO Part
 */
#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO

#define NEXIO_TIMEOUT	5000
#define NEXIO_BUFSIZE	1024
#define NEXIO_THRESHOLD	50

/* Per-device state: a preallocated ACK urb and its transfer buffer. */
struct nexio_priv {
	struct urb *ack;
	unsigned char *ack_buf;
};

/*
 * On-the-wire touch report.  nexio_read_data() accepts packets whose high
 * flag bits are 0xe0 and uses flags bit 0 as the touch/release state
 * (the original comment claimed "0xe1 = touch, 0xe1 = release" — a typo).
 */
struct nexio_touch_packet {
	u8	flags;		/* high bits 0xe0 = touch data; bit 0 = touch state */
	__be16	data_len;	/* total bytes of touch data */
	__be16	x_len;		/* bytes for X axis */
	__be16	y_len;		/* bytes for Y axis */
	u8	data[];
} __attribute__ ((packed));

static unsigned char nexio_ack_pkt[2] = { 0xaa, 0x02 };
static unsigned char nexio_init_pkt[4] = { 0x82, 0x04, 0x0a, 0x0f };

/* ACK urb completion: nothing to do, the urb is reused as-is. */
static void nexio_ack_complete(struct urb *urb)
{
}

/*
 * Allocate the private state and the ACK urb/buffer.  On any failure all
 * partially-acquired resources are released (goto-unwind).
 */
static int nexio_alloc(struct usbtouch_usb *usbtouch)
{
	struct nexio_priv *priv;
	int ret = -ENOMEM;

	usbtouch->priv = kmalloc(sizeof(struct nexio_priv), GFP_KERNEL);
	if (!usbtouch->priv)
		goto out_buf;

	priv = usbtouch->priv;

	priv->ack_buf = kmemdup(nexio_ack_pkt, sizeof(nexio_ack_pkt),
				GFP_KERNEL);
	if (!priv->ack_buf)
		goto err_priv;

	priv->ack = usb_alloc_urb(0, GFP_KERNEL);
	if (!priv->ack) {
		dbg("%s - usb_alloc_urb failed: usbtouch->ack", __func__);
		goto err_ack_buf;
	}

	return 0;

err_ack_buf:
	kfree(priv->ack_buf);
err_priv:
	kfree(priv);
out_buf:
	return ret;
}

/*
 * Device bring-up: locate the first IN and OUT bulk endpoints, drain two
 * reads, send the init command, read up to three length-prefixed replies
 * (0x83 = firmware version string, 0x84 = device name string), then
 * pre-fill the ACK urb for use from the receive path.
 */
static int nexio_init(struct usbtouch_usb *usbtouch)
{
	struct usb_device *dev = interface_to_usbdev(usbtouch->interface);
	struct usb_host_interface *interface = usbtouch->interface->cur_altsetting;
	struct nexio_priv *priv = usbtouch->priv;
	int ret = -ENOMEM;
	int actual_len, i;
	unsigned char *buf;
	char *firmware_ver = NULL, *device_name = NULL;
	int input_ep = 0, output_ep = 0;

	/* find first input and output endpoint */
	for (i = 0; i < interface->desc.bNumEndpoints; i++) {
		if (!input_ep &&
		    usb_endpoint_dir_in(&interface->endpoint[i].desc))
			input_ep = interface->endpoint[i].desc.bEndpointAddress;
		if (!output_ep &&
		    usb_endpoint_dir_out(&interface->endpoint[i].desc))
			output_ep = interface->endpoint[i].desc.bEndpointAddress;
	}
	if (!input_ep || !output_ep)
		return -ENXIO;

	buf = kmalloc(NEXIO_BUFSIZE, GFP_NOIO);
	if (!buf)
		goto out_buf;

	/* two empty reads */
	for (i = 0; i < 2; i++) {
		ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep),
				   buf, NEXIO_BUFSIZE, &actual_len,
				   NEXIO_TIMEOUT);
		if (ret < 0)
			goto out_buf;
	}

	/* send init command */
	memcpy(buf, nexio_init_pkt, sizeof(nexio_init_pkt));
	ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, output_ep),
			   buf, sizeof(nexio_init_pkt), &actual_len,
			   NEXIO_TIMEOUT);
	if (ret < 0)
		goto out_buf;

	/* read replies */
	for (i = 0; i < 3; i++) {
		memset(buf, 0, NEXIO_BUFSIZE);
		ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep),
				   buf, NEXIO_BUFSIZE, &actual_len,
				   NEXIO_TIMEOUT);
		/* buf[1] is the reply's self-declared length; reject mismatch */
		if (ret < 0 || actual_len < 1 || buf[1] != actual_len)
			continue;
		switch (buf[0]) {
		case 0x83:	/* firmware version */
			if (!firmware_ver)
				firmware_ver = kstrdup(&buf[2], GFP_NOIO);
			break;
		case 0x84:	/* device name */
			if (!device_name)
				device_name = kstrdup(&buf[2], GFP_NOIO);
			break;
		}
	}

	printk(KERN_INFO "Nexio device: %s, firmware version: %s\n",
	       device_name, firmware_ver);

	kfree(firmware_ver);
	kfree(device_name);

	usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep),
			  priv->ack_buf, sizeof(nexio_ack_pkt),
			  nexio_ack_complete, usbtouch);
	ret = 0;

out_buf:
	kfree(buf);
	return ret;
}

/* Tear down everything nexio_alloc() acquired (kills in-flight ACK urb). */
static void nexio_exit(struct usbtouch_usb *usbtouch)
{
	struct nexio_priv *priv = usbtouch->priv;

	usb_kill_urb(priv->ack);
	usb_free_urb(priv->ack);
	kfree(priv->ack_buf);
	kfree(priv);
}

/*
 * Decode one NEXIO touch packet.  The payload is one byte per IR sensor
 * element ("darkness" 0-100) for the X row followed by the Y row; the first
 * run of elements above NEXIO_THRESHOLD on each axis locates the touch.
 */
static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
{
	struct nexio_touch_packet *packet = (void *) pkt;
	struct nexio_priv *priv = usbtouch->priv;
	unsigned int data_len = be16_to_cpu(packet->data_len);
	unsigned int x_len = be16_to_cpu(packet->x_len);
	unsigned int y_len = be16_to_cpu(packet->y_len);
	int x, y, begin_x, begin_y, end_x, end_y, w, h, ret;

	/* got touch data? */
	if ((pkt[0] & 0xe0) != 0xe0)
		return 0;

	/* strip the protocol's length-field bias — presumably header bytes;
	 * NOTE(review): magic offsets, confirm against device protocol doc */
	if (data_len > 0xff)
		data_len -= 0x100;
	if (x_len > 0xff)
		x_len -= 0x80;

	/* send ACK */
	/* NOTE(review): submit result is stored but never checked */
	ret = usb_submit_urb(priv->ack, GFP_ATOMIC);

	/*
	 * NOTE(review): this writes into the shared static usbtouch_dev_info
	 * table, so the axis ranges learned from the first device apply to
	 * every NEXIO device handled by this driver instance.
	 */
	if (!usbtouch->type->max_xc) {
		usbtouch->type->max_xc = 2 * x_len;
		input_set_abs_params(usbtouch->input, ABS_X, 0,
				     usbtouch->type->max_xc, 0, 0);
		usbtouch->type->max_yc = 2 * y_len;
		input_set_abs_params(usbtouch->input, ABS_Y, 0,
				     usbtouch->type->max_yc, 0, 0);
	}
	/*
	 * The device reports state of IR sensors on X and Y axes.
	 * Each byte represents "darkness" percentage (0-100) of one element.
	 * 17" touchscreen reports only 64 x 52 bytes so the resolution is low.
	 * This also means that there's a limited multi-touch capability but
	 * it's disabled (and untested) here as there's no X driver for that.
	 */
	begin_x = end_x = begin_y = end_y = -1;
	for (x = 0; x < x_len; x++) {
		if (begin_x == -1 && packet->data[x] > NEXIO_THRESHOLD) {
			begin_x = x;
			continue;
		}
		if (end_x == -1 && begin_x != -1 &&
		    packet->data[x] < NEXIO_THRESHOLD) {
			end_x = x - 1;
			/* the Y row starts right after the X row */
			for (y = x_len; y < data_len; y++) {
				if (begin_y == -1 &&
				    packet->data[y] > NEXIO_THRESHOLD) {
					begin_y = y - x_len;
					continue;
				}
				if (end_y == -1 &&
				    begin_y != -1 &&
				    packet->data[y] < NEXIO_THRESHOLD) {
					end_y = y - 1 - x_len;
					w = end_x - begin_x;
					h = end_y - begin_y;
#if 0
					/* multi-touch */
					input_report_abs(usbtouch->input,
						ABS_MT_TOUCH_MAJOR, max(w,h));
					input_report_abs(usbtouch->input,
						ABS_MT_TOUCH_MINOR, min(x,h));
					input_report_abs(usbtouch->input,
						ABS_MT_POSITION_X, 2*begin_x+w);
					input_report_abs(usbtouch->input,
						ABS_MT_POSITION_Y, 2*begin_y+h);
					input_report_abs(usbtouch->input,
						ABS_MT_ORIENTATION, w > h);
					input_mt_sync(usbtouch->input);
#endif
					/* single touch: report the center of
					 * the first detected blob */
					usbtouch->x = 2 * begin_x + w;
					usbtouch->y = 2 * begin_y + h;
					usbtouch->touch = packet->flags & 0x01;
					begin_y = end_y = -1;
					return 1;
				}
			}
			begin_x = end_x = -1;
		}
	}

	return 0;
}
#endif


/*****************************************************************************
 * the different device descriptors
 */
#ifdef MULTI_PACKET
static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
				   unsigned char *pkt, int len);
#endif

/*
 * Per-device-type parameters and callbacks, indexed by the DEVTYPE_* value
 * stored in the id table's driver_info field.  NOTE(review): this table is
 * mutated at runtime (process_pkt default in probe, NEXIO axis ranges), so
 * it is shared state across all bound devices.
 */
static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
	[DEVTYPE_EGALAX] = {
		.min_xc		= 0x0,
		.max_xc		= 0x07ff,
		.min_yc		= 0x0,
		.max_yc		= 0x07ff,
		.rept_size	= 16,
		.process_pkt	= usbtouch_process_multi,
		.get_pkt_len	= egalax_get_pkt_len,
		.read_data	= egalax_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
	[DEVTYPE_PANJIT] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 8,
		.read_data	= panjit_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_3M
	[DEVTYPE_3M] = {
		.min_xc		= 0x0,
		.max_xc		= 0x4000,
		.min_yc		= 0x0,
		.max_yc		= 0x4000,
		.rept_size	= 11,
		.read_data	= mtouch_read_data,
		.init		= mtouch_init,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_ITM
	[DEVTYPE_ITM] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.max_press	= 0xff,
		.rept_size	= 8,
		.read_data	= itm_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
	[DEVTYPE_ETURBO] = {
		.min_xc		= 0x0,
		.max_xc		= 0x07ff,
		.min_yc		= 0x0,
		.max_yc		= 0x07ff,
		.rept_size	= 8,
		.process_pkt	= usbtouch_process_multi,
		.get_pkt_len	= eturbo_get_pkt_len,
		.read_data	= eturbo_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
	[DEVTYPE_GUNZE] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 4,
		.read_data	= gunze_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
	[DEVTYPE_DMC_TSC10] = {
		.min_xc		= 0x0,
		.max_xc		= 0x03ff,
		.min_yc		= 0x0,
		.max_yc		= 0x03ff,
		.rept_size	= 5,
		.init		= dmc_tsc10_init,
		.read_data	= dmc_tsc10_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
	[DEVTYPE_IRTOUCH] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 8,
		.read_data	= irtouch_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
	[DEVTYPE_IDEALTEK] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 8,
		.process_pkt	= usbtouch_process_multi,
		.get_pkt_len	= idealtek_get_pkt_len,
		.read_data	= idealtek_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
	[DEVTYPE_GENERAL_TOUCH] = {
		.min_xc		= 0x0,
		.max_xc		= 0x7fff,
		.min_yc		= 0x0,
		.max_yc		= 0x7fff,
		.rept_size	= 7,
		.read_data	= general_touch_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
	[DEVTYPE_GOTOP] = {
		.min_xc		= 0x0,
		.max_xc		= 0x03ff,
		.min_yc		= 0x0,
		.max_yc		= 0x03ff,
		.rept_size	= 4,
		.read_data	= gotop_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
	[DEVTYPE_JASTEC] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 4,
		.read_data	= jastec_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_E2I
	[DEVTYPE_E2I] = {
		.min_xc		= 0x0,
		.max_xc		= 0x7fff,
		.min_yc		= 0x0,
		.max_yc		= 0x7fff,
		.rept_size	= 6,
		.init		= e2i_init,
		.read_data	= e2i_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
	[DEVTYPE_ZYTRONIC] = {
		.min_xc		= 0x0,
		.max_xc		= 0x03ff,
		.min_yc		= 0x0,
		.max_yc		= 0x03ff,
		.rept_size	= 5,
		.read_data	= zytronic_read_data,
		.irq_always	= true,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
	[DEVTYPE_TC45USB] = {
		.min_xc		= 0x0,
		.max_xc		= 0x0fff,
		.min_yc		= 0x0,
		.max_yc		= 0x0fff,
		.rept_size	= 5,
		.read_data	= tc45usb_read_data,
	},
#endif

#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
	[DEVTYPE_NEXIO] = {
		.rept_size	= 1024,
		.irq_always	= true,
		.read_data	= nexio_read_data,
		.alloc		= nexio_alloc,
		.init		= nexio_init,
		.exit		= nexio_exit,
	},
#endif
};


/*****************************************************************************
 * Generic Part
 */

/*
 * Run the device-specific decoder on one complete packet and, if it yielded
 * an event, push touch/x/y (optionally swapped) and pressure to input core.
 */
static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch,
                                 unsigned char *pkt, int len)
{
	struct usbtouch_device_info *type = usbtouch->type;

	if (!type->read_data(usbtouch, pkt))
			return;

	input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch);

	if (swap_xy) {
		input_report_abs(usbtouch->input, ABS_X, usbtouch->y);
		input_report_abs(usbtouch->input, ABS_Y, usbtouch->x);
	} else {
		input_report_abs(usbtouch->input, ABS_X, usbtouch->x);
		input_report_abs(usbtouch->input, ABS_Y, usbtouch->y);
	}
	if (type->max_press)
		input_report_abs(usbtouch->input, ABS_PRESSURE, usbtouch->press);
	input_sync(usbtouch->input);
}


#ifdef MULTI_PACKET
/*
 * Re-frame a byte stream into protocol packets for devices whose reports do
 * not align with USB transfers.  Leftover bytes from the previous transfer
 * are kept in usbtouch->buffer/buf_len; get_pkt_len() returns the expected
 * packet size, a negative "need N more bytes" count, or 0 to drop.
 */
static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
                                   unsigned char *pkt, int len)
{
	unsigned char *buffer;
	int pkt_len, pos, buf_len, tmp;

	/* process buffer */
	if (unlikely(usbtouch->buf_len)) {
		/* try to get size */
		pkt_len = usbtouch->type->get_pkt_len(
				usbtouch->buffer, usbtouch->buf_len);

		/* drop? */
		if (unlikely(!pkt_len))
			goto out_flush_buf;

		/* need to append -pkt_len bytes before able to get size */
		if (unlikely(pkt_len < 0)) {
			int append = -pkt_len;
			if (unlikely(append > len))
			       append = len;
			/* overflow of the reassembly buffer: drop */
			if (usbtouch->buf_len + append >= usbtouch->type->rept_size)
				goto out_flush_buf;
			memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, append);
			usbtouch->buf_len += append;

			pkt_len = usbtouch->type->get_pkt_len(
					usbtouch->buffer, usbtouch->buf_len);
			if (pkt_len < 0)
				return;
		}

		/* append */
		tmp = pkt_len - usbtouch->buf_len;
		if (usbtouch->buf_len + tmp >= usbtouch->type->rept_size)
			goto out_flush_buf;
		memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, tmp);
		usbtouch_process_pkt(usbtouch, usbtouch->buffer, pkt_len);

		buffer = pkt + tmp;
		buf_len = len - tmp;
	} else {
		buffer = pkt;
		buf_len = len;
	}

	/* loop over the received packet, process */
	pos = 0;
	while (pos < buf_len) {
		/* get packet len */
		pkt_len = usbtouch->type->get_pkt_len(buffer + pos,
							buf_len - pos);

		/* unknown packet: skip one byte */
		if (unlikely(!pkt_len)) {
			pos++;
			continue;
		}

		/* full packet: process */
		if (likely((pkt_len > 0) && (pkt_len <= buf_len - pos))) {
			usbtouch_process_pkt(usbtouch, buffer + pos, pkt_len);
		} else {
			/* incomplete packet: save in buffer */
			memcpy(usbtouch->buffer, buffer + pos, buf_len - pos);
			usbtouch->buf_len = buf_len - pos;
			return;
		}
		pos += pkt_len;
	}

out_flush_buf:
	usbtouch->buf_len = 0;
	return;
}
#endif


/*
 * URB completion handler: dispatch received data to process_pkt and
 * resubmit.  Terminal statuses (-ECONNRESET etc.) and -ETIME return
 * without resubmitting; other errors skip the data but still resubmit.
 */
static void usbtouch_irq(struct urb *urb)
{
	struct usbtouch_usb *usbtouch = urb->context;
	int retval;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ETIME:
		/* this urb is timing out */
		dbg("%s - urb timed out - was the device unplugged?",
		    __func__);
		return;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d",
		    __func__, urb->status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d",
		    __func__, urb->status);
		goto exit;
	}

	usbtouch->type->process_pkt(usbtouch, usbtouch->data, urb->actual_length);

exit:
	usb_mark_last_busy(interface_to_usbdev(usbtouch->interface));
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		err("%s - usb_submit_urb failed with result: %d",
		    __func__, retval);
}

/*
 * input open(): start the interrupt urb (unless the device type keeps it
 * running permanently via irq_always) and enable remote wakeup.  The
 * autopm get/put pair only brackets the submission.
 */
static int usbtouch_open(struct input_dev *input)
{
	struct usbtouch_usb *usbtouch = input_get_drvdata(input);
	int r;

	usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface);

	r = usb_autopm_get_interface(usbtouch->interface) ? -EIO : 0;
	if (r < 0)
		goto out;

	if (!usbtouch->type->irq_always) {
		if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) {
			r = -EIO;
			goto out_put;
		}
	}

	usbtouch->interface->needs_remote_wakeup = 1;
out_put:
	usb_autopm_put_interface(usbtouch->interface);
out:
	return r;
}

/* input close(): stop the urb (unless irq_always) and drop remote wakeup. */
static void usbtouch_close(struct input_dev *input)
{
	struct usbtouch_usb *usbtouch = input_get_drvdata(input);
	int r;

	if (!usbtouch->type->irq_always)
		usb_kill_urb(usbtouch->irq);
	r = usb_autopm_get_interface(usbtouch->interface);
	usbtouch->interface->needs_remote_wakeup = 0;
	/* only balance the get if it succeeded */
	if (!r)
		usb_autopm_put_interface(usbtouch->interface);
}

/* USB suspend: just stop the interrupt urb. */
static int usbtouch_suspend
(struct usb_interface *intf, pm_message_t message)
{
	struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);

	usb_kill_urb(usbtouch->irq);

	return 0;
}

/* USB resume: restart the urb if the input device is open or irq_always. */
static int usbtouch_resume(struct usb_interface *intf)
{
	struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
	struct input_dev *input = usbtouch->input;
	int result = 0;

	mutex_lock(&input->mutex);
	if (input->users || usbtouch->type->irq_always)
		result = usb_submit_urb(usbtouch->irq, GFP_NOIO);
	mutex_unlock(&input->mutex);

	return result;
}

/*
 * USB reset-resume: the device lost its state, so rerun the type-specific
 * init before restarting IO.
 */
static int usbtouch_reset_resume(struct usb_interface *intf)
{
	struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
	struct input_dev *input = usbtouch->input;
	int err = 0;

	/* reinit the device */
	if (usbtouch->type->init) {
		err = usbtouch->type->init(usbtouch);
		if (err) {
			dbg("%s - type->init() failed, err: %d",
			    __func__, err);
			return err;
		}
	}

	/* restart IO if needed */
	mutex_lock(&input->mutex);
	if (input->users)
		err = usb_submit_urb(usbtouch->irq, GFP_NOIO);
	mutex_unlock(&input->mutex);

	return err;
}

/* Release the coherent DMA report buffer and the reassembly buffer. */
static void usbtouch_free_buffers(struct usb_device *udev,
				  struct usbtouch_usb *usbtouch)
{
	usb_free_coherent(udev, usbtouch->type->rept_size,
			  usbtouch->data, usbtouch->data_dma);
	kfree(usbtouch->buffer);
}

/* Return the first IN endpoint of the interface, or NULL if none. */
static struct usb_endpoint_descriptor *
usbtouch_get_input_endpoint(struct usb_host_interface *interface)
{
	int i;

	for (i = 0; i < interface->desc.bNumEndpoints; i++)
		if (usb_endpoint_dir_in(&interface->endpoint[i].desc))
			return &interface->endpoint[i].desc;

	return NULL;
}

/*
 * Probe: allocate per-device state and the input device, set up the int or
 * bulk urb on the first IN endpoint, run type-specific alloc/init, register
 * the input device, and for irq_always types start IO immediately.  Errors
 * unwind in strict reverse order of acquisition.
 */
static int usbtouch_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct usbtouch_usb *usbtouch;
	struct input_dev *input_dev;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usbtouch_device_info *type;
	int err = -ENOMEM;

	/* some devices are ignored */
	if (id->driver_info == DEVTYPE_IGNORE)
		return -ENODEV;

	endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
	if (!endpoint)
		return -ENXIO;

	usbtouch = kzalloc(sizeof(struct usbtouch_usb), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!usbtouch || !input_dev)
		goto out_free;

	type = &usbtouch_dev_info[id->driver_info];
	usbtouch->type = type;
	/* NOTE(review): writes the default into the shared static table */
	if (!type->process_pkt)
		type->process_pkt = usbtouch_process_pkt;

	usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
					    GFP_KERNEL, &usbtouch->data_dma);
	if (!usbtouch->data)
		goto out_free;

	/* reassembly buffer, only needed by multi-packet protocols */
	if (type->get_pkt_len) {
		usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL);
		if (!usbtouch->buffer)
			goto out_free_buffers;
	}

	usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL);
	if (!usbtouch->irq) {
		dbg("%s - usb_alloc_urb failed: usbtouch->irq", __func__);
		goto out_free_buffers;
	}

	usbtouch->interface = intf;
	usbtouch->input = input_dev;

	/* build "<manufacturer> <product>" name, with fallback to VID:PID */
	if (udev->manufacturer)
		strlcpy(usbtouch->name, udev->manufacturer,
			sizeof(usbtouch->name));

	if (udev->product) {
		if (udev->manufacturer)
			strlcat(usbtouch->name, " ", sizeof(usbtouch->name));
		strlcat(usbtouch->name, udev->product, sizeof(usbtouch->name));
	}

	if (!strlen(usbtouch->name))
		snprintf(usbtouch->name, sizeof(usbtouch->name),
			"USB Touchscreen %04x:%04x",
			 le16_to_cpu(udev->descriptor.idVendor),
			 le16_to_cpu(udev->descriptor.idProduct));

	usb_make_path(udev, usbtouch->phys, sizeof(usbtouch->phys));
	strlcat(usbtouch->phys, "/input0", sizeof(usbtouch->phys));

	input_dev->name = usbtouch->name;
	input_dev->phys = usbtouch->phys;
	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;

	input_set_drvdata(input_dev, usbtouch);

	input_dev->open = usbtouch_open;
	input_dev->close = usbtouch_close;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(input_dev, ABS_X, type->min_xc, type->max_xc,
			     0, 0);
	input_set_abs_params(input_dev, ABS_Y, type->min_yc, type->max_yc,
			     0, 0);
	if (type->max_press)
		input_set_abs_params(input_dev, ABS_PRESSURE, type->min_press,
				     type->max_press, 0, 0);

	/* match the urb type to the endpoint type */
	if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT)
		usb_fill_int_urb(usbtouch->irq, udev,
			 usb_rcvintpipe(udev, endpoint->bEndpointAddress),
			 usbtouch->data, type->rept_size,
			 usbtouch_irq, usbtouch, endpoint->bInterval);
	else
		usb_fill_bulk_urb(usbtouch->irq, udev,
			 usb_rcvbulkpipe(udev, endpoint->bEndpointAddress),
			 usbtouch->data, type->rept_size,
			 usbtouch_irq, usbtouch);

	usbtouch->irq->dev = udev;
	usbtouch->irq->transfer_dma = usbtouch->data_dma;
	usbtouch->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	/* device specific allocations */
	if (type->alloc) {
		err = type->alloc(usbtouch);
		if (err) {
			dbg("%s - type->alloc() failed, err: %d", __func__, err);
			goto out_free_urb;
		}
	}

	/* device specific initialisation*/
	if (type->init) {
		err = type->init(usbtouch);
		if (err) {
			dbg("%s - type->init() failed, err: %d", __func__, err);
			goto out_do_exit;
		}
	}

	err = input_register_device(usbtouch->input);
	if (err) {
		dbg("%s - input_register_device failed, err: %d", __func__, err);
		goto out_do_exit;
	}

	usb_set_intfdata(intf, usbtouch);

	if (usbtouch->type->irq_always) {
		/* this can't fail */
		usb_autopm_get_interface(intf);
		err = usb_submit_urb(usbtouch->irq, GFP_KERNEL);
		if (err) {
			usb_autopm_put_interface(intf);
			err("%s - usb_submit_urb failed with result: %d",
			    __func__, err);
			goto out_unregister_input;
		}
	}

	return 0;

out_unregister_input:
	input_unregister_device(input_dev);
	/* unregister also frees the input device; avoid the double free */
	input_dev = NULL;
out_do_exit:
	if (type->exit)
		type->exit(usbtouch);
out_free_urb:
	usb_free_urb(usbtouch->irq);
out_free_buffers:
	usbtouch_free_buffers(udev, usbtouch);
out_free:
	input_free_device(input_dev);
	kfree(usbtouch);
	return err;
}

/*
 * Disconnect: unregister the input device (which stops IO via close),
 * then release urb, type-specific state, and buffers.
 */
static void usbtouch_disconnect(struct usb_interface *intf)
{
	struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);

	dbg("%s - called", __func__);

	if (!usbtouch)
		return;

	dbg("%s - usbtouch is initialized, cleaning up", __func__);
	usb_set_intfdata(intf, NULL);
	/* this will stop IO via close */
	input_unregister_device(usbtouch->input);
	usb_free_urb(usbtouch->irq);
	if (usbtouch->type->exit)
		usbtouch->type->exit(usbtouch);
	usbtouch_free_buffers(interface_to_usbdev(intf), usbtouch);
	kfree(usbtouch);
}

MODULE_DEVICE_TABLE(usb, usbtouch_devices);

static struct usb_driver usbtouch_driver = {
	.name		= "usbtouchscreen",
	.probe		= usbtouch_probe,
	.disconnect	= usbtouch_disconnect,
	.suspend	= usbtouch_suspend,
	.resume		= usbtouch_resume,
	.reset_resume	= usbtouch_reset_resume,
	.id_table	= usbtouch_devices,
	.supports_autosuspend = 1,
};

/* Module entry point: register the USB driver. */
static int __init usbtouch_init(void)
{
	return usb_register(&usbtouch_driver);
}

/* Module exit point: unregister the USB driver. */
static void __exit usbtouch_cleanup(void)
{
	usb_deregister(&usbtouch_driver);
}

module_init(usbtouch_init);
module_exit(usbtouch_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

MODULE_ALIAS("touchkitusb");
MODULE_ALIAS("itmtouch");
MODULE_ALIAS("mtouchusb");
gpl-2.0
javelinanddart/kernel_samsung_jf
drivers/ata/pata_pxa.c
5027
9979
/* * Generic PXA PATA driver * * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/ata.h> #include <linux/libata.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/completion.h> #include <scsi/scsi_host.h> #include <mach/pxa2xx-regs.h> #include <mach/pata_pxa.h> #include <mach/dma.h> #define DRV_NAME "pata_pxa" #define DRV_VERSION "0.1" struct pata_pxa_data { uint32_t dma_channel; struct pxa_dma_desc *dma_desc; dma_addr_t dma_desc_addr; uint32_t dma_desc_id; /* DMA IO physical address */ uint32_t dma_io_addr; /* PXA DREQ<0:2> pin selector */ uint32_t dma_dreq; /* DMA DCSR register value */ uint32_t dma_dcsr; struct completion dma_done; }; /* * Setup the DMA descriptors. The size is transfer capped at 4k per descriptor, * if the transfer is longer, it is split into multiple chained descriptors. */ static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; uint32_t cpu_len, seg_len; dma_addr_t cpu_addr; cpu_addr = sg_dma_address(sg); cpu_len = sg_dma_len(sg); do { seg_len = (cpu_len > 0x1000) ? 
0x1000 : cpu_len; pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr + ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc)); pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 | DCMD_WIDTH2 | (DCMD_LENGTH & seg_len); if (qc->tf.flags & ATA_TFLAG_WRITE) { pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr; pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr; pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; } else { pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr; pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr; pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; } cpu_len -= seg_len; cpu_addr += seg_len; pd->dma_desc_id++; } while (cpu_len); /* Should not happen */ if (seg_len & 0x1f) DALGN |= (1 << pd->dma_dreq); } /* * Prepare taskfile for submission. */ static void pxa_qc_prep(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; int si = 0; struct scatterlist *sg; if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return; pd->dma_desc_id = 0; DCSR(pd->dma_channel) = 0; DALGN &= ~(1 << pd->dma_dreq); for_each_sg(qc->sg, sg, qc->n_elem, si) pxa_load_dmac(sg, qc); pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP; /* Fire IRQ only at the end of last block */ pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN; DDADR(pd->dma_channel) = pd->dma_desc_addr; DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel; } /* * Configure the DMA controller, load the DMA descriptors, but don't start the * DMA controller yet. Only issue the ATA command. */ static void pxa_bmdma_setup(struct ata_queued_cmd *qc) { qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); } /* * Execute the DMA transfer. */ static void pxa_bmdma_start(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; init_completion(&pd->dma_done); DCSR(pd->dma_channel) = DCSR_RUN; } /* * Wait until the DMA transfer completes, then stop the DMA controller. 
*/ static void pxa_bmdma_stop(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; if ((DCSR(pd->dma_channel) & DCSR_RUN) && wait_for_completion_timeout(&pd->dma_done, HZ)) dev_err(qc->ap->dev, "Timeout waiting for DMA completion!"); DCSR(pd->dma_channel) = 0; } /* * Read DMA status. The bmdma_stop() will take care of properly finishing the * DMA transfer so we always have DMA-complete interrupt here. */ static unsigned char pxa_bmdma_status(struct ata_port *ap) { struct pata_pxa_data *pd = ap->private_data; unsigned char ret = ATA_DMA_INTR; if (pd->dma_dcsr & DCSR_BUSERR) ret |= ATA_DMA_ERR; return ret; } /* * No IRQ register present so we do nothing. */ static void pxa_irq_clear(struct ata_port *ap) { } /* * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still * unclear why ATAPI has DMA issues. */ static int pxa_check_atapi_dma(struct ata_queued_cmd *qc) { return -EOPNOTSUPP; } static struct scsi_host_template pxa_ata_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations pxa_ata_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .bmdma_setup = pxa_bmdma_setup, .bmdma_start = pxa_bmdma_start, .bmdma_stop = pxa_bmdma_stop, .bmdma_status = pxa_bmdma_status, .check_atapi_dma = pxa_check_atapi_dma, .sff_irq_clear = pxa_irq_clear, .qc_prep = pxa_qc_prep, }; /* * DMA interrupt handler. 
*/ static void pxa_ata_dma_irq(int dma, void *port) { struct ata_port *ap = port; struct pata_pxa_data *pd = ap->private_data; pd->dma_dcsr = DCSR(dma); DCSR(dma) = pd->dma_dcsr; if (pd->dma_dcsr & DCSR_STOPSTATE) complete(&pd->dma_done); } static int __devinit pxa_ata_probe(struct platform_device *pdev) { struct ata_host *host; struct ata_port *ap; struct pata_pxa_data *data; struct resource *cmd_res; struct resource *ctl_res; struct resource *dma_res; struct resource *irq_res; struct pata_pxa_pdata *pdata = pdev->dev.platform_data; int ret = 0; /* * Resource validation, three resources are needed: * - CMD port base address * - CTL port base address * - DMA port base address * - IRQ pin */ if (pdev->num_resources != 4) { dev_err(&pdev->dev, "invalid number of resources\n"); return -EINVAL; } /* * CMD port base address */ cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(cmd_res == NULL)) return -EINVAL; /* * CTL port base address */ ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (unlikely(ctl_res == NULL)) return -EINVAL; /* * DMA port base address */ dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (unlikely(dma_res == NULL)) return -EINVAL; /* * IRQ pin */ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (unlikely(irq_res == NULL)) return -EINVAL; /* * Allocate the host */ host = ata_host_alloc(&pdev->dev, 1); if (!host) return -ENOMEM; ap = host->ports[0]; ap->ops = &pxa_ata_port_ops; ap->pio_mask = ATA_PIO4; ap->mwdma_mask = ATA_MWDMA2; ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, resource_size(cmd_res)); ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, resource_size(ctl_res)); ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, resource_size(dma_res)); /* * Adjust register offsets */ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; ap->ioaddr.data_addr = ap->ioaddr.cmd_addr + (ATA_REG_DATA << pdata->reg_shift); ap->ioaddr.error_addr = ap->ioaddr.cmd_addr + 
(ATA_REG_ERR << pdata->reg_shift); ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr + (ATA_REG_FEATURE << pdata->reg_shift); ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr + (ATA_REG_NSECT << pdata->reg_shift); ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr + (ATA_REG_LBAL << pdata->reg_shift); ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr + (ATA_REG_LBAM << pdata->reg_shift); ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr + (ATA_REG_LBAH << pdata->reg_shift); ap->ioaddr.device_addr = ap->ioaddr.cmd_addr + (ATA_REG_DEVICE << pdata->reg_shift); ap->ioaddr.status_addr = ap->ioaddr.cmd_addr + (ATA_REG_STATUS << pdata->reg_shift); ap->ioaddr.command_addr = ap->ioaddr.cmd_addr + (ATA_REG_CMD << pdata->reg_shift); /* * Allocate and load driver's internal data structure */ data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data), GFP_KERNEL); if (!data) return -ENOMEM; ap->private_data = data; data->dma_dreq = pdata->dma_dreq; data->dma_io_addr = dma_res->start; /* * Allocate space for the DMA descriptors */ data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &data->dma_desc_addr, GFP_KERNEL); if (!data->dma_desc) return -EINVAL; /* * Request the DMA channel */ data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW, pxa_ata_dma_irq, ap); if (data->dma_channel < 0) return -EBUSY; /* * Stop and clear the DMA channel */ DCSR(data->dma_channel) = 0; /* * Activate the ATA host */ ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt, pdata->irq_flags, &pxa_ata_sht); if (ret) pxa_free_dma(data->dma_channel); return ret; } static int __devexit pxa_ata_remove(struct platform_device *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct pata_pxa_data *data = host->ports[0]->private_data; pxa_free_dma(data->dma_channel); ata_host_detach(host); return 0; } static struct platform_driver pxa_ata_driver = { .probe = pxa_ata_probe, .remove = __devexit_p(pxa_ata_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; 
/* Standard platform-driver registration and module metadata. */
module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
davidmueller13/valexKernel-lt03wifi
drivers/media/video/cx23885/cx23885-417.c
5027
49127
/* * * Support for a cx23417 mpeg encoder via cx23885 host port. * * (c) 2004 Jelle Foks <jelle@foks.us> * (c) 2004 Gerd Knorr <kraxel@bytesex.org> * (c) 2008 Steven Toth <stoth@linuxtv.org> * - CX23885/7/8 support * * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/cx2341x.h> #include "cx23885.h" #include "cx23885-ioctl.h" #define CX23885_FIRM_IMAGE_SIZE 376836 #define CX23885_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw" static unsigned int mpegbufs = 32; module_param(mpegbufs, int, 0644); MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32"); static unsigned int mpeglines = 32; module_param(mpeglines, int, 0644); MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32"); static unsigned int mpeglinesize = 512; module_param(mpeglinesize, int, 0644); MODULE_PARM_DESC(mpeglinesize, "number of bytes in each line of an MPEG buffer, range 512-1024"); static unsigned int v4l_debug; module_param(v4l_debug, int, 0644); MODULE_PARM_DESC(v4l_debug, "enable V4L debug 
messages"); #define dprintk(level, fmt, arg...)\ do { if (v4l_debug >= level) \ printk(KERN_DEBUG "%s: " fmt, \ (dev) ? dev->name : "cx23885[?]", ## arg); \ } while (0) static struct cx23885_tvnorm cx23885_tvnorms[] = { { .name = "NTSC-M", .id = V4L2_STD_NTSC_M, }, { .name = "NTSC-JP", .id = V4L2_STD_NTSC_M_JP, }, { .name = "PAL-BG", .id = V4L2_STD_PAL_BG, }, { .name = "PAL-DK", .id = V4L2_STD_PAL_DK, }, { .name = "PAL-I", .id = V4L2_STD_PAL_I, }, { .name = "PAL-M", .id = V4L2_STD_PAL_M, }, { .name = "PAL-N", .id = V4L2_STD_PAL_N, }, { .name = "PAL-Nc", .id = V4L2_STD_PAL_Nc, }, { .name = "PAL-60", .id = V4L2_STD_PAL_60, }, { .name = "SECAM-L", .id = V4L2_STD_SECAM_L, }, { .name = "SECAM-DK", .id = V4L2_STD_SECAM_DK, } }; /* ------------------------------------------------------------------ */ enum cx23885_capture_type { CX23885_MPEG_CAPTURE, CX23885_RAW_CAPTURE, CX23885_RAW_PASSTHRU_CAPTURE }; enum cx23885_capture_bits { CX23885_RAW_BITS_NONE = 0x00, CX23885_RAW_BITS_YUV_CAPTURE = 0x01, CX23885_RAW_BITS_PCM_CAPTURE = 0x02, CX23885_RAW_BITS_VBI_CAPTURE = 0x04, CX23885_RAW_BITS_PASSTHRU_CAPTURE = 0x08, CX23885_RAW_BITS_TO_HOST_CAPTURE = 0x10 }; enum cx23885_capture_end { CX23885_END_AT_GOP, /* stop at the end of gop, generate irq */ CX23885_END_NOW, /* stop immediately, no irq */ }; enum cx23885_framerate { CX23885_FRAMERATE_NTSC_30, /* NTSC: 30fps */ CX23885_FRAMERATE_PAL_25 /* PAL: 25fps */ }; enum cx23885_stream_port { CX23885_OUTPUT_PORT_MEMORY, CX23885_OUTPUT_PORT_STREAMING, CX23885_OUTPUT_PORT_SERIAL }; enum cx23885_data_xfer_status { CX23885_MORE_BUFFERS_FOLLOW, CX23885_LAST_BUFFER, }; enum cx23885_picture_mask { CX23885_PICTURE_MASK_NONE, CX23885_PICTURE_MASK_I_FRAMES, CX23885_PICTURE_MASK_I_P_FRAMES = 0x3, CX23885_PICTURE_MASK_ALL_FRAMES = 0x7, }; enum cx23885_vbi_mode_bits { CX23885_VBI_BITS_SLICED, CX23885_VBI_BITS_RAW, }; enum cx23885_vbi_insertion_bits { CX23885_VBI_BITS_INSERT_IN_XTENSION_USR_DATA, CX23885_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1, 
CX23885_VBI_BITS_SEPARATE_STREAM = 0x2 << 1, CX23885_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1, CX23885_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1, }; enum cx23885_dma_unit { CX23885_DMA_BYTES, CX23885_DMA_FRAMES, }; enum cx23885_dma_transfer_status_bits { CX23885_DMA_TRANSFER_BITS_DONE = 0x01, CX23885_DMA_TRANSFER_BITS_ERROR = 0x04, CX23885_DMA_TRANSFER_BITS_LL_ERROR = 0x10, }; enum cx23885_pause { CX23885_PAUSE_ENCODING, CX23885_RESUME_ENCODING, }; enum cx23885_copyright { CX23885_COPYRIGHT_OFF, CX23885_COPYRIGHT_ON, }; enum cx23885_notification_type { CX23885_NOTIFICATION_REFRESH, }; enum cx23885_notification_status { CX23885_NOTIFICATION_OFF, CX23885_NOTIFICATION_ON, }; enum cx23885_notification_mailbox { CX23885_NOTIFICATION_NO_MAILBOX = -1, }; enum cx23885_field1_lines { CX23885_FIELD1_SAA7114 = 0x00EF, /* 239 */ CX23885_FIELD1_SAA7115 = 0x00F0, /* 240 */ CX23885_FIELD1_MICRONAS = 0x0105, /* 261 */ }; enum cx23885_field2_lines { CX23885_FIELD2_SAA7114 = 0x00EF, /* 239 */ CX23885_FIELD2_SAA7115 = 0x00F0, /* 240 */ CX23885_FIELD2_MICRONAS = 0x0106, /* 262 */ }; enum cx23885_custom_data_type { CX23885_CUSTOM_EXTENSION_USR_DATA, CX23885_CUSTOM_PRIVATE_PACKET, }; enum cx23885_mute { CX23885_UNMUTE, CX23885_MUTE, }; enum cx23885_mute_video_mask { CX23885_MUTE_VIDEO_V_MASK = 0x0000FF00, CX23885_MUTE_VIDEO_U_MASK = 0x00FF0000, CX23885_MUTE_VIDEO_Y_MASK = 0xFF000000, }; enum cx23885_mute_video_shift { CX23885_MUTE_VIDEO_V_SHIFT = 8, CX23885_MUTE_VIDEO_U_SHIFT = 16, CX23885_MUTE_VIDEO_Y_SHIFT = 24, }; /* defines below are from ivtv-driver.h */ #define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF /* Firmware API commands */ #define IVTV_API_STD_TIMEOUT 500 /* Registers */ /* IVTV_REG_OFFSET */ #define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8) #define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC) #define IVTV_REG_SPU (0x9050) #define IVTV_REG_HW_BLOCKS (0x9054) #define IVTV_REG_VPU (0x9058) #define IVTV_REG_APU (0xA064) /**** Bit definitions for MC417_RWD and MC417_OEN registers *** bits 
31-16 +-----------+ | Reserved | +-----------+ bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8 +-------+-------+-------+-------+-------+-------+-------+-------+ | MIWR# | MIRD# | MICS# |MIRDY# |MIADDR3|MIADDR2|MIADDR1|MIADDR0| +-------+-------+-------+-------+-------+-------+-------+-------+ bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0 +-------+-------+-------+-------+-------+-------+-------+-------+ |MIDATA7|MIDATA6|MIDATA5|MIDATA4|MIDATA3|MIDATA2|MIDATA1|MIDATA0| +-------+-------+-------+-------+-------+-------+-------+-------+ ***/ #define MC417_MIWR 0x8000 #define MC417_MIRD 0x4000 #define MC417_MICS 0x2000 #define MC417_MIRDY 0x1000 #define MC417_MIADDR 0x0F00 #define MC417_MIDATA 0x00FF /* MIADDR* nibble definitions */ #define MCI_MEMORY_DATA_BYTE0 0x000 #define MCI_MEMORY_DATA_BYTE1 0x100 #define MCI_MEMORY_DATA_BYTE2 0x200 #define MCI_MEMORY_DATA_BYTE3 0x300 #define MCI_MEMORY_ADDRESS_BYTE2 0x400 #define MCI_MEMORY_ADDRESS_BYTE1 0x500 #define MCI_MEMORY_ADDRESS_BYTE0 0x600 #define MCI_REGISTER_DATA_BYTE0 0x800 #define MCI_REGISTER_DATA_BYTE1 0x900 #define MCI_REGISTER_DATA_BYTE2 0xA00 #define MCI_REGISTER_DATA_BYTE3 0xB00 #define MCI_REGISTER_ADDRESS_BYTE0 0xC00 #define MCI_REGISTER_ADDRESS_BYTE1 0xD00 #define MCI_REGISTER_MODE 0xE00 /* Read and write modes */ #define MCI_MODE_REGISTER_READ 0 #define MCI_MODE_REGISTER_WRITE 1 #define MCI_MODE_MEMORY_READ 0 #define MCI_MODE_MEMORY_WRITE 0x40 /*** Bit definitions for MC417_CTL register **** bits 31-6 bits 5-4 bit 3 bits 2-1 Bit 0 +--------+-------------+--------+--------------+------------+ |Reserved|MC417_SPD_CTL|Reserved|MC417_GPIO_SEL|UART_GPIO_EN| +--------+-------------+--------+--------------+------------+ ***/ #define MC417_SPD_CTL(x) (((x) << 4) & 0x00000030) #define MC417_GPIO_SEL(x) (((x) << 1) & 0x00000006) #define MC417_UART_GPIO_EN 0x00000001 /* Values for speed control */ #define MC417_SPD_CTL_SLOW 0x1 #define MC417_SPD_CTL_MEDIUM 0x0 #define MC417_SPD_CTL_FAST 0x3 /* b'1x, but we use 
b'11 */

/* Values for GPIO select */
#define MC417_GPIO_SEL_GPIO3       0x3
#define MC417_GPIO_SEL_GPIO2       0x2
#define MC417_GPIO_SEL_GPIO1       0x1
#define MC417_GPIO_SEL_GPIO0       0x0

/* Program the MC417 control, output-enable and read/write-data registers
 * to their idle defaults (all strobes deasserted, MIRDY as input). */
void cx23885_mc417_init(struct cx23885_dev *dev)
{
	u32 regval;

	dprintk(2, "%s()\n", __func__);

	/* Configure MC417_CTL register to defaults. */
	regval = MC417_SPD_CTL(MC417_SPD_CTL_FAST)	|
		 MC417_GPIO_SEL(MC417_GPIO_SEL_GPIO3)	|
		 MC417_UART_GPIO_EN;
	cx_write(MC417_CTL, regval);

	/* Configure MC417_OEN to defaults. */
	regval = MC417_MIRDY;
	cx_write(MC417_OEN, regval);

	/* Configure MC417_RWD to defaults. */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS;
	cx_write(MC417_RWD, regval);
}

/* Poll MC417_MIRDY until the encoder signals ready.
 * Returns 0 on success, -1 if not ready within ~1 ms. */
static int mc417_wait_ready(struct cx23885_dev *dev)
{
	u32 mi_ready;
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	for (;;) {
		mi_ready = cx_read(MC417_RWD) & MC417_MIRDY;
		if (mi_ready != 0)
			return 0;
		if (time_after(jiffies, timeout))
			return -1;
		udelay(1);
	}
}

/* Write a 32-bit value to a cx23417 register over the 8-bit MC417 bus:
 * four data bytes, two address bytes, then the write-mode nibble. Each
 * byte is clocked by toggling CS/WR in MC417_RWD. The strobe ORDER of
 * these cx_write()s is the bus protocol — do not reorder. */
int mc417_register_write(struct cx23885_dev *dev, u16 address, u32 value)
{
	u32 regval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);

	/* Write data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0 |
		(value & 0x000000FF);
	cx_write(MC417_RWD, regval);

	/* Transition CS/WR to effect write transaction across bus.
	 */
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1 |
		((value >> 8) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2 |
		((value >> 16) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3 |
		((value >> 24) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Indicate that this is a write. */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_MODE |
		MCI_MODE_REGISTER_WRITE;
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	return mc417_wait_ready(dev);
}

/* Read a 32-bit cx23417 register: clock out the address and read-mode
 * nibble, wait for ready, switch the data GPIOs to inputs, then collect
 * the four data bytes LSB-first. Returns mc417_wait_ready()'s result;
 * *value is filled in regardless. */
int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
{
	int retval;
	u32 regval;
	u32 tempval;
	u32 dataval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);

	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE0 |
		((address & 0x00FF));
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Indicate that this is a register read. */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_MODE |
		MCI_MODE_REGISTER_READ;
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	retval = mc417_wait_ready(dev);

	/* switch the DAT0-7 GPIO[10:3] to input mode */
	cx_write(MC417_OEN, MC417_MIRDY | MC417_MIDATA);

	/* Read data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0;
	cx_write(MC417_RWD, regval);

	/* Transition RD to effect read transaction across bus.
	 * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
	 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its
	 * input only...)
	 */
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0;
	cx_write(MC417_RWD, regval);

	/* Collect byte */
	tempval = cx_read(MC417_RWD);
	dataval = tempval & 0x000000FF;

	/* Bring CS and RD high.
	 */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 8);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 16);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 24);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	*value = dataval;
	return retval;
}

/* Write a 32-bit value into cx23417 memory. Same strobe protocol as
 * mc417_register_write(), but with MEMORY_DATA/ADDRESS nibbles and a
 * 22-bit address: byte 2 carries the mode bits plus address[21:16]. */
int mc417_memory_write(struct cx23885_dev *dev, u32 address, u32 value)
{
	u32 regval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);

	/* Write data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0 |
		(value & 0x000000FF);
	cx_write(MC417_RWD, regval);

	/* Transition CS/WR to effect write transaction across bus.
	 */
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1 |
		((value >> 8) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2 |
		((value >> 16) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3 |
		((value >> 24) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE2 |
		MCI_MODE_MEMORY_WRITE | ((address >> 16) & 0x3F);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	return mc417_wait_ready(dev);
}

/* Read a 32-bit value from cx23417 memory; mirror image of
 * mc417_memory_write(), collecting data bytes MSB-first (3..0). */
int mc417_memory_read(struct cx23885_dev *dev, u32 address, u32 *value)
{
	int retval;
	u32 regval;
	u32 tempval;
	u32 dataval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);

	/* Write address byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE2 |
		MCI_MODE_MEMORY_READ | ((address >> 16) & 0x3F);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	retval = mc417_wait_ready(dev);

	/* switch the DAT0-7 GPIO[10:3] to input mode */
	cx_write(MC417_OEN, MC417_MIRDY | MC417_MIDATA);

	/* Read data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3;
	cx_write(MC417_RWD, regval);

	/* Transition RD to effect read transaction across bus. */
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3;
	cx_write(MC417_RWD, regval);

	/* Collect byte */
	tempval = cx_read(MC417_RWD);
	dataval = ((tempval & 0x000000FF) << 24);

	/* Bring CS and RD high.
	 */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 16);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 8);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	/* Read data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= (tempval & 0x000000FF);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	*value = dataval;
	return retval;
}

/* Set bits of the cx23417 GPIO value register (0x900C) via
 * read-modify-write. NOTE(review): the mask here is 0x000ffff (7 hex
 * digits) while mc417_gpio_clear() uses 0x0000ffff — numerically the
 * same, but worth normalizing upstream. */
void mc417_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	u32 val;

	/* Set the gpio value */
	mc417_register_read(dev, 0x900C, &val);
	val |= (mask & 0x000ffff);
	mc417_register_write(dev, 0x900C, val);
}

/* Clear bits of the cx23417 GPIO value register (0x900C). */
void mc417_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	u32 val;

	/* Clear the gpio value */
	mc417_register_read(dev, 0x900C, &val);
	val &= ~(mask & 0x0000ffff);
	mc417_register_write(dev, 0x900C, val);
}

/* Set the direction (register 0x9020) of the masked cx23417 GPIO pins:
 * output when asoutput is non-zero, input otherwise. */
void mc417_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	u32 val;

	/* Enable GPIO direction bits */
	mc417_register_read(dev, 0x9020, &val);
	if (asoutput)
		val |= (mask & 0x0000ffff);
	else
		val &= ~(mask & 0x0000ffff);

	mc417_register_write(dev, 0x9020, val);
}
/* ------------------------------------------------------------------ */

/* MPEG encoder API */
static char
*cmd_to_str(int cmd) { switch (cmd) { case CX2341X_ENC_PING_FW: return "PING_FW"; case CX2341X_ENC_START_CAPTURE: return "START_CAPTURE"; case CX2341X_ENC_STOP_CAPTURE: return "STOP_CAPTURE"; case CX2341X_ENC_SET_AUDIO_ID: return "SET_AUDIO_ID"; case CX2341X_ENC_SET_VIDEO_ID: return "SET_VIDEO_ID"; case CX2341X_ENC_SET_PCR_ID: return "SET_PCR_ID"; case CX2341X_ENC_SET_FRAME_RATE: return "SET_FRAME_RATE"; case CX2341X_ENC_SET_FRAME_SIZE: return "SET_FRAME_SIZE"; case CX2341X_ENC_SET_BIT_RATE: return "SET_BIT_RATE"; case CX2341X_ENC_SET_GOP_PROPERTIES: return "SET_GOP_PROPERTIES"; case CX2341X_ENC_SET_ASPECT_RATIO: return "SET_ASPECT_RATIO"; case CX2341X_ENC_SET_DNR_FILTER_MODE: return "SET_DNR_FILTER_MODE"; case CX2341X_ENC_SET_DNR_FILTER_PROPS: return "SET_DNR_FILTER_PROPS"; case CX2341X_ENC_SET_CORING_LEVELS: return "SET_CORING_LEVELS"; case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE: return "SET_SPATIAL_FILTER_TYPE"; case CX2341X_ENC_SET_VBI_LINE: return "SET_VBI_LINE"; case CX2341X_ENC_SET_STREAM_TYPE: return "SET_STREAM_TYPE"; case CX2341X_ENC_SET_OUTPUT_PORT: return "SET_OUTPUT_PORT"; case CX2341X_ENC_SET_AUDIO_PROPERTIES: return "SET_AUDIO_PROPERTIES"; case CX2341X_ENC_HALT_FW: return "HALT_FW"; case CX2341X_ENC_GET_VERSION: return "GET_VERSION"; case CX2341X_ENC_SET_GOP_CLOSURE: return "SET_GOP_CLOSURE"; case CX2341X_ENC_GET_SEQ_END: return "GET_SEQ_END"; case CX2341X_ENC_SET_PGM_INDEX_INFO: return "SET_PGM_INDEX_INFO"; case CX2341X_ENC_SET_VBI_CONFIG: return "SET_VBI_CONFIG"; case CX2341X_ENC_SET_DMA_BLOCK_SIZE: return "SET_DMA_BLOCK_SIZE"; case CX2341X_ENC_GET_PREV_DMA_INFO_MB_10: return "GET_PREV_DMA_INFO_MB_10"; case CX2341X_ENC_GET_PREV_DMA_INFO_MB_9: return "GET_PREV_DMA_INFO_MB_9"; case CX2341X_ENC_SCHED_DMA_TO_HOST: return "SCHED_DMA_TO_HOST"; case CX2341X_ENC_INITIALIZE_INPUT: return "INITIALIZE_INPUT"; case CX2341X_ENC_SET_FRAME_DROP_RATE: return "SET_FRAME_DROP_RATE"; case CX2341X_ENC_PAUSE_ENCODER: return "PAUSE_ENCODER"; case CX2341X_ENC_REFRESH_INPUT: 
return "REFRESH_INPUT"; case CX2341X_ENC_SET_COPYRIGHT: return "SET_COPYRIGHT"; case CX2341X_ENC_SET_EVENT_NOTIFICATION: return "SET_EVENT_NOTIFICATION"; case CX2341X_ENC_SET_NUM_VSYNC_LINES: return "SET_NUM_VSYNC_LINES"; case CX2341X_ENC_SET_PLACEHOLDER: return "SET_PLACEHOLDER"; case CX2341X_ENC_MUTE_VIDEO: return "MUTE_VIDEO"; case CX2341X_ENC_MUTE_AUDIO: return "MUTE_AUDIO"; case CX2341X_ENC_MISC: return "MISC"; default: return "UNKNOWN"; } } static int cx23885_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]) { struct cx23885_dev *dev = priv; unsigned long timeout; u32 value, flag, retval = 0; int i; dprintk(3, "%s: command(0x%X) = %s\n", __func__, command, cmd_to_str(command)); /* this may not be 100% safe if we can't read any memory location without side effects */ mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value); if (value != 0x12345678) { printk(KERN_ERR "Firmware and/or mailbox pointer not initialized " "or corrupted, signature = 0x%x, cmd = %s\n", value, cmd_to_str(command)); return -1; } /* This read looks at 32 bits, but flag is only 8 bits. * Seems we also bail if CMD or TIMEOUT bytes are set??? 
*/ mc417_memory_read(dev, dev->cx23417_mailbox, &flag); if (flag) { printk(KERN_ERR "ERROR: Mailbox appears to be in use " "(%x), cmd = %s\n", flag, cmd_to_str(command)); return -1; } flag |= 1; /* tell 'em we're working on it */ mc417_memory_write(dev, dev->cx23417_mailbox, flag); /* write command + args + fill remaining with zeros */ /* command code */ mc417_memory_write(dev, dev->cx23417_mailbox + 1, command); mc417_memory_write(dev, dev->cx23417_mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */ for (i = 0; i < in; i++) { mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, data[i]); dprintk(3, "API Input %d = %d\n", i, data[i]); } for (; i < CX2341X_MBOX_MAX_DATA; i++) mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, 0); flag |= 3; /* tell 'em we're done writing */ mc417_memory_write(dev, dev->cx23417_mailbox, flag); /* wait for firmware to handle the API command */ timeout = jiffies + msecs_to_jiffies(10); for (;;) { mc417_memory_read(dev, dev->cx23417_mailbox, &flag); if (0 != (flag & 4)) break; if (time_after(jiffies, timeout)) { printk(KERN_ERR "ERROR: API Mailbox timeout\n"); return -1; } udelay(10); } /* read output values */ for (i = 0; i < out; i++) { mc417_memory_read(dev, dev->cx23417_mailbox + 4 + i, data + i); dprintk(3, "API Output %d = %d\n", i, data[i]); } mc417_memory_read(dev, dev->cx23417_mailbox + 2, &retval); dprintk(3, "API result = %d\n", retval); flag = 0; mc417_memory_write(dev, dev->cx23417_mailbox, flag); return retval; } /* We don't need to call the API often, so using just one * mailbox will probably suffice */ static int cx23885_api_cmd(struct cx23885_dev *dev, u32 command, u32 inputcnt, u32 outputcnt, ...) 
{ u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i, err; dprintk(3, "%s() cmds = 0x%08x\n", __func__, command); va_start(vargs, outputcnt); for (i = 0; i < inputcnt; i++) data[i] = va_arg(vargs, int); err = cx23885_mbox_func(dev, command, inputcnt, outputcnt, data); for (i = 0; i < outputcnt; i++) { int *vptr = va_arg(vargs, int *); *vptr = data[i]; } va_end(vargs); return err; } static int cx23885_find_mailbox(struct cx23885_dev *dev) { u32 signature[4] = { 0x12345678, 0x34567812, 0x56781234, 0x78123456 }; int signaturecnt = 0; u32 value; int i; dprintk(2, "%s()\n", __func__); for (i = 0; i < CX23885_FIRM_IMAGE_SIZE; i++) { mc417_memory_read(dev, i, &value); if (value == signature[signaturecnt]) signaturecnt++; else signaturecnt = 0; if (4 == signaturecnt) { dprintk(1, "Mailbox signature found at 0x%x\n", i+1); return i+1; } } printk(KERN_ERR "Mailbox signature values not found!\n"); return -1; } static int cx23885_load_firmware(struct cx23885_dev *dev) { static const unsigned char magic[8] = { 0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa }; const struct firmware *firmware; int i, retval = 0; u32 value = 0; u32 gpio_output = 0; u32 gpio_value; u32 checksum = 0; u32 *dataptr; dprintk(2, "%s()\n", __func__); /* Save GPIO settings before reset of APU */ retval |= mc417_memory_read(dev, 0x9020, &gpio_output); retval |= mc417_memory_read(dev, 0x900C, &gpio_value); retval = mc417_register_write(dev, IVTV_REG_VPU, 0xFFFFFFED); retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); retval |= mc417_register_write(dev, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000800); retval |= mc417_register_write(dev, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A); retval |= mc417_register_write(dev, IVTV_REG_APU, 0); if (retval != 0) { printk(KERN_ERR "%s: Error with mc417_register_write\n", __func__); return -1; } retval = request_firmware(&firmware, CX23885_FIRM_IMAGE_NAME, &dev->pci->dev); if (retval != 0) { printk(KERN_ERR "ERROR: Hotplug firmware request failed 
(%s).\n", CX23885_FIRM_IMAGE_NAME); printk(KERN_ERR "Please fix your hotplug setup, the board will " "not work without firmware loaded!\n"); return -1; } if (firmware->size != CX23885_FIRM_IMAGE_SIZE) { printk(KERN_ERR "ERROR: Firmware size mismatch " "(have %zd, expected %d)\n", firmware->size, CX23885_FIRM_IMAGE_SIZE); release_firmware(firmware); return -1; } if (0 != memcmp(firmware->data, magic, 8)) { printk(KERN_ERR "ERROR: Firmware magic mismatch, wrong file?\n"); release_firmware(firmware); return -1; } /* transfer to the chip */ dprintk(2, "Loading firmware ...\n"); dataptr = (u32 *)firmware->data; for (i = 0; i < (firmware->size >> 2); i++) { value = *dataptr; checksum += ~value; if (mc417_memory_write(dev, i, value) != 0) { printk(KERN_ERR "ERROR: Loading firmware failed!\n"); release_firmware(firmware); return -1; } dataptr++; } /* read back to verify with the checksum */ dprintk(1, "Verifying firmware ...\n"); for (i--; i >= 0; i--) { if (mc417_memory_read(dev, i, &value) != 0) { printk(KERN_ERR "ERROR: Reading firmware failed!\n"); release_firmware(firmware); return -1; } checksum -= ~value; } if (checksum) { printk(KERN_ERR "ERROR: Firmware load failed (checksum mismatch).\n"); release_firmware(firmware); return -1; } release_firmware(firmware); dprintk(1, "Firmware upload successful.\n"); retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); /* F/W power up disturbs the GPIOs, restore state */ retval |= mc417_register_write(dev, 0x9020, gpio_output); retval |= mc417_register_write(dev, 0x900C, gpio_value); retval |= mc417_register_read(dev, IVTV_REG_VPU, &value); retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8); /* Hardcoded GPIO's here */ retval |= mc417_register_write(dev, 0x9020, 0x4000); retval |= mc417_register_write(dev, 0x900C, 0x4000); mc417_register_read(dev, 0x9020, &gpio_output); mc417_register_read(dev, 0x900C, &gpio_value); if (retval < 0) printk(KERN_ERR "%s: Error with 
mc417_register_write\n", __func__); return 0; } void cx23885_417_check_encoder(struct cx23885_dev *dev) { u32 status, seq; status = seq = 0; cx23885_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq); dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq); } static void cx23885_codec_settings(struct cx23885_dev *dev) { dprintk(1, "%s()\n", __func__); /* Dynamically change the height based on video standard */ if (dev->encodernorm.id & V4L2_STD_525_60) dev->ts1.height = 480; else dev->ts1.height = 576; /* assign frame size */ cx23885_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, dev->ts1.height, dev->ts1.width); dev->mpeg_params.width = dev->ts1.width; dev->mpeg_params.height = dev->ts1.height; dev->mpeg_params.is_50hz = (dev->encodernorm.id & V4L2_STD_625_50) != 0; cx2341x_update(dev, cx23885_mbox_func, NULL, &dev->mpeg_params); cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 3, 1); cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1); } static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder) { int version; int retval; u32 i, data[7]; dprintk(1, "%s()\n", __func__); retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ if (retval < 0) { dprintk(2, "%s() PING OK\n", __func__); retval = cx23885_load_firmware(dev); if (retval < 0) { printk(KERN_ERR "%s() f/w load failed\n", __func__); return retval; } retval = cx23885_find_mailbox(dev); if (retval < 0) { printk(KERN_ERR "%s() mailbox < 0, error\n", __func__); return -1; } dev->cx23417_mailbox = retval; retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); if (retval < 0) { printk(KERN_ERR "ERROR: cx23417 firmware ping failed!\n"); return -1; } retval = cx23885_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version); if (retval < 0) { printk(KERN_ERR "ERROR: cx23417 firmware get encoder :" "version failed!\n"); return -1; } dprintk(1, "cx23417 firmware version is 0x%08x\n", version); msleep(200); } cx23885_codec_settings(dev); msleep(60); cx23885_api_cmd(dev, 
CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0, CX23885_FIELD1_SAA7115, CX23885_FIELD2_SAA7115); cx23885_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0, CX23885_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); /* Setup to capture VBI */ data[0] = 0x0001BD00; data[1] = 1; /* frames per interrupt */ data[2] = 4; /* total bufs */ data[3] = 0x91559155; /* start codes */ data[4] = 0x206080C0; /* stop codes */ data[5] = 6; /* lines */ data[6] = 64; /* BPL */ cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1], data[2], data[3], data[4], data[5], data[6]); for (i = 2; i <= 24; i++) { int valid; valid = ((i >= 19) && (i <= 21)); cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0, i, valid, 0 , 0, 0); cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0, i | 0x80000000, valid, 0, 0, 0); } cx23885_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX23885_UNMUTE); msleep(60); /* initialize the video input */ cx23885_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0); msleep(60); /* Enable VIP style pixel invalidation so we work with scaled mode */ mc417_memory_write(dev, 2120, 0x00000080); /* start capturing to the host interface */ if (startencoder) { cx23885_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0, CX23885_MPEG_CAPTURE, CX23885_RAW_BITS_NONE); msleep(10); } return 0; } /* ------------------------------------------------------------------ */ static int bb_buf_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx23885_fh *fh = q->priv_data; fh->dev->ts1.ts_packet_size = mpeglinesize; fh->dev->ts1.ts_packet_count = mpeglines; *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count; *count = mpegbufs; return 0; } static int bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx23885_fh *fh = q->priv_data; return cx23885_buf_prepare(q, &fh->dev->ts1, (struct cx23885_buffer *)vb, field); } static void bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { 
struct cx23885_fh *fh = q->priv_data; cx23885_buf_queue(&fh->dev->ts1, (struct cx23885_buffer *)vb); } static void bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { cx23885_free_buffer(q, (struct cx23885_buffer *)vb); } static struct videobuf_queue_ops cx23885_qops = { .buf_setup = bb_buf_setup, .buf_prepare = bb_buf_prepare, .buf_queue = bb_buf_queue, .buf_release = bb_buf_release, }; /* ------------------------------------------------------------------ */ static const u32 *ctrl_classes[] = { cx2341x_mpeg_ctrls, NULL }; static int cx23885_queryctrl(struct cx23885_dev *dev, struct v4l2_queryctrl *qctrl) { qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (qctrl->id == 0) return -EINVAL; /* MPEG V4L2 controls */ if (cx2341x_ctrl_query(&dev->mpeg_params, qctrl)) qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; return 0; } static int cx23885_querymenu(struct cx23885_dev *dev, struct v4l2_querymenu *qmenu) { struct v4l2_queryctrl qctrl; qctrl.id = qmenu->id; cx23885_queryctrl(dev, &qctrl); return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id)); } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; call_all(dev, core, g_std, id); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; unsigned int i; for (i = 0; i < ARRAY_SIZE(cx23885_tvnorms); i++) if (*id & cx23885_tvnorms[i].id) break; if (i == ARRAY_SIZE(cx23885_tvnorms)) return -EINVAL; dev->encodernorm = cx23885_tvnorms[i]; /* Have the drier core notify the subdevices */ mutex_lock(&dev->lock); cx23885_set_tvnorm(dev, *id); mutex_unlock(&dev->lock); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; dprintk(1, "%s()\n", __func__); return 
cx23885_enum_input(dev, i); } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { return cx23885_get_input(file, priv, i); } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { return cx23885_set_input(file, priv, i); } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; if (UNSET == dev->tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); call_all(dev, tuner, g_tuner, t); dprintk(1, "VIDIOC_G_TUNER: tuner type %d\n", t->type); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; if (UNSET == dev->tuner_type) return -EINVAL; /* Update the A/V core */ call_all(dev, tuner, s_tuner, t); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; if (UNSET == dev->tuner_type) return -EINVAL; f->type = V4L2_TUNER_ANALOG_TV; f->frequency = dev->freq; call_all(dev, tuner, g_frequency, f); return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { return cx23885_set_frequency(file, priv, f); } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; return cx23885_get_control(dev, ctl); } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctl) { struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev; return cx23885_set_control(dev, ctl); } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; struct cx23885_tsport *tsport = &dev->ts1; strlcpy(cap->driver, dev->name, 
sizeof(cap->driver)); strlcpy(cap->card, cx23885_boards[tsport->dev->board].name, sizeof(cap->card)); sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | 0; if (UNSET != dev->tuner_type) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index != 0) return -EINVAL; strlcpy(f->description, "MPEG", sizeof(f->description)); f->pixelformat = V4L2_PIX_FMT_MPEG; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts1.ts_packet_size * dev->ts1.ts_packet_count; f->fmt.pix.colorspace = 0; f->fmt.pix.width = dev->ts1.width; f->fmt.pix.height = dev->ts1.height; f->fmt.pix.field = fh->mpegq.field; dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n", dev->ts1.width, dev->ts1.height, fh->mpegq.field); return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts1.ts_packet_size * dev->ts1.ts_packet_count; f->fmt.pix.colorspace = 0; dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n", dev->ts1.width, dev->ts1.height, fh->mpegq.field); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts1.ts_packet_size * dev->ts1.ts_packet_count; f->fmt.pix.colorspace = 0; dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n", f->fmt.pix.width, 
f->fmt.pix.height, f->fmt.pix.field); return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct cx23885_fh *fh = file->private_data; return videobuf_reqbufs(&fh->mpegq, p); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct cx23885_fh *fh = file->private_data; return videobuf_querybuf(&fh->mpegq, p); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct cx23885_fh *fh = file->private_data; return videobuf_qbuf(&fh->mpegq, p); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct cx23885_fh *fh = priv; return videobuf_dqbuf(&fh->mpegq, b, file->f_flags & O_NONBLOCK); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx23885_fh *fh = file->private_data; return videobuf_streamon(&fh->mpegq); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx23885_fh *fh = file->private_data; return videobuf_streamoff(&fh->mpegq); } static int vidioc_g_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; return cx2341x_ext_ctrls(&dev->mpeg_params, 0, f, VIDIOC_G_EXT_CTRLS); } static int vidioc_s_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; struct cx2341x_mpeg_params p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; p = dev->mpeg_params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS); if (err == 0) { err = cx2341x_update(dev, cx23885_mbox_func, &dev->mpeg_params, &p); dev->mpeg_params = p; } return err; } static int vidioc_try_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; struct cx2341x_mpeg_params 
p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; p = dev->mpeg_params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS); return err; } static int vidioc_log_status(struct file *file, void *priv) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; char name[32 + 2]; snprintf(name, sizeof(name), "%s/2", dev->name); printk(KERN_INFO "%s/2: ============ START LOG STATUS ============\n", dev->name); call_all(dev, core, log_status); cx2341x_log_status(&dev->mpeg_params, name); printk(KERN_INFO "%s/2: ============= END LOG STATUS =============\n", dev->name); return 0; } static int vidioc_querymenu(struct file *file, void *priv, struct v4l2_querymenu *a) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; return cx23885_querymenu(dev, a); } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; return cx23885_queryctrl(dev, c); } static int mpeg_open(struct file *file) { struct cx23885_dev *dev = video_drvdata(file); struct cx23885_fh *fh; dprintk(2, "%s()\n", __func__); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (!fh) return -ENOMEM; file->private_data = fh; fh->dev = dev; videobuf_queue_sg_init(&fh->mpegq, &cx23885_qops, &dev->pci->dev, &dev->ts1.slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx23885_buffer), fh, NULL); return 0; } static int mpeg_release(struct file *file) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; dprintk(2, "%s()\n", __func__); /* FIXME: Review this crap */ /* Shut device down on last close */ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) { if (atomic_dec_return(&dev->v4l_reader_count) == 0) { /* stop mpeg capture */ cx23885_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, CX23885_END_NOW, CX23885_MPEG_CAPTURE, CX23885_RAW_BITS_NONE); msleep(500); cx23885_417_check_encoder(dev); 
cx23885_cancel_buffers(&fh->dev->ts1); } } if (fh->mpegq.streaming) videobuf_streamoff(&fh->mpegq); if (fh->mpegq.reading) videobuf_read_stop(&fh->mpegq); videobuf_mmap_free(&fh->mpegq); file->private_data = NULL; kfree(fh); return 0; } static ssize_t mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; dprintk(2, "%s()\n", __func__); /* Deal w/ A/V decoder * and mpeg encoder sync issues. */ /* Start mpeg encoder on first read. */ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) { if (atomic_inc_return(&dev->v4l_reader_count) == 1) { if (cx23885_initialize_codec(dev, 1) < 0) return -EINVAL; } } return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0, file->f_flags & O_NONBLOCK); } static unsigned int mpeg_poll(struct file *file, struct poll_table_struct *wait) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; dprintk(2, "%s\n", __func__); return videobuf_poll_stream(file, &fh->mpegq, wait); } static int mpeg_mmap(struct file *file, struct vm_area_struct *vma) { struct cx23885_fh *fh = file->private_data; struct cx23885_dev *dev = fh->dev; dprintk(2, "%s()\n", __func__); return videobuf_mmap_mapper(&fh->mpegq, vma); } static struct v4l2_file_operations mpeg_fops = { .owner = THIS_MODULE, .open = mpeg_open, .release = mpeg_release, .read = mpeg_read, .poll = mpeg_poll, .mmap = mpeg_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { .vidioc_querystd = vidioc_g_std, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_querycap = vidioc_querycap, 
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls, .vidioc_log_status = vidioc_log_status, .vidioc_querymenu = vidioc_querymenu, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_chip_ident = cx23885_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = cx23885_g_register, .vidioc_s_register = cx23885_s_register, #endif }; static struct video_device cx23885_mpeg_template = { .name = "cx23885", .fops = &mpeg_fops, .ioctl_ops = &mpeg_ioctl_ops, .tvnorms = CX23885_NORMS, .current_norm = V4L2_STD_NTSC_M, }; void cx23885_417_unregister(struct cx23885_dev *dev) { dprintk(1, "%s()\n", __func__); if (dev->v4l_device) { if (video_is_registered(dev->v4l_device)) video_unregister_device(dev->v4l_device); else video_device_release(dev->v4l_device); dev->v4l_device = NULL; } } static struct video_device *cx23885_video_dev_alloc( struct cx23885_tsport *tsport, struct pci_dev *pci, struct video_device *template, char *type) { struct video_device *vfd; struct cx23885_dev *dev = tsport->dev; dprintk(1, "%s()\n", __func__); vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; snprintf(vfd->name, sizeof(vfd->name), "%s (%s)", cx23885_boards[tsport->dev->board].name, type); vfd->parent = &pci->dev; vfd->release = video_device_release; return vfd; } int cx23885_417_register(struct cx23885_dev *dev) { /* FIXME: Port1 hardcoded here */ int err = -ENODEV; struct cx23885_tsport *tsport = &dev->ts1; dprintk(1, "%s()\n", __func__); if (cx23885_boards[dev->board].portb != CX23885_MPEG_ENCODER) 
return err; /* Set default TV standard */ dev->encodernorm = cx23885_tvnorms[0]; if (dev->encodernorm.id & V4L2_STD_525_60) tsport->height = 480; else tsport->height = 576; tsport->width = 720; cx2341x_fill_defaults(&dev->mpeg_params); dev->mpeg_params.port = CX2341X_PORT_SERIAL; /* Allocate and initialize V4L video device */ dev->v4l_device = cx23885_video_dev_alloc(tsport, dev->pci, &cx23885_mpeg_template, "mpeg"); video_set_drvdata(dev->v4l_device, dev); err = video_register_device(dev->v4l_device, VFL_TYPE_GRABBER, -1); if (err < 0) { printk(KERN_INFO "%s: can't register mpeg device\n", dev->name); return err; } printk(KERN_INFO "%s: registered device %s [mpeg]\n", dev->name, video_device_node_name(dev->v4l_device)); /* ST: Configure the encoder paramaters, but don't begin * encoding, this resolves an issue where the first time the * encoder is started video can be choppy. */ cx23885_initialize_codec(dev, 0); return 0; }
gpl-2.0
AdiPat/android_kernel_samsung_janice
drivers/gpio/gpio-max7300.c
5795
2048
/* * Copyright (C) 2009 Wolfram Sang, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Check max730x.c for further details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/spi/max7301.h> #include <linux/slab.h> static int max7300_i2c_write(struct device *dev, unsigned int reg, unsigned int val) { struct i2c_client *client = to_i2c_client(dev); return i2c_smbus_write_byte_data(client, reg, val); } static int max7300_i2c_read(struct device *dev, unsigned int reg) { struct i2c_client *client = to_i2c_client(dev); return i2c_smbus_read_byte_data(client, reg); } static int __devinit max7300_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max7301 *ts; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; ts = kzalloc(sizeof(struct max7301), GFP_KERNEL); if (!ts) return -ENOMEM; ts->read = max7300_i2c_read; ts->write = max7300_i2c_write; ts->dev = &client->dev; ret = __max730x_probe(ts); if (ret) kfree(ts); return ret; } static int __devexit max7300_remove(struct i2c_client *client) { return __max730x_remove(&client->dev); } static const struct i2c_device_id max7300_id[] = { { "max7300", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max7300_id); static struct i2c_driver max7300_driver = { .driver = { .name = "max7300", .owner = THIS_MODULE, }, .probe = max7300_probe, .remove = __devexit_p(max7300_remove), .id_table = max7300_id, }; static int __init max7300_init(void) { return i2c_add_driver(&max7300_driver); } subsys_initcall(max7300_init); static void __exit max7300_exit(void) { i2c_del_driver(&max7300_driver); } module_exit(max7300_exit); MODULE_AUTHOR("Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MAX7300 GPIO-Expander");
gpl-2.0
faux123/htc_m8
drivers/media/rc/keymaps/rc-videomate-m1f.c
9379
2311
/* videomate-k100.h - Keytable for videomate_k100 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Pavel Osnova <pvosnova@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode -> Linux keycode table for the Compro VideoMate K100 remote. */
static struct rc_map_table videomate_k100[] = {
	{ 0x01, KEY_POWER },
	/* source-selection row */
	{ 0x31, KEY_TUNER },
	{ 0x33, KEY_VIDEO },
	{ 0x2f, KEY_RADIO },
	{ 0x30, KEY_CAMERA },
	{ 0x2d, KEY_NEW }, /* TV record button */
	{ 0x17, KEY_CYCLEWINDOWS },
	{ 0x2c, KEY_ANGLE },
	{ 0x2b, KEY_LANGUAGE },
	{ 0x32, KEY_SEARCH }, /* '...' button */
	/* navigation cluster */
	{ 0x11, KEY_UP },
	{ 0x13, KEY_LEFT },
	{ 0x15, KEY_OK },
	{ 0x14, KEY_RIGHT },
	{ 0x12, KEY_DOWN },
	{ 0x16, KEY_BACKSPACE },
	{ 0x02, KEY_ZOOM }, /* WIN key */
	{ 0x04, KEY_INFO },
	/* volume / channel rocker */
	{ 0x05, KEY_VOLUMEUP },
	{ 0x03, KEY_MUTE },
	{ 0x07, KEY_CHANNELUP },
	{ 0x06, KEY_VOLUMEDOWN },
	{ 0x08, KEY_CHANNELDOWN },
	/* transport controls */
	{ 0x0c, KEY_RECORD },
	{ 0x0e, KEY_STOP },
	{ 0x0a, KEY_BACK },
	{ 0x0b, KEY_PLAY },
	{ 0x09, KEY_FORWARD },
	{ 0x10, KEY_PREVIOUS },
	{ 0x0d, KEY_PAUSE },
	{ 0x0f, KEY_NEXT },
	/* numeric keypad */
	{ 0x1e, KEY_1 },
	{ 0x1f, KEY_2 },
	{ 0x20, KEY_3 },
	{ 0x21, KEY_4 },
	{ 0x22, KEY_5 },
	{ 0x23, KEY_6 },
	{ 0x24, KEY_7 },
	{ 0x25, KEY_8 },
	{ 0x26, KEY_9 },
	{ 0x2a, KEY_NUMERIC_STAR }, /* * key */
	{ 0x1d, KEY_0 },
	{ 0x29, KEY_SUBTITLE }, /* # key */
	{ 0x27, KEY_CLEAR },
	{ 0x34, KEY_SCREEN },
	{ 0x28, KEY_ENTER },
	/* colored teletext keys */
	{ 0x19, KEY_RED },
	{ 0x1a, KEY_GREEN },
	{ 0x1b, KEY_YELLOW },
	{ 0x1c, KEY_BLUE },
	{ 0x18, KEY_TEXT },
};

/* Wrapper handed to rc-core; ties the table to its well-known map name. */
static struct rc_map_list videomate_k100_map = {
	.map = {
		.scan    = videomate_k100,
		.size    = ARRAY_SIZE(videomate_k100),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_VIDEOMATE_K100,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_videomate_k100(void)
{
	return rc_map_register(&videomate_k100_map);
}

/* Remove the keymap on module unload. */
static void __exit exit_rc_map_videomate_k100(void)
{
	rc_map_unregister(&videomate_k100_map);
}

module_init(init_rc_map_videomate_k100)
module_exit(exit_rc_map_videomate_k100)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Osnova <pvosnova@gmail.com>");
gpl-2.0
gearslam/JB_LS970ZVC
drivers/media/video/cx23885/cx23885-f300.c
9379
4188
/*
 * Driver for Silicon Labs C8051F300 microcontroller.
 *
 * It is used for LNB power control in TeVii S470,
 * TBS 6920 PCIe DVB-S2 cards.
 *
 * Microcontroller connected to cx23885 GPIO pins:
 * GPIO0 - data  - P0.3 F300
 * GPIO1 - reset - P0.2 F300
 * GPIO2 - clk   - P0.1 F300
 * GPIO3 - busy  - P0.0 F300
 *
 * Copyright (C) 2009 Igor M. Liplianin <liplianin@me.by>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "cx23885.h"

#define F300_DATA	GPIO_0
#define F300_RESET	GPIO_1
#define F300_CLK	GPIO_2
#define F300_BUSY	GPIO_3

/* Drive GPIO @line as an output at level @lvl (0 or 1). */
static void f300_set_line(struct cx23885_dev *dev, u32 line, u8 lvl)
{
	cx23885_gpio_enable(dev, line, 1);
	if (lvl == 1)
		cx23885_gpio_set(dev, line);
	else
		cx23885_gpio_clear(dev, line);
}

/* Switch GPIO @line to input and sample its level. */
static u8 f300_get_line(struct cx23885_dev *dev, u32 line)
{
	cx23885_gpio_enable(dev, line, 0);
	return cx23885_gpio_get(dev, line);
}

/* Bit-bang one byte out, MSB first; data is set up while CLK is low. */
static void f300_send_byte(struct cx23885_dev *dev, u8 dta)
{
	u8 i;

	for (i = 0; i < 8; i++) {
		f300_set_line(dev, F300_CLK, 0);
		udelay(30);
		f300_set_line(dev, F300_DATA, (dta & 0x80) >> 7);/* msb first */
		udelay(30);
		dta <<= 1;
		f300_set_line(dev, F300_CLK, 1);
		udelay(30);
	}
}

/* Bit-bang one byte in, MSB first; sampled after the rising CLK edge. */
static u8 f300_get_byte(struct cx23885_dev *dev)
{
	u8 i, dta = 0;

	for (i = 0; i < 8; i++) {
		f300_set_line(dev, F300_CLK, 0);
		udelay(30);
		dta <<= 1;
		f300_set_line(dev, F300_CLK, 1);
		udelay(30);
		dta |= f300_get_line(dev, F300_DATA);/* msb first */
	}

	return dta;
}

/*
 * Send a command buffer to the F300 and drain its reply.
 *
 * buf[0] holds the payload length; a two's-complement checksum over
 * length + payload is appended before transmission.  Returns 0 on
 * success, 1 if the microcontroller never deasserted BUSY.
 */
static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf)
{
	struct cx23885_tsport *port = fe->dvb->priv;
	struct cx23885_dev *dev = port->dev;
	u8 i, temp, ret = 0;

	temp = buf[0];
	for (i = 0; i < buf[0]; i++)
		temp += buf[i + 1];
	temp = (~temp + 1);/* get check sum */
	buf[1 + buf[0]] = temp;

	f300_set_line(dev, F300_RESET, 1);
	f300_set_line(dev, F300_CLK, 1);
	udelay(30);
	f300_set_line(dev, F300_DATA, 1);
	msleep(1);

	/* question: */
	f300_set_line(dev, F300_RESET, 0);/* begin to send data */
	msleep(1);

	f300_send_byte(dev, 0xe0);/* the slave address is 0xe0, write */
	msleep(1);

	temp = buf[0];
	temp += 2;	/* length byte + payload + checksum byte */
	for (i = 0; i < temp; i++)
		f300_send_byte(dev, buf[i]);

	f300_set_line(dev, F300_RESET, 1);/* sent data over */
	f300_set_line(dev, F300_DATA, 1);

	/* answer: */
	temp = 0;
	/*
	 * Poll BUSY for up to 8ms.  Fixed from the original, which used
	 * bitwise '&' in the condition and then tested 'i > 7', thereby
	 * misreporting a response arriving on the final (8th) poll as a
	 * timeout; testing 'temp' directly is race-free.
	 */
	for (i = 0; (i < 8) && (temp == 0); i++) {
		msleep(1);
		if (f300_get_line(dev, F300_BUSY) == 0)
			temp = 1;
	}

	if (temp == 0) {
		printk(KERN_ERR "%s: timeout, the slave no response\n",
								__func__);
		ret = 1; /* timeout, the slave no response */
	} else { /* the slave not busy, prepare for getting data */
		f300_set_line(dev, F300_RESET, 0);/*ready...*/
		msleep(1);
		f300_send_byte(dev, 0xe1);/* 0xe1 is Read */
		msleep(1);
		temp = f300_get_byte(dev);/*get the data length */
		if (temp > 14)
			temp = 14;

		for (i = 0; i < (temp + 1); i++)
			f300_get_byte(dev);/* get data to empty buffer */

		f300_set_line(dev, F300_RESET, 1);/* received data over */
		f300_set_line(dev, F300_DATA, 1);
	}

	return ret;
}

/*
 * DVB frontend callback: set the LNB supply voltage via the F300.
 * Builds a 6-byte "write port" command and hands it to f300_xfer().
 */
int f300_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	u8 buf[16];

	buf[0] = 0x05;
	buf[1] = 0x38;/* write port */
	buf[2] = 0x01;/* A port, lnb power */

	switch (voltage) {
	case SEC_VOLTAGE_13:
		buf[3] = 0x01;/* power on */
		buf[4] = 0x02;/* B port, H/V */
		buf[5] = 0x00;/*13V v*/
		break;
	case SEC_VOLTAGE_18:
		buf[3] = 0x01;
		buf[4] = 0x02;
		buf[5] = 0x01;/* 18V h*/
		break;
	case SEC_VOLTAGE_OFF:
	default:
		/*
		 * default added: an unexpected enum value now powers the
		 * LNB off instead of transmitting uninitialized stack
		 * bytes to the microcontroller.
		 */
		buf[3] = 0x00;/* power off */
		buf[4] = 0x00;
		buf[5] = 0x00;
		break;
	}

	return f300_xfer(fe, buf);
}
gpl-2.0
Asus-T100/kernel
arch/alpha/kernel/err_marvel.c
11939
37488
/* * linux/arch/alpha/kernel/err_marvel.c * * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation) * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/console.h> #include <asm/core_marvel.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev7.h> #include "err_impl.h" #include "proto.h" static void marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets) { #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_environmental_subpacket *env; struct { int type; char *name; } ev_packets[] = { { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE, "Ambient Temperature" }, { EL_TYPE__PAL__ENV__AIRMOVER_FAN, "AirMover / Fan" }, { EL_TYPE__PAL__ENV__VOLTAGE, "Voltage" }, { EL_TYPE__PAL__ENV__INTRUSION, "Intrusion" }, { EL_TYPE__PAL__ENV__POWER_SUPPLY, "Power Supply" }, { EL_TYPE__PAL__ENV__LAN, "LAN" }, { EL_TYPE__PAL__ENV__HOT_PLUG, "Hot Plug" }, { 0, NULL } }; int i; for (i = 0; ev_packets[i].type != 0; i++) { env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)]; if (!env) continue; printk("%s**%s event (cabinet %d, drawer %d)\n", err_print_prefix, ev_packets[i].name, env->cabinet, env->drawer); printk("%s Module Type: 0x%x - Unit ID 0x%x - " "Condition 0x%x\n", err_print_prefix, env->module_type, env->unit_id, env->condition); } #endif /* CONFIG_VERBOSE_MCHECK */ } static int marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; int i; for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE); i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG); i++) { if (lf_subpackets->env[i]) status = MCHK_DISPOSITION_REPORT; } if (print) marvel_print_680_frame(lf_subpackets); return status; } #ifdef CONFIG_VERBOSE_MCHECK static void marvel_print_err_cyc(u64 err_cyc) { static char *packet_desc[] = { "No Error", "UNKNOWN", "1 cycle (1 or 2 flit packet)", "2 cycles (3 flit packet)", "9 cycles (18 flit 
packet)", "10 cycles (19 flit packet)", "UNKNOWN", "UNKNOWN", "UNKNOWN" }; #define IO7__ERR_CYC__ODD_FLT (1UL << 0) #define IO7__ERR_CYC__EVN_FLT (1UL << 1) #define IO7__ERR_CYC__PACKET__S (6) #define IO7__ERR_CYC__PACKET__M (0x7) #define IO7__ERR_CYC__LOC (1UL << 5) #define IO7__ERR_CYC__CYCLE__S (2) #define IO7__ERR_CYC__CYCLE__M (0x7) printk("%s Packet In Error: %s\n" "%s Error in %s, cycle %lld%s%s\n", err_print_prefix, packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], err_print_prefix, (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER", EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE), (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "", (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": ""); } static void marvel_print_po7_crrct_sym(u64 crrct_sym) { #define IO7__PO7_CRRCT_SYM__SYN__S (0) #define IO7__PO7_CRRCT_SYM__SYN__M (0x7f) #define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */ #define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff) printk("%s Correctable Error Symptoms:\n" "%s Syndrome: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); } static void marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) { static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" }; static char *clk_decode[] = { "No Error", "One extra rising edge", "Two extra rising edges", "Lost one clock" }; static char *port_names[] = { "Port 0", "Port 1", "Port 2", "Port 3", "Unknown Port", "Unknown Port", "Unknown Port", "Port 7" }; int scratch, i; #define IO7__PO7_UNCRR_SYM__SYN__S (0) #define IO7__PO7_UNCRR_SYM__SYN__M (0x7f) #define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */ #define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... 
+ EVN_FLT */ #define IO7__PO7_UNCRR_SYM__CLK__S (16) #define IO7__PO7_UNCRR_SYM__CLK__M (0xff) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28) #define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29) #define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30) #define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40) #define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48) #define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff) #define IO7__STRV_VTR__LSI__INTX__S (0) #define IO7__STRV_VTR__LSI__INTX__M (0x3) #define IO7__STRV_VTR__LSI__SLOT__S (2) #define IO7__STRV_VTR__LSI__SLOT__M (0x7) #define IO7__STRV_VTR__LSI__BUS__S (5) #define IO7__STRV_VTR__LSI__BUS__M (0x3) #define IO7__STRV_VTR__MSI__INTNUM__S (0) #define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff) #define IO7__STRV_VTR__IS_MSI (1UL << 9) printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix); uncrr_sym &= valid_mask; if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) printk("%s Syndrome: 0x%llx\n", err_print_prefix, EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC)) marvel_print_err_cyc(EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__ERR_CYC)); scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK); for (i = 0; i < 4; i++, scratch >>= 2) { if (scratch & 0x3) printk("%s Clock %s: %s\n", err_print_prefix, clk_names[i], clk_decode[scratch & 0x3]); } if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ) printk("%s REQ Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO) printk("%s RIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & 
IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO) printk("%s WIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK) printk("%s BLK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK) printk("%s NBK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO) printk("%s Read I/O Buffer Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO) printk("%s Write I/O Buffer Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD) printk("%s FWD Buffer Overflow\n", err_print_prefix); if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) { int lost = scratch & (1UL << 4); scratch &= ~lost; for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Response sent to %s", err_print_prefix, port_names[i]); } if (lost) printk("%s Lost Error sent somewhere else\n", err_print_prefix); } if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) { for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Reported by %s", err_print_prefix, port_names[i]); } } if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) { char starvation_message[80]; scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR); if (scratch & IO7__STRV_VTR__IS_MSI) sprintf(starvation_message, "MSI Interrupt 0x%x", EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM)); else sprintf(starvation_message, "LSI INT%c for Bus:Slot (%d:%d)\n", 'A' + EXTRACT(scratch, IO7__STRV_VTR__LSI__INTX), EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS), EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT)); printk("%s Starvation Int Trigger By: %s\n", err_print_prefix, starvation_message); } } static void marvel_print_po7_ugbge_sym(u64 ugbge_sym) { char opcode_str[10]; #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6) #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful) #define 
IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40) #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff) #define IO7__PO7_UGBGE_SYM__VALID (1UL << 63) if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID)) return; switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) { case 0x51: sprintf(opcode_str, "Wr32"); break; case 0x50: sprintf(opcode_str, "WrQW"); break; case 0x54: sprintf(opcode_str, "WrIPR"); break; case 0xD8: sprintf(opcode_str, "Victim"); break; case 0xC5: sprintf(opcode_str, "BlkIO"); break; default: sprintf(opcode_str, "0x%llx\n", EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); break; } printk("%s Up Hose Garbage Symptom:\n" "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", err_print_prefix, err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID), opcode_str); if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) printk("%s Packet Offset 0x%08llx\n", err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); } static void marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) { u64 uncrr_sym_valid = 0; #define IO7__PO7_ERRSUM__CR_SBE (1UL << 32) #define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33) #define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34) #define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35) #define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36) #define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37) #define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38) #define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39) #define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40) #define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41) #define IO7__PO7_ERRSUM__MAF_TO (1UL << 42) #define IO7__PO7_ERRSUM__UGBGE (1UL << 43) #define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44) #define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45) #define 
IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46) #define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47) #define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51) #define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52) #define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53) #define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54) #define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55) #define IO7__PO7_ERRSUM__HLT_INT (1UL << 56) #define IO7__PO7_ERRSUM__HP_INT (1UL << 57) #define IO7__PO7_ERRSUM__CRD_INT (1UL << 58) #define IO7__PO7_ERRSUM__STV_INT (1UL << 59) #define IO7__PO7_ERRSUM__HRD_INT (1UL << 60) #define IO7__PO7_ERRSUM__BH_SUM (1UL << 61) #define IO7__PO7_ERRSUM__ERR_LST (1UL << 62) #define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63) #define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \ IO7__PO7_ERRSUM__CR_SBE) /* * Single bit errors aren't covered by ERR_VALID. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { printk("%s %sSingle Bit Error(s) detected/corrected\n", err_print_prefix, (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) ? "Multiple " : ""); marvel_print_po7_crrct_sym(io->po7_crrct_sym); } /* * Neither are the interrupt status bits */ if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) printk("%s Halt Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { printk("%s Hot Plug Event Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) printk("%s Correctable Error Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { printk("%s Starvation Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR); } if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { printk("%s Hard Error Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } /* * Everything else is valid only with ERR_VALID, so skip to the end * (uncrr_sym check) unless ERR_VALID is set. 
*/ if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) goto check_uncrr_sym; /* * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid. * For bits [29:0] to also be valid, the following bits must * not be set: * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM * CR_ERR_RESP MAF_TO */ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP); if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | IO7__PO7_ERRSUM__CR_CSR_NXM | IO7__PO7_ERRSUM__CR_RSP_NXM | IO7__PO7_ERRSUM__CR_ERR_RESP | IO7__PO7_ERRSUM__MAF_TO))) uncrr_sym_valid |= 0x3ffffffful; if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) printk("%s Write byte into IO7 CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) printk("%s PIO to non-existent CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) printk("%s Bus Requester PID (Access Violation)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) printk("%s Received NXM response from EV7\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) printk("%s Received ERROR RESPONSE\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) printk("%s Clock error on data flit\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) printk("%s Double Bit Error Data Error Detected\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) printk("%s Garbage Encoding Detected on the data\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { printk("%s Garbage Encoding sent up hose\n", err_print_prefix); marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); } if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) printk("%s Orphan response (unexpected response)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) printk("%s Down hose packet overflow\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) printk("%s Down hose credit overflow\n", 
err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) printk("%s Unexpected or bad dealloc field\n", err_print_prefix); /* * The black hole events. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) printk("%s BLACK HOLE: Timeout for all responses\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) printk("%s BLACK HOLE: Clock check on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) printk("%s BLACK HOLE: Uncorrectable Error on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) printk("%s BLACK HOLE: Garbage on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) printk("%s BLACK HOLE: Bad EV7 command\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) printk("%s Lost Error\n", err_print_prefix); printk("%s Failing Packet:\n" "%s Cycle 1: %016llx\n" "%s Cycle 2: %016llx\n", err_print_prefix, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); /* * If there are any valid bits in UNCRR sym for this err, * print UNCRR_SYM as well. 
*/ check_uncrr_sym: if (uncrr_sym_valid) marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); } static void marvel_print_pox_tlb_err(u64 tlb_err) { static char *tlb_errors[] = { "No Error", "North Port Signaled Error fetching TLB entry", "PTE invalid or UCC or GBG error on this entry", "Address did not hit any DMA window" }; #define IO7__POX_TLBERR__ERR_VALID (1UL << 63) #define IO7__POX_TLBERR__ERRCODE__S (0) #define IO7__POX_TLBERR__ERRCODE__M (0x3) #define IO7__POX_TLBERR__ERR_TLB_PTR__S (3) #define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7) #define IO7__POX_TLBERR__FADDR__S (6) #define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful) if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) return; printk("%s TLB Error on index 0x%llx:\n" "%s - %s\n" "%s - Addr: 0x%016llx\n", err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), err_print_prefix, tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)], err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6); } static void marvel_print_pox_spl_cmplt(u64 spl_cmplt) { char message[80]; #define IO7__POX_SPLCMPLT__MESSAGE__S (0) #define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful) #define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40) #define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful) #define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35) #define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul) #define IO7__POX_SPLCMPLT__MSG_CLASS__S (28) #define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf) #define IO7__POX_SPLCMPLT__MSG_INDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) printk("%s Split Completion Error:\n" "%s Source 
(Bus:Dev:Func): %lld:%lld:%lld\n", err_print_prefix, err_print_prefix, EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC)); switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) { case 0x000: sprintf(message, "Normal completion"); break; case 0x100: sprintf(message, "Bridge - Master Abort"); break; case 0x101: sprintf(message, "Bridge - Target Abort"); break; case 0x102: sprintf(message, "Bridge - Uncorrectable Write Data Error"); break; case 0x200: sprintf(message, "Byte Count Out of Range"); break; case 0x201: sprintf(message, "Uncorrectable Split Write Data Error"); break; default: sprintf(message, "%08llx\n", EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); break; } printk("%s Message: %s\n", err_print_prefix, message); } static void marvel_print_pox_trans_sum(u64 trans_sum) { static const char * const pcix_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved / Device ID Message", "Memory Read", "Memory Write", "Reserved / Alias to Memory Read Block", "Reserved / Alias to Memory Write Block", "Configuration Read", "Configuration Write", "Memory Read Multiple / Split Completion", "Dual Address Cycle", "Memory Read Line / Memory Read Block", "Memory Write and Invalidate / Memory Write Block" }; #define IO7__POX_TRANSUM__PCI_ADDR__S (0) #define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful) #define IO7__POX_TRANSUM__DAC (1UL << 50) #define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52) #define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf) #define IO7__POX_TRANSUM__PCIX_CMD__S (56) #define IO7__POX_TRANSUM__PCIX_CMD__M (0xf) #define IO7__POX_TRANSUM__ERR_VALID (1UL << 63) if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID)) return; printk("%s Transaction Summary:\n" "%s Command: 0x%llx - %s\n" "%s Address: 0x%016llx%s\n" "%s PCI-X Master Slot: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(trans_sum, 
IO7__POX_TRANSUM__PCIX_CMD), pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)], err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR), (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "", err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT)); } static void marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port) { #define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4) #define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5) #define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6) #define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7) #define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8) #define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9) #define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10) #define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11) #define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12) #define IO7__POX_ERRSUM__DMA_TO (1UL << 13) #define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14) #define IO7__POX_ERRSUM__MABORT (1UL << 15) #define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\ IO7__POX_ERRSUM__MABORT) #define IO7__POX_ERRSUM__PT_TABORT (1UL << 16) #define IO7__POX_ERRSUM__PM_TABORT (1UL << 17) #define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \ IO7__POX_ERRSUM__PM_TABORT) #define IO7__POX_ERRSUM__SERR (1UL << 18) #define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19) #define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20) #define IO7__POX_ERRSUM__PERR (1UL << 21) #define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22) #define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23) #define IO7__POX_ERRSUM__PM_PERR (1UL << 24) #define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26) #define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28) #define IO7__POX_ERRSUM__UPE_ERROR__S (51) #define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL) #define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR) #define IO7__POX_ERRSUM__TLB_ERR (1UL << 59) #define IO7__POX_ERRSUM__ERR_VALID (1UL << 63) #define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \ 
IO7__POX_ERRSUM__PCIX_UX_SPL | \ IO7__POX_ERRSUM__PCIX_SPLIT_TO | \ IO7__POX_ERRSUM__DMA_TO | \ IO7__POX_ERRSUM__MABORT_MASK | \ IO7__POX_ERRSUM__TABORT_MASK | \ IO7__POX_ERRSUM__SERR | \ IO7__POX_ERRSUM__ADDRERR_STB | \ IO7__POX_ERRSUM__PERR | \ IO7__POX_ERRSUM__DATAERR_STB_NIOW |\ IO7__POX_ERRSUM__DETECTED_PERR | \ IO7__POX_ERRSUM__PM_PERR | \ IO7__POX_ERRSUM__PT_SCERROR | \ IO7__POX_ERRSUM__UPE_ERROR) if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID)) return; /* * First the transaction summary errors */ if (err_sum & IO7__POX_ERRSUM__MRETRY_TO) printk("%s IO7 Master Retry Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL) printk("%s Unexpected Split Completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO) printk("%s IO7 Split Completion Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_TO) printk("%s Hung bus during DMA transaction\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__MABORT_MASK) printk("%s Master Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_TABORT) printk("%s IO7 Asserted Target Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PM_TABORT) printk("%s IO7 Received Target Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) { printk("%s Address or PCI-X Attribute Parity Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__SERR) printk("%s IO7 Asserted SERR\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__PERR) { if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW) printk("%s IO7 Detected Data Parity Error\n", err_print_prefix); else printk("%s Split Completion Response with " "Parity Error\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR) printk("%s PERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PM_PERR) printk("%s PERR while IO7 is master\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) { printk("%s IO7 Received Split Completion Error message\n", 
err_print_prefix); marvel_print_pox_spl_cmplt(port->pox_spl_cmplt); } if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) { unsigned int upe_error = EXTRACT(err_sum, IO7__POX_ERRSUM__UPE_ERROR); int i; static char *upe_errors[] = { "Parity Error on MSI write data", "MSI read (MSI window is write only", "TLB - Invalid WR transaction", "TLB - Invalid RD transaction", "DMA - WR error (see north port)", "DMA - RD error (see north port)", "PPR - WR error (see north port)", "PPR - RD error (see north port)" }; printk("%s UPE Error:\n", err_print_prefix); for (i = 0; i < 8; i++) { if (upe_error & (1 << i)) printk("%s %s\n", err_print_prefix, upe_errors[i]); } } /* * POx_TRANS_SUM, if appropriate. */ if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK) marvel_print_pox_trans_sum(port->pox_trans_sum); /* * Then TLB_ERR. */ if (err_sum & IO7__POX_ERRSUM__TLB_ERR) { printk("%s TLB ERROR\n", err_print_prefix); marvel_print_pox_tlb_err(port->pox_tlb_err); } /* * And the single bit status errors. */ if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL) printk("%s AGP Request Queue Overflow\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR) printk("%s AGP Sync Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL) printk("%s Discarded split completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO) printk("%s DMA Read Timeout\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD) printk("%s CSR NXM READ\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR) printk("%s CSR NXM WRITE\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR) printk("%s SERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__HUNG_BUS) printk("%s HUNG BUS detected\n", err_print_prefix); } #endif /* CONFIG_VERBOSE_MCHECK */ static struct ev7_pal_io_subpacket * marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets) { struct ev7_pal_io_subpacket *io = lf_subpackets->io; struct io7 *io7; int i; /* * Caller must 
provide the packet to fill */ if (!io) return NULL; /* * Fill the subpacket with the console's standard fill pattern */ memset(io, 0x55, sizeof(*io)); for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) { unsigned long err_sum = 0; err_sum |= io7->csrs->PO7_ERROR_SUM.csr; for (i = 0; i < IO7_NUM_PORTS; i++) { if (!io7->ports[i].enabled) continue; err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr; } /* * Is there at least one error? */ if (err_sum & (1UL << 63)) break; } /* * Did we find an IO7 with an error? */ if (!io7) return NULL; /* * We have an IO7 with an error. * * Fill in the IO subpacket. */ io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr; io->io_sys_rev = io7->csrs->IO_SYS_REV.csr; io->io7_uph = io7->csrs->IO7_UPH.csr; io->hpi_ctl = io7->csrs->HPI_CTL.csr; io->crd_ctl = io7->csrs->CRD_CTL.csr; io->hei_ctl = io7->csrs->HEI_CTL.csr; io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr; io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr; io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr; io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr; io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr; io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr; for (i = 0; i < IO7_NUM_PORTS; i++) { io7_ioport_csrs *csrs = io7->ports[i].csrs; if (!io7->ports[i].enabled) continue; io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr; io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr; io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr; io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr; io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr; io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr; io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr; io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr; io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr; io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr; /* * Ack this port's errors, if any. POx_ERR_SUM must be last. * * Most of the error registers get cleared and unlocked when * the associated bits in POx_ERR_SUM are cleared (by writing * 1). 
POx_TLB_ERR is an exception and must be explicitly * cleared. */ csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err; csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum; mb(); csrs->POx_ERR_SUM.csr; } /* * Ack any port 7 error(s). */ io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum; mb(); io7->csrs->PO7_ERROR_SUM.csr; /* * Correct the io7_pid. */ lf_subpackets->io_pid = io7->pe; return io; } static int marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_io_subpacket *io = lf_subpackets->io; int i; #endif /* CONFIG_VERBOSE_MCHECK */ #define MARVEL_IO_ERR_VALID(x) ((x) & (1UL << 63)) if (!lf_subpackets->logout || !lf_subpackets->io) return status; /* * The PALcode only builds an IO subpacket if there is a * locally connected IO7. In the cases of * 1) a uniprocessor kernel * 2) an mp kernel before the local secondary has called in * error interrupts are all directed to the primary processor. * In that case, we may not have an IO subpacket at all and, event * if we do, it may not be the right now. * * If the RBOX indicates an I/O error interrupt, make sure we have * the correct IO7 information. If we don't have an IO subpacket * or it's the wrong one, try to find the right one. * * RBOX I/O error interrupts are indicated by RBOX_INT<29> and * RBOX_INT<10>. */ if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) || ((lf_subpackets->io->po7_error_sum | lf_subpackets->io->ports[0].pox_err_sum | lf_subpackets->io->ports[1].pox_err_sum | lf_subpackets->io->ports[2].pox_err_sum | lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) { /* * Either we have no IO subpacket or no error is * indicated in the one we do have. Try find the * one with the error. 
*/ if (!marvel_find_io7_with_error(lf_subpackets)) return status; } /* * We have an IO7 indicating an error - we're going to report it */ status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s*Error occurred on IO7 at PID %u\n", err_print_prefix, lf_subpackets->io_pid); /* * Check port 7 first */ if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) { marvel_print_po7_err_sum(io); #if 0 printk("%s PORT 7 ERROR:\n" "%s PO7_ERROR_SUM: %016llx\n" "%s PO7_UNCRR_SYM: %016llx\n" "%s PO7_CRRCT_SYM: %016llx\n" "%s PO7_UGBGE_SYM: %016llx\n" "%s PO7_ERR_PKT0: %016llx\n" "%s PO7_ERR_PKT1: %016llx\n", err_print_prefix, err_print_prefix, io->po7_error_sum, err_print_prefix, io->po7_uncrr_sym, err_print_prefix, io->po7_crrct_sym, err_print_prefix, io->po7_ugbge_sym, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); #endif } /* * Then loop through the ports */ for (i = 0; i < IO7_NUM_PORTS; i++) { if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) continue; printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n", err_print_prefix, lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); printk("%s [ POx_FIRST_ERR: %016llx ]\n", err_print_prefix, io->ports[i].pox_first_err); marvel_print_pox_err(io->ports[i].pox_first_err, &io->ports[i]); } #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * I/O error? */ #define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul if (lf_subpackets->logout && (lf_subpackets->logout->rbox_int & 0x20000400ul)) status = marvel_process_io_error(lf_subpackets, print); /* * Probing behind PCI-X bridges can cause machine checks on * Marvel when the probe is handled by the bridge as a split * completion transaction. The symptom is an ERROR_RESPONSE * to a CONFIG address. 
Since these errors will happen in * normal operation, dismiss them. * * Dismiss if: * C_STAT = 0x14 (Error Response) * C_STS<3> = 0 (C_ADDR valid) * C_ADDR<42> = 1 (I/O) * C_ADDR<31:22> = 111110xxb (PCI Config space) */ if (lf_subpackets->ev7 && (lf_subpackets->ev7->c_stat == 0x14) && !(lf_subpackets->ev7->c_sts & 0x8) && ((lf_subpackets->ev7->c_addr & 0x400ff000000ul) == 0x400fe000000ul)) status = MCHK_DISPOSITION_DISMISS; return status; } void marvel_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr; int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL; struct ev7_lf_subpackets subpacket_collection = { NULL, }; struct ev7_pal_io_subpacket scratch_io_packet = { 0, }; struct ev7_lf_subpackets *lf_subpackets = NULL; int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR; char *saved_err_prefix = err_print_prefix; char *error_type = NULL; /* * Sync the processor */ mb(); draina(); switch(vector) { case SCB_Q_SYSEVENT: process_frame = marvel_process_680_frame; error_type = "System Event"; break; case SCB_Q_SYSMCHK: process_frame = marvel_process_logout_frame; error_type = "System Uncorrectable Error"; break; case SCB_Q_SYSERR: process_frame = marvel_process_logout_frame; error_type = "System Correctable Error"; break; default: /* Don't know it - pass it up. */ ev7_machine_check(vector, la_ptr); return; } /* * A system event or error has occurred, handle it here. * * Any errors in the logout frame have already been cleared by the * PALcode, so just parse it. */ err_print_prefix = KERN_CRIT; /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ lf_subpackets = ev7_collect_logout_frame_subpackets(el_ptr, &subpacket_collection); if (process_frame && lf_subpackets && lf_subpackets->logout) { /* * We might not have the correct (or any) I/O subpacket. 
* [ See marvel_process_io_error() for explanation. ] * If we don't have one, point the io subpacket in * lf_subpackets at scratch_io_packet so that * marvel_find_io7_with_error() will have someplace to * store the info. */ if (!lf_subpackets->io) lf_subpackets->io = &scratch_io_packet; /* * Default io_pid to the processor reporting the error * [this will get changed in marvel_find_io7_with_error() * if a different one is needed] */ lf_subpackets->io_pid = lf_subpackets->logout->whami; /* * Evaluate the frames. */ disposition = process_frame(lf_subpackets, 0); } switch(disposition) { case MCHK_DISPOSITION_DISMISS: /* Nothing to do. */ break; case MCHK_DISPOSITION_REPORT: /* Recognized error, report it. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_print_timestamp(&lf_subpackets->logout->timestamp); process_frame(lf_subpackets, 1); break; default: /* Unknown - dump the annotated subpackets. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_process_subpacket(el_ptr); break; } err_print_prefix = saved_err_prefix; /* Release the logout frame. */ wrmces(0x7); mb(); } void __init marvel_register_error_handlers(void) { ev7_register_error_handlers(); }
gpl-2.0
zeusk/tiamat-leo
fs/partitions/acorn.c
13219
12563
/* * linux/fs/partitions/acorn.c * * Copyright (c) 1996-2000 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Scan ADFS partitions on hard disk drives. Unfortunately, there * isn't a standard for partitioning drives on Acorn machines, so * every single manufacturer of SCSI and IDE cards created their own * method. */ #include <linux/buffer_head.h> #include <linux/adfs_fs.h> #include "check.h" #include "acorn.h" /* * Partition types. (Oh for reusability) */ #define PARTITION_RISCIX_MFM 1 #define PARTITION_RISCIX_SCSI 2 #define PARTITION_LINUX 9 #if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ defined(CONFIG_ACORN_PARTITION_ADFS) static struct adfs_discrecord * adfs_partition(struct parsed_partitions *state, char *name, char *data, unsigned long first_sector, int slot) { struct adfs_discrecord *dr; unsigned int nr_sects; if (adfs_checkbblk(data)) return NULL; dr = (struct adfs_discrecord *)(data + 0x1c0); if (dr->disc_size == 0 && dr->disc_size_high == 0) return NULL; nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) | (le32_to_cpu(dr->disc_size) >> 9); if (name) { strlcat(state->pp_buf, " [", PAGE_SIZE); strlcat(state->pp_buf, name, PAGE_SIZE); strlcat(state->pp_buf, "]", PAGE_SIZE); } put_partition(state, slot, first_sector, nr_sects); return dr; } #endif #ifdef CONFIG_ACORN_PARTITION_RISCIX struct riscix_part { __le32 start; __le32 length; __le32 one; char name[16]; }; struct riscix_record { __le32 magic; #define RISCIX_MAGIC cpu_to_le32(0x4a657320) __le32 date; struct riscix_part part[8]; }; #if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ defined(CONFIG_ACORN_PARTITION_ADFS) static int riscix_partition(struct parsed_partitions *state, unsigned long first_sect, int slot, unsigned long nr_sects) { Sector sect; struct riscix_record *rr; rr = read_part_sector(state, first_sect, &sect); if (!rr) return -1; 
strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE); if (rr->magic == RISCIX_MAGIC) { unsigned long size = nr_sects > 2 ? 2 : nr_sects; int part; strlcat(state->pp_buf, " <", PAGE_SIZE); put_partition(state, slot++, first_sect, size); for (part = 0; part < 8; part++) { if (rr->part[part].one && memcmp(rr->part[part].name, "All\0", 4)) { put_partition(state, slot++, le32_to_cpu(rr->part[part].start), le32_to_cpu(rr->part[part].length)); strlcat(state->pp_buf, "(", PAGE_SIZE); strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE); strlcat(state->pp_buf, ")", PAGE_SIZE); } } strlcat(state->pp_buf, " >\n", PAGE_SIZE); } else { put_partition(state, slot++, first_sect, nr_sects); } put_dev_sector(sect); return slot; } #endif #endif #define LINUX_NATIVE_MAGIC 0xdeafa1de #define LINUX_SWAP_MAGIC 0xdeafab1e struct linux_part { __le32 magic; __le32 start_sect; __le32 nr_sects; }; #if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ defined(CONFIG_ACORN_PARTITION_ADFS) static int linux_partition(struct parsed_partitions *state, unsigned long first_sect, int slot, unsigned long nr_sects) { Sector sect; struct linux_part *linuxp; unsigned long size = nr_sects > 2 ? 
2 : nr_sects; strlcat(state->pp_buf, " [Linux]", PAGE_SIZE); put_partition(state, slot++, first_sect, size); linuxp = read_part_sector(state, first_sect, &sect); if (!linuxp) return -1; strlcat(state->pp_buf, " <", PAGE_SIZE); while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) || linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) { if (slot == state->limit) break; put_partition(state, slot++, first_sect + le32_to_cpu(linuxp->start_sect), le32_to_cpu(linuxp->nr_sects)); linuxp ++; } strlcat(state->pp_buf, " >", PAGE_SIZE); put_dev_sector(sect); return slot; } #endif #ifdef CONFIG_ACORN_PARTITION_CUMANA int adfspart_check_CUMANA(struct parsed_partitions *state) { unsigned long first_sector = 0; unsigned int start_blk = 0; Sector sect; unsigned char *data; char *name = "CUMANA/ADFS"; int first = 1; int slot = 1; /* * Try Cumana style partitions - sector 6 contains ADFS boot block * with pointer to next 'drive'. * * There are unknowns in this code - is the 'cylinder number' of the * next partition relative to the start of this one - I'm assuming * it is. * * Also, which ID did Cumana use? * * This is totally unfinished, and will require more work to get it * going. Hence it is totally untested. */ do { struct adfs_discrecord *dr; unsigned int nr_sects; data = read_part_sector(state, start_blk * 2 + 6, &sect); if (!data) return -1; if (slot == state->limit) break; dr = adfs_partition(state, name, data, first_sector, slot++); if (!dr) break; name = NULL; nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) * (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) * dr->secspertrack; if (!nr_sects) break; first = 0; first_sector += nr_sects; start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9); nr_sects = 0; /* hmm - should be partition size */ switch (data[0x1fc] & 15) { case 0: /* No partition / ADFS? */ break; #ifdef CONFIG_ACORN_PARTITION_RISCIX case PARTITION_RISCIX_SCSI: /* RISCiX - we don't know how to find the next one. 
*/ slot = riscix_partition(state, first_sector, slot, nr_sects); break; #endif case PARTITION_LINUX: slot = linux_partition(state, first_sector, slot, nr_sects); break; } put_dev_sector(sect); if (slot == -1) return -1; } while (1); put_dev_sector(sect); return first ? 0 : 1; } #endif #ifdef CONFIG_ACORN_PARTITION_ADFS /* * Purpose: allocate ADFS partitions. * * Params : hd - pointer to gendisk structure to store partition info. * dev - device number to access. * * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok. * * Alloc : hda = whole drive * hda1 = ADFS partition on first drive. * hda2 = non-ADFS partition. */ int adfspart_check_ADFS(struct parsed_partitions *state) { unsigned long start_sect, nr_sects, sectscyl, heads; Sector sect; unsigned char *data; struct adfs_discrecord *dr; unsigned char id; int slot = 1; data = read_part_sector(state, 6, &sect); if (!data) return -1; dr = adfs_partition(state, "ADFS", data, 0, slot++); if (!dr) { put_dev_sector(sect); return 0; } heads = dr->heads + ((dr->lowsector >> 6) & 1); sectscyl = dr->secspertrack * heads; start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl; id = data[0x1fc] & 15; put_dev_sector(sect); /* * Work out start of non-adfs partition. 
*/ nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect; if (start_sect) { switch (id) { #ifdef CONFIG_ACORN_PARTITION_RISCIX case PARTITION_RISCIX_SCSI: case PARTITION_RISCIX_MFM: slot = riscix_partition(state, start_sect, slot, nr_sects); break; #endif case PARTITION_LINUX: slot = linux_partition(state, start_sect, slot, nr_sects); break; } } strlcat(state->pp_buf, "\n", PAGE_SIZE); return 1; } #endif #ifdef CONFIG_ACORN_PARTITION_ICS struct ics_part { __le32 start; __le32 size; }; static int adfspart_check_ICSLinux(struct parsed_partitions *state, unsigned long block) { Sector sect; unsigned char *data = read_part_sector(state, block, &sect); int result = 0; if (data) { if (memcmp(data, "LinuxPart", 9) == 0) result = 1; put_dev_sector(sect); } return result; } /* * Check for a valid ICS partition using the checksum. */ static inline int valid_ics_sector(const unsigned char *data) { unsigned long sum; int i; for (i = 0, sum = 0x50617274; i < 508; i++) sum += data[i]; sum -= le32_to_cpu(*(__le32 *)(&data[508])); return sum == 0; } /* * Purpose: allocate ICS partitions. * Params : hd - pointer to gendisk structure to store partition info. * dev - device number to access. * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok. * Alloc : hda = whole drive * hda1 = ADFS partition 0 on first drive. * hda2 = ADFS partition 1 on first drive. * ..etc.. */ int adfspart_check_ICS(struct parsed_partitions *state) { const unsigned char *data; const struct ics_part *p; int slot; Sector sect; /* * Try ICS style partitions - sector 0 contains partition info. */ data = read_part_sector(state, 0, &sect); if (!data) return -1; if (!valid_ics_sector(data)) { put_dev_sector(sect); return 0; } strlcat(state->pp_buf, " [ICS]", PAGE_SIZE); for (slot = 1, p = (const struct ics_part *)data; p->size; p++) { u32 start = le32_to_cpu(p->start); s32 size = le32_to_cpu(p->size); /* yes, it's signed. 
*/ if (slot == state->limit) break; /* * Negative sizes tell the RISC OS ICS driver to ignore * this partition - in effect it says that this does not * contain an ADFS filesystem. */ if (size < 0) { size = -size; /* * Our own extension - We use the first sector * of the partition to identify what type this * partition is. We must not make this visible * to the filesystem. */ if (size > 1 && adfspart_check_ICSLinux(state, start)) { start += 1; size -= 1; } } if (size) put_partition(state, slot++, start, size); } put_dev_sector(sect); strlcat(state->pp_buf, "\n", PAGE_SIZE); return 1; } #endif #ifdef CONFIG_ACORN_PARTITION_POWERTEC struct ptec_part { __le32 unused1; __le32 unused2; __le32 start; __le32 size; __le32 unused5; char type[8]; }; static inline int valid_ptec_sector(const unsigned char *data) { unsigned char checksum = 0x2a; int i; /* * If it looks like a PC/BIOS partition, then it * probably isn't PowerTec. */ if (data[510] == 0x55 && data[511] == 0xaa) return 0; for (i = 0; i < 511; i++) checksum += data[i]; return checksum == data[511]; } /* * Purpose: allocate ICS partitions. * Params : hd - pointer to gendisk structure to store partition info. * dev - device number to access. * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok. * Alloc : hda = whole drive * hda1 = ADFS partition 0 on first drive. * hda2 = ADFS partition 1 on first drive. * ..etc.. 
*/ int adfspart_check_POWERTEC(struct parsed_partitions *state) { Sector sect; const unsigned char *data; const struct ptec_part *p; int slot = 1; int i; data = read_part_sector(state, 0, &sect); if (!data) return -1; if (!valid_ptec_sector(data)) { put_dev_sector(sect); return 0; } strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE); for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) { u32 start = le32_to_cpu(p->start); u32 size = le32_to_cpu(p->size); if (size) put_partition(state, slot++, start, size); } put_dev_sector(sect); strlcat(state->pp_buf, "\n", PAGE_SIZE); return 1; } #endif #ifdef CONFIG_ACORN_PARTITION_EESOX struct eesox_part { char magic[6]; char name[10]; __le32 start; __le32 unused6; __le32 unused7; __le32 unused8; }; /* * Guess who created this format? */ static const char eesox_name[] = { 'N', 'e', 'i', 'l', ' ', 'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' ' }; /* * EESOX SCSI partition format. * * This is a goddamned awful partition format. We don't seem to store * the size of the partition in this table, only the start addresses. * * There are two possibilities where the size comes from: * 1. The individual ADFS boot block entries that are placed on the disk. * 2. The start address of the next entry. */ int adfspart_check_EESOX(struct parsed_partitions *state) { Sector sect; const unsigned char *data; unsigned char buffer[256]; struct eesox_part *p; sector_t start = 0; int i, slot = 1; data = read_part_sector(state, 7, &sect); if (!data) return -1; /* * "Decrypt" the partition table. God knows why... 
*/ for (i = 0; i < 256; i++) buffer[i] = data[i] ^ eesox_name[i & 15]; put_dev_sector(sect); for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) { sector_t next; if (memcmp(p->magic, "Eesox", 6)) break; next = le32_to_cpu(p->start); if (i) put_partition(state, slot++, start, next - start); start = next; } if (i != 0) { sector_t size; size = get_capacity(state->bdev->bd_disk); put_partition(state, slot++, start, size - start); strlcat(state->pp_buf, "\n", PAGE_SIZE); } return i ? 1 : 0; } #endif
gpl-2.0
EmericanX/android_kernel_motorola_msm8960-common
drivers/tc/tc-driver.c
15011
3027
/* * TURBOchannel driver services. * * Copyright (c) 2005 James Simmons * Copyright (c) 2006 Maciej W. Rozycki * * Loosely based on drivers/dio/dio-driver.c and * drivers/pci/pci-driver.c. * * This file is subject to the terms and conditions of the GNU * General Public License. See the file "COPYING" in the main * directory of this archive for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/tc.h> /** * tc_register_driver - register a new TC driver * @drv: the driver structure to register * * Adds the driver structure to the list of registered drivers * Returns a negative value on error, otherwise 0. * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int tc_register_driver(struct tc_driver *tdrv) { return driver_register(&tdrv->driver); } EXPORT_SYMBOL(tc_register_driver); /** * tc_unregister_driver - unregister a TC driver * @drv: the driver structure to unregister * * Deletes the driver structure from the list of registered TC drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as * driverless. */ void tc_unregister_driver(struct tc_driver *tdrv) { driver_unregister(&tdrv->driver); } EXPORT_SYMBOL(tc_unregister_driver); /** * tc_match_device - tell if a TC device structure has a matching * TC device ID structure * @tdrv: the TC driver to earch for matching TC device ID strings * @tdev: the TC device structure to match against * * Used by a driver to check whether a TC device present in the * system is in its list of supported devices. Returns the matching * tc_device_id structure or %NULL if there is no match. 
*/ const struct tc_device_id *tc_match_device(struct tc_driver *tdrv, struct tc_dev *tdev) { const struct tc_device_id *id = tdrv->id_table; if (id) { while (id->name[0] || id->vendor[0]) { if (strcmp(tdev->name, id->name) == 0 && strcmp(tdev->vendor, id->vendor) == 0) return id; id++; } } return NULL; } EXPORT_SYMBOL(tc_match_device); /** * tc_bus_match - Tell if a device structure has a matching * TC device ID structure * @dev: the device structure to match against * @drv: the device driver to search for matching TC device ID strings * * Used by a driver to check whether a TC device present in the * system is in its list of supported devices. Returns 1 if there * is a match or 0 otherwise. */ static int tc_bus_match(struct device *dev, struct device_driver *drv) { struct tc_dev *tdev = to_tc_dev(dev); struct tc_driver *tdrv = to_tc_driver(drv); const struct tc_device_id *id; id = tc_match_device(tdrv, tdev); if (id) return 1; return 0; } struct bus_type tc_bus_type = { .name = "tc", .match = tc_bus_match, }; EXPORT_SYMBOL(tc_bus_type); static int __init tc_driver_init(void) { return bus_register(&tc_bus_type); } postcore_initcall(tc_driver_init);
gpl-2.0
F4uzan/mono_hima
drivers/usb/dwc3/debugfs.c
164
35084
/** * debugfs.c - DesignWare USB3 DRD Controller DebugFS file * * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com * * Authors: Felipe Balbi <balbi@ti.com>, * Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the above-listed copyright holders may not be used * to endorse or promote products derived from this software without * specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2, as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/ptrace.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/usb/ch9.h> #include "core.h" #include "gadget.h" #include "io.h" #include "debug.h" #define dump_register(nm) \ { \ .name = __stringify(nm), \ .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \ } #define ep_event_rate(ev, c, p, dt) \ ((dt) ? ((c.ev - p.ev) * (MSEC_PER_SEC)) / (dt) : 0) static const struct debugfs_reg32 dwc3_regs[] = { dump_register(GSBUSCFG0), dump_register(GSBUSCFG1), dump_register(GTXTHRCFG), dump_register(GRXTHRCFG), dump_register(GCTL), dump_register(GEVTEN), dump_register(GSTS), dump_register(GSNPSID), dump_register(GGPIO), dump_register(GUID), dump_register(GUCTL), dump_register(GBUSERRADDR0), dump_register(GBUSERRADDR1), dump_register(GPRTBIMAP0), dump_register(GPRTBIMAP1), dump_register(GHWPARAMS0), dump_register(GHWPARAMS1), dump_register(GHWPARAMS2), dump_register(GHWPARAMS3), dump_register(GHWPARAMS4), dump_register(GHWPARAMS5), dump_register(GHWPARAMS6), dump_register(GHWPARAMS7), dump_register(GDBGFIFOSPACE), dump_register(GDBGLTSSM), dump_register(GPRTBIMAP_HS0), dump_register(GPRTBIMAP_HS1), dump_register(GPRTBIMAP_FS0), dump_register(GPRTBIMAP_FS1), dump_register(GUSB2PHYCFG(0)), dump_register(GUSB2PHYCFG(1)), dump_register(GUSB2PHYCFG(2)), dump_register(GUSB2PHYCFG(3)), dump_register(GUSB2PHYCFG(4)), dump_register(GUSB2PHYCFG(5)), dump_register(GUSB2PHYCFG(6)), dump_register(GUSB2PHYCFG(7)), dump_register(GUSB2PHYCFG(8)), dump_register(GUSB2PHYCFG(9)), dump_register(GUSB2PHYCFG(10)), dump_register(GUSB2PHYCFG(11)), dump_register(GUSB2PHYCFG(12)), dump_register(GUSB2PHYCFG(13)), dump_register(GUSB2PHYCFG(14)), dump_register(GUSB2PHYCFG(15)), dump_register(GUSB2I2CCTL(0)), dump_register(GUSB2I2CCTL(1)), dump_register(GUSB2I2CCTL(2)), 
dump_register(GUSB2I2CCTL(3)), dump_register(GUSB2I2CCTL(4)), dump_register(GUSB2I2CCTL(5)), dump_register(GUSB2I2CCTL(6)), dump_register(GUSB2I2CCTL(7)), dump_register(GUSB2I2CCTL(8)), dump_register(GUSB2I2CCTL(9)), dump_register(GUSB2I2CCTL(10)), dump_register(GUSB2I2CCTL(11)), dump_register(GUSB2I2CCTL(12)), dump_register(GUSB2I2CCTL(13)), dump_register(GUSB2I2CCTL(14)), dump_register(GUSB2I2CCTL(15)), dump_register(GUSB2PHYACC(0)), dump_register(GUSB2PHYACC(1)), dump_register(GUSB2PHYACC(2)), dump_register(GUSB2PHYACC(3)), dump_register(GUSB2PHYACC(4)), dump_register(GUSB2PHYACC(5)), dump_register(GUSB2PHYACC(6)), dump_register(GUSB2PHYACC(7)), dump_register(GUSB2PHYACC(8)), dump_register(GUSB2PHYACC(9)), dump_register(GUSB2PHYACC(10)), dump_register(GUSB2PHYACC(11)), dump_register(GUSB2PHYACC(12)), dump_register(GUSB2PHYACC(13)), dump_register(GUSB2PHYACC(14)), dump_register(GUSB2PHYACC(15)), dump_register(GUSB3PIPECTL(0)), dump_register(GUSB3PIPECTL(1)), dump_register(GUSB3PIPECTL(2)), dump_register(GUSB3PIPECTL(3)), dump_register(GUSB3PIPECTL(4)), dump_register(GUSB3PIPECTL(5)), dump_register(GUSB3PIPECTL(6)), dump_register(GUSB3PIPECTL(7)), dump_register(GUSB3PIPECTL(8)), dump_register(GUSB3PIPECTL(9)), dump_register(GUSB3PIPECTL(10)), dump_register(GUSB3PIPECTL(11)), dump_register(GUSB3PIPECTL(12)), dump_register(GUSB3PIPECTL(13)), dump_register(GUSB3PIPECTL(14)), dump_register(GUSB3PIPECTL(15)), dump_register(GTXFIFOSIZ(0)), dump_register(GTXFIFOSIZ(1)), dump_register(GTXFIFOSIZ(2)), dump_register(GTXFIFOSIZ(3)), dump_register(GTXFIFOSIZ(4)), dump_register(GTXFIFOSIZ(5)), dump_register(GTXFIFOSIZ(6)), dump_register(GTXFIFOSIZ(7)), dump_register(GTXFIFOSIZ(8)), dump_register(GTXFIFOSIZ(9)), dump_register(GTXFIFOSIZ(10)), dump_register(GTXFIFOSIZ(11)), dump_register(GTXFIFOSIZ(12)), dump_register(GTXFIFOSIZ(13)), dump_register(GTXFIFOSIZ(14)), dump_register(GTXFIFOSIZ(15)), dump_register(GTXFIFOSIZ(16)), dump_register(GTXFIFOSIZ(17)), 
dump_register(GTXFIFOSIZ(18)), dump_register(GTXFIFOSIZ(19)), dump_register(GTXFIFOSIZ(20)), dump_register(GTXFIFOSIZ(21)), dump_register(GTXFIFOSIZ(22)), dump_register(GTXFIFOSIZ(23)), dump_register(GTXFIFOSIZ(24)), dump_register(GTXFIFOSIZ(25)), dump_register(GTXFIFOSIZ(26)), dump_register(GTXFIFOSIZ(27)), dump_register(GTXFIFOSIZ(28)), dump_register(GTXFIFOSIZ(29)), dump_register(GTXFIFOSIZ(30)), dump_register(GTXFIFOSIZ(31)), dump_register(GRXFIFOSIZ(0)), dump_register(GRXFIFOSIZ(1)), dump_register(GRXFIFOSIZ(2)), dump_register(GRXFIFOSIZ(3)), dump_register(GRXFIFOSIZ(4)), dump_register(GRXFIFOSIZ(5)), dump_register(GRXFIFOSIZ(6)), dump_register(GRXFIFOSIZ(7)), dump_register(GRXFIFOSIZ(8)), dump_register(GRXFIFOSIZ(9)), dump_register(GRXFIFOSIZ(10)), dump_register(GRXFIFOSIZ(11)), dump_register(GRXFIFOSIZ(12)), dump_register(GRXFIFOSIZ(13)), dump_register(GRXFIFOSIZ(14)), dump_register(GRXFIFOSIZ(15)), dump_register(GRXFIFOSIZ(16)), dump_register(GRXFIFOSIZ(17)), dump_register(GRXFIFOSIZ(18)), dump_register(GRXFIFOSIZ(19)), dump_register(GRXFIFOSIZ(20)), dump_register(GRXFIFOSIZ(21)), dump_register(GRXFIFOSIZ(22)), dump_register(GRXFIFOSIZ(23)), dump_register(GRXFIFOSIZ(24)), dump_register(GRXFIFOSIZ(25)), dump_register(GRXFIFOSIZ(26)), dump_register(GRXFIFOSIZ(27)), dump_register(GRXFIFOSIZ(28)), dump_register(GRXFIFOSIZ(29)), dump_register(GRXFIFOSIZ(30)), dump_register(GRXFIFOSIZ(31)), dump_register(GEVNTADRLO(0)), dump_register(GEVNTADRHI(0)), dump_register(GEVNTSIZ(0)), dump_register(GEVNTCOUNT(0)), dump_register(GHWPARAMS8), dump_register(GFLADJ), dump_register(DCFG), dump_register(DCTL), dump_register(DEVTEN), dump_register(DSTS), dump_register(DGCMDPAR), dump_register(DGCMD), dump_register(DALEPENA), dump_register(DEPCMDPAR2(0)), dump_register(DEPCMDPAR2(1)), dump_register(DEPCMDPAR2(2)), dump_register(DEPCMDPAR2(3)), dump_register(DEPCMDPAR2(4)), dump_register(DEPCMDPAR2(5)), dump_register(DEPCMDPAR2(6)), dump_register(DEPCMDPAR2(7)), 
dump_register(DEPCMDPAR2(8)), dump_register(DEPCMDPAR2(9)), dump_register(DEPCMDPAR2(10)), dump_register(DEPCMDPAR2(11)), dump_register(DEPCMDPAR2(12)), dump_register(DEPCMDPAR2(13)), dump_register(DEPCMDPAR2(14)), dump_register(DEPCMDPAR2(15)), dump_register(DEPCMDPAR2(16)), dump_register(DEPCMDPAR2(17)), dump_register(DEPCMDPAR2(18)), dump_register(DEPCMDPAR2(19)), dump_register(DEPCMDPAR2(20)), dump_register(DEPCMDPAR2(21)), dump_register(DEPCMDPAR2(22)), dump_register(DEPCMDPAR2(23)), dump_register(DEPCMDPAR2(24)), dump_register(DEPCMDPAR2(25)), dump_register(DEPCMDPAR2(26)), dump_register(DEPCMDPAR2(27)), dump_register(DEPCMDPAR2(28)), dump_register(DEPCMDPAR2(29)), dump_register(DEPCMDPAR2(30)), dump_register(DEPCMDPAR2(31)), dump_register(DEPCMDPAR1(0)), dump_register(DEPCMDPAR1(1)), dump_register(DEPCMDPAR1(2)), dump_register(DEPCMDPAR1(3)), dump_register(DEPCMDPAR1(4)), dump_register(DEPCMDPAR1(5)), dump_register(DEPCMDPAR1(6)), dump_register(DEPCMDPAR1(7)), dump_register(DEPCMDPAR1(8)), dump_register(DEPCMDPAR1(9)), dump_register(DEPCMDPAR1(10)), dump_register(DEPCMDPAR1(11)), dump_register(DEPCMDPAR1(12)), dump_register(DEPCMDPAR1(13)), dump_register(DEPCMDPAR1(14)), dump_register(DEPCMDPAR1(15)), dump_register(DEPCMDPAR1(16)), dump_register(DEPCMDPAR1(17)), dump_register(DEPCMDPAR1(18)), dump_register(DEPCMDPAR1(19)), dump_register(DEPCMDPAR1(20)), dump_register(DEPCMDPAR1(21)), dump_register(DEPCMDPAR1(22)), dump_register(DEPCMDPAR1(23)), dump_register(DEPCMDPAR1(24)), dump_register(DEPCMDPAR1(25)), dump_register(DEPCMDPAR1(26)), dump_register(DEPCMDPAR1(27)), dump_register(DEPCMDPAR1(28)), dump_register(DEPCMDPAR1(29)), dump_register(DEPCMDPAR1(30)), dump_register(DEPCMDPAR1(31)), dump_register(DEPCMDPAR0(0)), dump_register(DEPCMDPAR0(1)), dump_register(DEPCMDPAR0(2)), dump_register(DEPCMDPAR0(3)), dump_register(DEPCMDPAR0(4)), dump_register(DEPCMDPAR0(5)), dump_register(DEPCMDPAR0(6)), dump_register(DEPCMDPAR0(7)), dump_register(DEPCMDPAR0(8)), 
dump_register(DEPCMDPAR0(9)), dump_register(DEPCMDPAR0(10)), dump_register(DEPCMDPAR0(11)), dump_register(DEPCMDPAR0(12)), dump_register(DEPCMDPAR0(13)), dump_register(DEPCMDPAR0(14)), dump_register(DEPCMDPAR0(15)), dump_register(DEPCMDPAR0(16)), dump_register(DEPCMDPAR0(17)), dump_register(DEPCMDPAR0(18)), dump_register(DEPCMDPAR0(19)), dump_register(DEPCMDPAR0(20)), dump_register(DEPCMDPAR0(21)), dump_register(DEPCMDPAR0(22)), dump_register(DEPCMDPAR0(23)), dump_register(DEPCMDPAR0(24)), dump_register(DEPCMDPAR0(25)), dump_register(DEPCMDPAR0(26)), dump_register(DEPCMDPAR0(27)), dump_register(DEPCMDPAR0(28)), dump_register(DEPCMDPAR0(29)), dump_register(DEPCMDPAR0(30)), dump_register(DEPCMDPAR0(31)), dump_register(DEPCMD(0)), dump_register(DEPCMD(1)), dump_register(DEPCMD(2)), dump_register(DEPCMD(3)), dump_register(DEPCMD(4)), dump_register(DEPCMD(5)), dump_register(DEPCMD(6)), dump_register(DEPCMD(7)), dump_register(DEPCMD(8)), dump_register(DEPCMD(9)), dump_register(DEPCMD(10)), dump_register(DEPCMD(11)), dump_register(DEPCMD(12)), dump_register(DEPCMD(13)), dump_register(DEPCMD(14)), dump_register(DEPCMD(15)), dump_register(DEPCMD(16)), dump_register(DEPCMD(17)), dump_register(DEPCMD(18)), dump_register(DEPCMD(19)), dump_register(DEPCMD(20)), dump_register(DEPCMD(21)), dump_register(DEPCMD(22)), dump_register(DEPCMD(23)), dump_register(DEPCMD(24)), dump_register(DEPCMD(25)), dump_register(DEPCMD(26)), dump_register(DEPCMD(27)), dump_register(DEPCMD(28)), dump_register(DEPCMD(29)), dump_register(DEPCMD(30)), dump_register(DEPCMD(31)), dump_register(OCFG), dump_register(OCTL), dump_register(OEVT), dump_register(OEVTEN), dump_register(OSTS), }; static int dwc3_mode_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); spin_unlock_irqrestore(&dwc->lock, flags); switch (DWC3_GCTL_PRTCAP(reg)) { case DWC3_GCTL_PRTCAP_HOST: seq_printf(s, 
"host\n"); break; case DWC3_GCTL_PRTCAP_DEVICE: seq_printf(s, "device\n"); break; case DWC3_GCTL_PRTCAP_OTG: seq_printf(s, "OTG\n"); break; default: seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); } return 0; } static int dwc3_mode_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_mode_show, inode->i_private); } static ssize_t dwc3_mode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; u32 mode = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "host", 4)) mode |= DWC3_GCTL_PRTCAP_HOST; if (!strncmp(buf, "device", 6)) mode |= DWC3_GCTL_PRTCAP_DEVICE; if (!strncmp(buf, "otg", 3)) mode |= DWC3_GCTL_PRTCAP_OTG; if (mode) { spin_lock_irqsave(&dwc->lock, flags); dwc3_set_mode(dwc, mode); spin_unlock_irqrestore(&dwc->lock, flags); } return count; } static const struct file_operations dwc3_mode_fops = { .open = dwc3_mode_open, .write = dwc3_mode_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_testmode_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= DWC3_DCTL_TSTCTRL_MASK; reg >>= 1; spin_unlock_irqrestore(&dwc->lock, flags); switch (reg) { case 0: seq_printf(s, "no test\n"); break; case TEST_J: seq_printf(s, "test_j\n"); break; case TEST_K: seq_printf(s, "test_k\n"); break; case TEST_SE0_NAK: seq_printf(s, "test_se0_nak\n"); break; case TEST_PACKET: seq_printf(s, "test_packet\n"); break; case TEST_FORCE_EN: seq_printf(s, "test_force_enable\n"); break; default: seq_printf(s, "UNKNOWN %d\n", reg); } return 0; } static int dwc3_testmode_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_testmode_show, inode->i_private); } static ssize_t 
dwc3_testmode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; u32 testmode = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "test_j", 6)) testmode = TEST_J; else if (!strncmp(buf, "test_k", 6)) testmode = TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) testmode = TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) testmode = TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) testmode = TEST_FORCE_EN; else testmode = 0; spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_test_mode(dwc, testmode); spin_unlock_irqrestore(&dwc->lock, flags); return count; } static const struct file_operations dwc3_testmode_fops = { .open = dwc3_testmode_open, .write = dwc3_testmode_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_link_state_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; enum dwc3_link_state state; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DSTS); state = DWC3_DSTS_USBLNKST(reg); spin_unlock_irqrestore(&dwc->lock, flags); switch (state) { case DWC3_LINK_STATE_U0: seq_printf(s, "U0\n"); break; case DWC3_LINK_STATE_U1: seq_printf(s, "U1\n"); break; case DWC3_LINK_STATE_U2: seq_printf(s, "U2\n"); break; case DWC3_LINK_STATE_U3: seq_printf(s, "U3\n"); break; case DWC3_LINK_STATE_SS_DIS: seq_printf(s, "SS.Disabled\n"); break; case DWC3_LINK_STATE_RX_DET: seq_printf(s, "Rx.Detect\n"); break; case DWC3_LINK_STATE_SS_INACT: seq_printf(s, "SS.Inactive\n"); break; case DWC3_LINK_STATE_POLL: seq_printf(s, "Poll\n"); break; case DWC3_LINK_STATE_RECOV: seq_printf(s, "Recovery\n"); break; case DWC3_LINK_STATE_HRESET: seq_printf(s, "HRESET\n"); break; case DWC3_LINK_STATE_CMPLY: seq_printf(s, "Compliance\n"); break; case DWC3_LINK_STATE_LPBK: 
seq_printf(s, "Loopback\n"); break; case DWC3_LINK_STATE_RESET: seq_printf(s, "Reset\n"); break; case DWC3_LINK_STATE_RESUME: seq_printf(s, "Resume\n"); break; default: seq_printf(s, "UNKNOWN %d\n", state); } return 0; } static int dwc3_link_state_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_link_state_show, inode->i_private); } static ssize_t dwc3_link_state_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; enum dwc3_link_state state = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "SS.Disabled", 11)) state = DWC3_LINK_STATE_SS_DIS; else if (!strncmp(buf, "Rx.Detect", 9)) state = DWC3_LINK_STATE_RX_DET; else if (!strncmp(buf, "SS.Inactive", 11)) state = DWC3_LINK_STATE_SS_INACT; else if (!strncmp(buf, "Recovery", 8)) state = DWC3_LINK_STATE_RECOV; else if (!strncmp(buf, "Compliance", 10)) state = DWC3_LINK_STATE_CMPLY; else if (!strncmp(buf, "Loopback", 8)) state = DWC3_LINK_STATE_LPBK; else return -EINVAL; spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); return count; } static const struct file_operations dwc3_link_state_fops = { .open = dwc3_link_state_open, .write = dwc3_link_state_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ep_num; static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; char kbuf[10]; unsigned int num, dir; unsigned long flags; memset(kbuf, 0, 10); if (copy_from_user(kbuf, ubuf, count > 10 ? 
10 : count)) return -EFAULT; if (sscanf(kbuf, "%u %u", &num, &dir) != 2) return -EINVAL; spin_lock_irqsave(&dwc->lock, flags); ep_num = (num << 1) + dir; spin_unlock_irqrestore(&dwc->lock, flags); return count; } static int dwc3_ep_req_list_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_request *req = NULL; struct list_head *ptr = NULL; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s request list: flags: 0x%x\n", dep->name, dep->flags); list_for_each(ptr, &dep->request_list) { req = list_entry(ptr, struct dwc3_request, list); seq_printf(s, "req:0x%p len: %d sts: %d dma:0x%pa num_sgs: %d\n", req, req->request.length, req->request.status, &req->request.dma, req->request.num_sgs); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_req_list_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_req_list_show, inode->i_private); } static const struct file_operations dwc3_ep_req_list_fops = { .open = dwc3_ep_req_list_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_ep_queued_req_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_request *req = NULL; struct list_head *ptr = NULL; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s queued reqs to HW: flags:0x%x\n", dep->name, dep->flags); list_for_each(ptr, &dep->req_queued) { req = list_entry(ptr, struct dwc3_request, list); seq_printf(s, "req:0x%p len:%d sts:%d dma:%pa nsg:%d trb:0x%p\n", req, req->request.length, req->request.status, &req->request.dma, req->request.num_sgs, req->trb); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_queued_req_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_queued_req_show, inode->i_private); } const 
struct file_operations dwc3_ep_req_queued_fops = { .open = dwc3_ep_queued_req_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_ep_trbs_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_trb *trb; unsigned long flags; int j; if (!ep_num) return 0; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s trb pool: flags:0x%x freeslot:%d busyslot:%d\n", dep->name, dep->flags, dep->free_slot, dep->busy_slot); for (j = 0; j < DWC3_TRB_NUM; j++) { trb = &dep->trb_pool[j]; seq_printf(s, "trb:0x%p bph:0x%x bpl:0x%x size:0x%x ctrl: %x\n", trb, trb->bph, trb->bpl, trb->size, trb->ctrl); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_trbs_list_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_trbs_show, inode->i_private); } const struct file_operations dwc3_ep_trb_list_fops = { .open = dwc3_ep_trbs_list_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static unsigned int ep_addr_rxdbg_mask = 1; module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR); static unsigned int ep_addr_txdbg_mask = 1; module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR); /* Maximum debug message length */ #define DBG_DATA_MSG 64UL /* Maximum number of messages */ #define DBG_DATA_MAX 2048UL static struct { char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */ unsigned idx; /* index */ unsigned tty; /* print to console? 
*/ rwlock_t lck; /* lock */ } dbg_dwc3_data = { .idx = 0, .tty = 0, .lck = __RW_LOCK_UNLOCKED(lck) }; /** * dbg_dec: decrements debug event index * @idx: buffer index */ static inline void __maybe_unused dbg_dec(unsigned *idx) { *idx = (*idx - 1) % DBG_DATA_MAX; } /** * dbg_inc: increments debug event index * @idx: buffer index */ static inline void dbg_inc(unsigned *idx) { *idx = (*idx + 1) % DBG_DATA_MAX; } #define TIME_BUF_LEN 20 /*get_timestamp - returns time of day in us */ static char *get_timestamp(char *tbuf) { unsigned long long t; unsigned long nanosec_rem; t = cpu_clock(smp_processor_id()); nanosec_rem = do_div(t, 1000000000)/1000; scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t, nanosec_rem); return tbuf; } static int allow_dbg_print(u8 ep_num) { int dir, num; /* allow bus wide events */ if (ep_num == 0xff) return 1; dir = ep_num & 0x1; num = ep_num >> 1; num = 1 << num; if (dir && (num & ep_addr_txdbg_mask)) return 1; if (!dir && (num & ep_addr_rxdbg_mask)) return 1; return 0; } /** * dbg_print: prints the common part of the event * @addr: endpoint address * @name: event name * @status: status * @extra: extra information */ void dbg_print(u8 ep_num, const char *name, int status, const char *extra) { unsigned long flags; char tbuf[TIME_BUF_LEN]; if (!allow_dbg_print(ep_num)) return; write_lock_irqsave(&dbg_dwc3_data.lck, flags); scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG, "%s\t? %02X %-12.12s %4i ?\t%s\n", get_timestamp(tbuf), ep_num, name, status, extra); dbg_inc(&dbg_dwc3_data.idx); write_unlock_irqrestore(&dbg_dwc3_data.lck, flags); if (dbg_dwc3_data.tty != 0) pr_notice("%s\t? 
%02X %-7.7s %4i ?\t%s\n", get_timestamp(tbuf), ep_num, name, status, extra); } /** * dbg_done: prints a DONE event * @addr: endpoint address * @td: transfer descriptor * @status: status */ void dbg_done(u8 ep_num, const u32 count, int status) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; scnprintf(msg, sizeof(msg), "%d", count); dbg_print(ep_num, "DONE", status, msg); } /** * dbg_event: prints a generic event * @addr: endpoint address * @name: event name * @status: status */ void dbg_event(u8 ep_num, const char *name, int status) { if (!allow_dbg_print(ep_num)) return; if (name != NULL) dbg_print(ep_num, name, status, ""); } /* * dbg_queue: prints a QUEUE event * @addr: endpoint address * @req: USB request * @status: status */ void dbg_queue(u8 ep_num, const struct usb_request *req, int status) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; if (req != NULL) { scnprintf(msg, sizeof(msg), "%d %d", !req->no_interrupt, req->length); dbg_print(ep_num, "QUEUE", status, msg); } } /** * dbg_setup: prints a SETUP event * @addr: endpoint address * @req: setup request */ void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; if (req != NULL) { scnprintf(msg, sizeof(msg), "%02X %02X %04X %04X %d", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); dbg_print(ep_num, "SETUP", 0, msg); } } /** * dbg_print_reg: prints a reg value * @name: reg name * @reg: reg value to be printed */ void dbg_print_reg(const char *name, int reg) { unsigned long flags; write_lock_irqsave(&dbg_dwc3_data.lck, flags); scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG, "%s = 0x%08x\n", name, reg); dbg_inc(&dbg_dwc3_data.idx); write_unlock_irqrestore(&dbg_dwc3_data.lck, flags); if (dbg_dwc3_data.tty != 0) pr_notice("%s = 0x%08x\n", name, reg); } /** * store_events: configure if events are going to be also printed to console * */ 
static ssize_t dwc3_store_events(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned tty; if (buf == NULL) { pr_err("[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u", &tty) != 1 || tty > 1) { pr_err("<1|0>: enable|disable console log\n"); goto done; } dbg_dwc3_data.tty = tty; pr_info("tty = %u", dbg_dwc3_data.tty); done: return count; } static int dwc3_gadget_data_events_show(struct seq_file *s, void *unused) { unsigned long flags; unsigned i; read_lock_irqsave(&dbg_dwc3_data.lck, flags); i = dbg_dwc3_data.idx; if (strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG)) seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]); for (dbg_inc(&i); i != dbg_dwc3_data.idx; dbg_inc(&i)) { if (!strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG)) continue; seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]); } read_unlock_irqrestore(&dbg_dwc3_data.lck, flags); return 0; } static int dwc3_gadget_data_events_open(struct inode *inode, struct file *f) { return single_open(f, dwc3_gadget_data_events_show, inode->i_private); } const struct file_operations dwc3_gadget_dbg_data_fops = { .open = dwc3_gadget_data_events_open, .read = seq_read, .write = dwc3_store_events, .llseek = seq_lseek, .release = single_release, }; static ssize_t dwc3_store_int_events(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { int clear_stats, i; unsigned long flags; struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct timespec ts; if (ubuf == NULL) { pr_err("[%s] EINVAL\n", __func__); goto done; } if (sscanf(ubuf, "%u", &clear_stats) != 1 || clear_stats != 0) { pr_err("Wrong value. 
To clear stats, enter value as 0.\n"); goto done; } spin_lock_irqsave(&dwc->lock, flags); pr_debug("%s(): clearing debug interrupt buffers\n", __func__); ts = current_kernel_time(); for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { dep = dwc->eps[i]; memset(&dep->dbg_ep_events, 0, sizeof(dep->dbg_ep_events)); memset(&dep->dbg_ep_events_diff, 0, sizeof(dep->dbg_ep_events)); dep->dbg_ep_events_ts = ts; } memset(&dwc->dbg_gadget_events, 0, sizeof(dwc->dbg_gadget_events)); spin_unlock_irqrestore(&dwc->lock, flags); done: return count; } static int dwc3_gadget_int_events_show(struct seq_file *s, void *unused) { unsigned long flags; struct dwc3 *dwc = s->private; struct dwc3_gadget_events *dbg_gadget_events; struct dwc3_ep *dep; int i; struct timespec ts_delta; struct timespec ts_current; u32 ts_delta_ms; spin_lock_irqsave(&dwc->lock, flags); dbg_gadget_events = &dwc->dbg_gadget_events; for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { dep = dwc->eps[i]; if (dep == NULL || !(dep->flags & DWC3_EP_ENABLED)) continue; ts_current = current_kernel_time(); ts_delta = timespec_sub(ts_current, dep->dbg_ep_events_ts); ts_delta_ms = ts_delta.tv_nsec / NSEC_PER_MSEC + ts_delta.tv_sec * MSEC_PER_SEC; seq_printf(s, "\n\n===== dbg_ep_events for EP(%d) %s =====\n", i, dep->name); seq_printf(s, "xfercomplete:%u @ %luHz\n", dep->dbg_ep_events.xfercomplete, ep_event_rate(xfercomplete, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "xfernotready:%u @ %luHz\n", dep->dbg_ep_events.xfernotready, ep_event_rate(xfernotready, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "control_data:%u @ %luHz\n", dep->dbg_ep_events.control_data, ep_event_rate(control_data, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "control_status:%u @ %luHz\n", dep->dbg_ep_events.control_status, ep_event_rate(control_status, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "xferinprogress:%u @ %luHz\n", 
dep->dbg_ep_events.xferinprogress, ep_event_rate(xferinprogress, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "rxtxfifoevent:%u @ %luHz\n", dep->dbg_ep_events.rxtxfifoevent, ep_event_rate(rxtxfifoevent, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "streamevent:%u @ %luHz\n", dep->dbg_ep_events.streamevent, ep_event_rate(streamevent, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "epcmdcomplt:%u @ %luHz\n", dep->dbg_ep_events.epcmdcomplete, ep_event_rate(epcmdcomplete, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "cmdcmplt:%u @ %luHz\n", dep->dbg_ep_events.cmdcmplt, ep_event_rate(cmdcmplt, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "unknown:%u @ %luHz\n", dep->dbg_ep_events.unknown_event, ep_event_rate(unknown_event, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); seq_printf(s, "total:%u @ %luHz\n", dep->dbg_ep_events.total, ep_event_rate(total, dep->dbg_ep_events, dep->dbg_ep_events_diff, ts_delta_ms)); dep->dbg_ep_events_ts = ts_current; dep->dbg_ep_events_diff = dep->dbg_ep_events; } seq_puts(s, "\n=== dbg_gadget events ==\n"); seq_printf(s, "disconnect:%u\n reset:%u\n", dbg_gadget_events->disconnect, dbg_gadget_events->reset); seq_printf(s, "connect:%u\n wakeup:%u\n", dbg_gadget_events->connect, dbg_gadget_events->wakeup); seq_printf(s, "link_status_change:%u\n eopf:%u\n", dbg_gadget_events->link_status_change, dbg_gadget_events->eopf); seq_printf(s, "sof:%u\n suspend:%u\n", dbg_gadget_events->sof, dbg_gadget_events->suspend); seq_printf(s, "erratic_error:%u\n overflow:%u\n", dbg_gadget_events->erratic_error, dbg_gadget_events->overflow); seq_printf(s, "vendor_dev_test_lmp:%u\n cmdcmplt:%u\n", dbg_gadget_events->vendor_dev_test_lmp, dbg_gadget_events->cmdcmplt); seq_printf(s, "unknown_event:%u\n", dbg_gadget_events->unknown_event); seq_printf(s, "\n\t== Last %d interrupts stats ==\t\n", MAX_INTR_STATS); 
seq_puts(s, "@ time (us):\t"); for (i = 0; i < MAX_INTR_STATS; i++) seq_printf(s, "%lld\t", ktime_to_us(dwc->irq_start_time[i])); seq_puts(s, "\nhard irq time (us):\t"); for (i = 0; i < MAX_INTR_STATS; i++) seq_printf(s, "%d\t", dwc->irq_completion_time[i]); seq_puts(s, "\nevents count:\t"); for (i = 0; i < MAX_INTR_STATS; i++) seq_printf(s, "%d\t", dwc->irq_event_count[i]); seq_puts(s, "\nbh handled count:\t"); for (i = 0; i < MAX_INTR_STATS; i++) seq_printf(s, "%d\t", dwc->bh_handled_evt_cnt[i]); seq_puts(s, "\ntasklet time:\t"); for (i = 0; i < MAX_INTR_STATS; i++) seq_printf(s, "%d\t", dwc->bh_completion_time[i]); seq_puts(s, "\n(usec)\n"); spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_gadget_events_open(struct inode *inode, struct file *f) { return single_open(f, dwc3_gadget_int_events_show, inode->i_private); } const struct file_operations dwc3_gadget_dbg_events_fops = { .open = dwc3_gadget_events_open, .read = seq_read, .write = dwc3_store_int_events, .llseek = seq_lseek, .release = single_release, }; int dwc3_debugfs_init(struct dwc3 *dwc) { struct dentry *root; struct dentry *file; int ret; root = debugfs_create_dir(dev_name(dwc->dev), NULL); if (!root) { ret = -ENOMEM; goto err0; } dwc->root = root; dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL); if (!dwc->regset) { ret = -ENOMEM; goto err1; } dwc->regset->regs = dwc3_regs; dwc->regset->nregs = ARRAY_SIZE(dwc3_regs); dwc->regset->base = dwc->regs; file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); if (!file) { ret = -ENOMEM; goto err1; } if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc, &dwc3_mode_fops); if (!file) { ret = -ENOMEM; goto err1; } } if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) || IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, dwc, &dwc3_testmode_fops); if (!file) { ret = -ENOMEM; goto err1; } file = 
debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc, &dwc3_link_state_fops); if (!file) { ret = -ENOMEM; goto err1; } } file = debugfs_create_file("trbs", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_trb_list_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("requests", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_req_list_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("queued_reqs", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_req_queued_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("events", S_IRUGO | S_IWUSR, root, dwc, &dwc3_gadget_dbg_data_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("int_events", S_IRUGO | S_IWUSR, root, dwc, &dwc3_gadget_dbg_events_fops); if (!file) { ret = -ENOMEM; goto err1; } return 0; err1: debugfs_remove_recursive(root); err0: return ret; } void dwc3_debugfs_exit(struct dwc3 *dwc) { debugfs_remove_recursive(dwc->root); dwc->root = NULL; }
gpl-2.0
karandpr/Doppler3ICS
drivers/scsi/lpfc/lpfc_init.c
420
242464
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" char *_dump_buf_data; unsigned long _dump_buf_data_order; char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_create(struct lpfc_hba *); static void lpfc_sli4_queue_destroy(struct lpfc_hba *); static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static int lpfc_sli4_read_config(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); static void lpfc_free_sgl_list(struct lpfc_hba *); static int lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); 
/** * lpfc_config_port_prep - Perform lpfc initialization prior to config port * @phba: pointer to lpfc hba data structure. * * This routine will do LPFC initialization prior to issuing the CONFIG_PORT * mailbox command. It retrieves the revision information from the HBA and * collects the Vital Product Data (VPD) about the HBA for preparing the * configuration of the HBA. * * Return codes: * 0 - success. * -ERESTART - requests the SLI layer to reset the HBA and try again. * Any other value - indicates an error. **/ int lpfc_config_port_prep(struct lpfc_hba *phba) { lpfc_vpd_t *vp = &phba->vpd; int i = 0, rc; LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; char *lpfc_vpd_data = NULL; uint16_t offset = 0; static char licensed[56] = "key unlock for use with gnu public licensed code only\0"; static int init_key = 1; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { if (init_key) { uint32_t *ptext = (uint32_t *) licensed; for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) *ptext = cpu_to_be32(*ptext); init_key = 0; } lpfc_read_nv(phba, pmb); memset((char*)mb->un.varRDnvp.rsvd3, 0, sizeof (mb->un.varRDnvp.rsvd3)); memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, sizeof (licensed)); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0324 Config Port initialization " "error, mbxCmd x%x READ_NVPARM, " "mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -ERESTART; } memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, sizeof(phba->wwnn)); memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, sizeof(phba->wwpn)); } phba->sli3_options = 0x0; /* Setup and issue mailbox READ REV command */ lpfc_read_rev(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, "0439 Adapter failed to init, mbxCmd x%x " "READ_REV, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mempool_free( pmb, phba->mbox_mem_pool); return -ERESTART; } /* * The value of rr must be 1 since the driver set the cv field to 1. * This setting requires the FW to set all revision fields. */ if (mb->un.varRdRev.rr == 0) { vp->rev.rBit = 0; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0440 Adapter failed to init, READ_REV has " "missing revision information.\n"); mempool_free(pmb, phba->mbox_mem_pool); return -ERESTART; } if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { mempool_free(pmb, phba->mbox_mem_pool); return -EINVAL; } /* Save information as VPD data */ vp->rev.rBit = 1; memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); vp->rev.biuRev = mb->un.varRdRev.biuRev; vp->rev.smRev = mb->un.varRdRev.smRev; vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; vp->rev.endecRev = mb->un.varRdRev.endecRev; vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; vp->rev.fcphLow = mb->un.varRdRev.fcphLow; vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; vp->rev.postKernRev = mb->un.varRdRev.postKernRev; vp->rev.opFwRev = mb->un.varRdRev.opFwRev; /* If the sli feature level is less then 9, we must * tear down all RPIs and VPIs on link down if NPIV * is enabled. 
*/ if (vp->rev.feaLevelHigh < 9) phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; if (lpfc_is_LC_HBA(phba->pcidev->device)) memcpy(phba->RandomData, (char *)&mb->un.varWords[24], sizeof (phba->RandomData)); /* Get adapter VPD information */ lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); if (!lpfc_vpd_data) goto out_free_mbox; do { lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0441 VPD not present on adapter, " "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } /* dump mem may return a zero when finished or we got a * mailbox error, either way we are done. */ if (mb->un.varDmp.word_cnt == 0) break; if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, lpfc_vpd_data + offset, mb->un.varDmp.word_cnt); offset += mb->un.varDmp.word_cnt; } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); lpfc_parse_vpd(phba, lpfc_vpd_data, offset); kfree(lpfc_vpd_data); out_free_mbox: mempool_free(pmb, phba->mbox_mem_pool); return 0; } /** * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the completion handler for driver's configuring asynchronous event * mailbox command to the device. If the mailbox command returns successfully, * it will set internal async event support flag to 1; otherwise, it will * set internal async event support flag to 0. 
**/ static void lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) phba->temp_sensor_support = 1; else phba->temp_sensor_support = 0; mempool_free(pmboxq, phba->mbox_mem_pool); return; } /** * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the completion handler for dump mailbox command for getting * wake up parameters. When this command complete, the response contain * Option rom version of the HBA. This function translate the version number * into a human readable string and store it in OptionROMVersion. **/ static void lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct prog_id *prg; uint32_t prog_id_word; char dist = ' '; /* character array used for decoding dist type. */ char dist_char[] = "nabx"; if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } prg = (struct prog_id *) &prog_id_word; /* word 7 contain option rom version */ prog_id_word = pmboxq->u.mb.un.varWords[7]; /* Decode the Option rom version word to a readable string */ if (prg->dist < 4) dist = dist_char[prg->dist]; if ((prg->dist == 3) && (prg->num == 0)) sprintf(phba->OptionROMVersion, "%d.%d%d", prg->ver, prg->rev, prg->lev); else sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", prg->ver, prg->rev, prg->lev, dist, prg->num); mempool_free(pmboxq, phba->mbox_mem_pool); return; } /** * lpfc_config_port_post - Perform lpfc initialization after config port * @phba: pointer to lpfc hba data structure. * * This routine will do LPFC initialization after the CONFIG_PORT mailbox * command call. It performs all internal resource and state setups on the * port: post IOCB buffers, enable appropriate host interrupt attentions, * ELS ring timers, etc. * * Return codes * 0 - success. * Any other value - error. 
**/ int lpfc_config_port_post(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_dmabuf *mp; struct lpfc_sli *psli = &phba->sli; uint32_t status, timeout; int i, j; int rc; spin_lock_irq(&phba->hbalock); /* * If the Config port completed correctly the HBA is not * over heated any more. */ if (phba->over_temp_state == HBA_OVER_TEMP) phba->over_temp_state = HBA_NORMAL_TEMP; spin_unlock_irq(&phba->hbalock); pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->u.mb; /* Get login parameters for NID. */ lpfc_read_sparam(phba, pmb, 0); pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0448 Adapter failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mp = (struct lpfc_dmabuf *) pmb->context1; mempool_free( pmb, phba->mbox_mem_pool); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); return -EIO; } mp = (struct lpfc_dmabuf *) pmb->context1; memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); pmb->context1 = NULL; if (phba->cfg_soft_wwnn) u64_to_wwn(phba->cfg_soft_wwnn, vport->fc_sparam.nodeName.u.wwn); if (phba->cfg_soft_wwpn) u64_to_wwn(phba->cfg_soft_wwpn, vport->fc_sparam.portName.u.wwn); memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof (struct lpfc_name)); memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); /* Update the fc_host data structures with new wwn. 
*/ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); fc_host_max_npiv_vports(shost) = phba->max_vpi; /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ if (phba->SerialNumber[0] == 0) { uint8_t *outptr; outptr = &vport->fc_nodename.u.s.IEEE[0]; for (i = 0; i < 12; i++) { status = *outptr++; j = ((status & 0xf0) >> 4); if (j <= 9) phba->SerialNumber[i] = (char)((uint8_t) 0x30 + (uint8_t) j); else phba->SerialNumber[i] = (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); i++; j = (status & 0xf); if (j <= 9) phba->SerialNumber[i] = (char)((uint8_t) 0x30 + (uint8_t) j); else phba->SerialNumber[i] = (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); } } lpfc_read_config(phba, pmb); pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0453 Adapter failed to init, mbxCmd x%x " "READ_CONFIG, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mempool_free( pmb, phba->mbox_mem_pool); return -EIO; } /* Check if the port is disabled */ lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) phba->cfg_hba_queue_depth = (mb->un.varRdConfig.max_xri + 1) - lpfc_sli4_get_els_iocb_cnt(phba); phba->lmt = mb->un.varRdConfig.lmt; /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); if ((phba->cfg_link_speed > LINK_SPEED_10G) || ((phba->cfg_link_speed == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || ((phba->cfg_link_speed == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || ((phba->cfg_link_speed == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || ((phba->cfg_link_speed == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || ((phba->cfg_link_speed == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) { /* Reset link 
speed to auto */ lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, "1302 Invalid speed for this board: " "Reset link speed to auto: x%x\n", phba->cfg_link_speed); phba->cfg_link_speed = LINK_SPEED_AUTO; } phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ELS ring till hba_state is READY */ if (psli->ring[psli->extra_ring].cmdringaddr) psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; if (psli->ring[psli->fcp_ring].cmdringaddr) psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; if (psli->ring[psli->next_ring].cmdringaddr) psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ if (phba->sli_rev != 3) lpfc_post_rcv_buf(phba); /* * Configure HBA MSI-X attention conditions to messages if MSI-X mode */ if (phba->intr_type == MSIX) { rc = lpfc_config_msi(phba, pmb); if (rc) { mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ status = readl(phba->HCregaddr); status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) status |= HC_R0INT_ENA; if (psli->num_rings > 1) status |= HC_R1INT_ENA; if (psli->num_rings > 2) status |= HC_R2INT_ENA; if (psli->num_rings > 3) status |= HC_R3INT_ENA; if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && (phba->cfg_poll & DISABLE_FCP_RING_INT)) status &= ~(HC_R0INT_ENA); writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); /* Set up heart beat (HB) timer 
*/ mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2598 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2599 Adapter failed to issue DOWN_LINK" " mbox command rc 0x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } else { lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; lpfc_set_loopback_flag(phba); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0454 Adapter failed to init, mbxCmd x%x " "INIT_LINK, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* Clear all pending interrupts */ writel(0xffffffff, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ phba->link_state = LPFC_HBA_ERROR; if (rc != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); lpfc_config_async(phba, pmb, LPFC_ELS_RING); pmb->mbox_cmpl = lpfc_config_async_cmpl; pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0456 Adapter failed to issue " "ASYNCEVT_ENABLE mbox status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } /* Get Option rom version */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); lpfc_dump_wakeup_param(phba, pmb); 
pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " "to get Option ROM version status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } return 0; } /** * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do LPFC uninitialization before the HBA is reset when * bringing down the SLI Layer. * * Return codes * 0 - success. * Any other value - error. **/ int lpfc_hba_down_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; if (phba->sli_rev <= LPFC_SLI_REV3) { /* Disable interrupts */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } if (phba->pport->load_flag & FC_UNLOADING) lpfc_cleanup_discovery_resources(phba->pport); else { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_cleanup_discovery_resources(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } return 0; } /** * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring * down the SLI Layer. * * Return codes * 0 - sucess. * Any other value - error. 
**/ static int lpfc_hba_down_post_s3(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; LIST_HEAD(completions); int i; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ pring = &psli->ring[LPFC_ELS_RING]; list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { list_del(&mp->list); pring->postbufq_cnt--; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } } spin_lock_irq(&phba->hbalock); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on txcmplq as it will NEVER complete. */ list_splice_init(&pring->txcmplq, &completions); pring->txcmplq_cnt = 0; spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); lpfc_sli_abort_iocb_ring(phba, pring); spin_lock_irq(&phba->hbalock); } spin_unlock_irq(&phba->hbalock); return 0; } /** * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring * down the SLI Layer. * * Return codes * 0 - sucess. * Any other value - error. **/ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); int ret; unsigned long iflag = 0; ret = lpfc_hba_down_post_s3(phba); if (ret) return ret; /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be * on the lpfc_sgl_list so that it can either be freed if the * driver is unloading or reposted if the driver is restarting * the port. 
*/ spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ /* scsl_buf_list */ /* abts_sgl_list_lock required because worker thread uses this * list. */ spin_lock(&phba->sli4_hba.abts_sgl_list_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); /* abts_scsi_buf_list_lock required because worker thread uses this * list. */ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, &aborts); spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(psb, psb_next, &aborts, list) { psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); list_splice(&aborts, &phba->lpfc_scsi_buf_list); spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); return 0; } /** * lpfc_hba_down_post - Wrapper func for hba down post routine * @phba: pointer to lpfc HBA data structure. * * This routine wraps the actual SLI3 or SLI4 routine for performing * uninitialization after the HBA is reset when bring down the SLI Layer. * * Return codes * 0 - sucess. * Any other value - error. **/ int lpfc_hba_down_post(struct lpfc_hba *phba) { return (*phba->lpfc_hba_down_post)(phba); } /** * lpfc_hb_timeout - The HBA-timer timeout handler * @ptr: unsigned long holds the pointer to lpfc hba data structure. * * This is the HBA-timer timeout handler registered to the lpfc driver. When * this timer fires, a HBA timeout event shall be posted to the lpfc driver * work-port-events bitmap and the worker thread is notified. This timeout * event will be used by the worker thread to invoke the actual timeout * handler routine, lpfc_hb_timeout_handler. Any periodical operations will * be performed in the timeout handler and the HBA timeout event bit shall * be cleared by the worker thread after it has taken the event bitmap out. 
**/ static void lpfc_hb_timeout(unsigned long ptr) { struct lpfc_hba *phba; uint32_t tmo_posted; unsigned long iflag; phba = (struct lpfc_hba *)ptr; /* Check for heart beat timeout conditions */ spin_lock_irqsave(&phba->pport->work_port_lock, iflag); tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; if (!tmo_posted) phba->pport->work_port_events |= WORKER_HB_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); /* Tell the worker thread there is work to do */ if (!tmo_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the callback function to the lpfc heart-beat mailbox command. * If configured, the lpfc driver issues the heart-beat mailbox command to * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the * heart-beat mailbox command is issued, the driver shall set up heart-beat * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks * heart-beat outstanding state. Once the mailbox command comes back and * no error conditions detected, the heart-beat mailbox command timer is * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding * state is cleared for the next heart-beat. If the timer expired with the * heart-beat outstanding state set, the driver will put the HBA offline. 
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	/* Heart-beat reply arrived: clear the outstanding marker so the
	 * timeout handler knows the HBA responded.
	 */
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	/* Re-arm the short-interval timer only while the port is online,
	 * not in HBA error state, and the driver is not unloading.
	 */
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	/* Nothing to do when the port is in error, unloading or offline. */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	/* If I/O completed within the last interval, the HBA is clearly
	 * alive; just re-arm the timer (short interval when idle, long
	 * timeout when a heart-beat mailbox is still outstanding).
	 */
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* Free stale posted ELS buffers when the count has not moved
	 * since the previous timeout (i.e. nothing consumed them).
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
			if (!pmboxq) {
				/* Allocation failed: retry on the next
				 * short interval rather than giving up. */
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			/* Mailbox issued: arm the long timeout and mark the
			 * heart-beat as outstanding until the completion
			 * callback clears it. */
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	/* Quiesce, reset the board, and clean up before declaring error. */
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);

	/* Latch the detailed error status from SLIM for later reporting. */
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/* Post an internal board-error vendor event to the FC transport so
 * management applications are notified of the port error.
 */
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Over-temperature: report, mark the state, go offline. */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treated it as adaptor hardware failure
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	/* Dispatch through the per-SLI-revision jump table entry. */
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	/* Allocate the mailbox and DMA buffer needed to read the link
	 * attention; each failure path unwinds what was acquired so far
	 * via the goto chain below.
	 */
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	/* rc encodes which step failed (1..4) for the log message. */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital
Product Data) * @phba: pointer to lpfc hba data structure. * @vpd: pointer to the vital product data. * @len: length of the vital product data in bytes. * * This routine parses the Vital Product Data (VPD). The VPD is treated as * an array of characters. In this routine, the ModelName, ProgramType, and * ModelDesc, etc. fields of the phba data structure will be populated. * * Return codes * 0 - pointer to the VPD passed in is NULL * 1 - success **/ int lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; int Length; int i, j; int finished = 0; int index = 0; if (!vpd) return 0; /* Vital Product */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0455 Vital Product Data: x%x x%x x%x x%x\n", (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], (uint32_t) vpd[3]); while (!finished && (index < (len - 4))) { switch (vpd[index]) { case 0x82: case 0x91: index += 1; lenlo = vpd[index]; index += 1; lenhi = vpd[index]; index += 1; i = ((((unsigned short)lenhi) << 8) + lenlo); index += i; break; case 0x90: index += 1; lenlo = vpd[index]; index += 1; lenhi = vpd[index]; index += 1; Length = ((((unsigned short)lenhi) << 8) + lenlo); if (Length > len - index) Length = len - index; while (Length > 0) { /* Look for Serial Number */ if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { index += 2; i = vpd[index]; index += 1; j = 0; Length -= (3+i); while(i--) { phba->SerialNumber[j++] = vpd[index++]; if (j == 31) break; } phba->SerialNumber[j] = 0; continue; } else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { phba->vpd_flag |= VPD_MODEL_DESC; index += 2; i = vpd[index]; index += 1; j = 0; Length -= (3+i); while(i--) { phba->ModelDesc[j++] = vpd[index++]; if (j == 255) break; } phba->ModelDesc[j] = 0; continue; } else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { phba->vpd_flag |= VPD_MODEL_NAME; index += 2; i = vpd[index]; index += 1; j = 0; Length -= (3+i); while(i--) { phba->ModelName[j++] = vpd[index++]; if (j == 79) break; } 
phba->ModelName[j] = 0; continue; } else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { phba->vpd_flag |= VPD_PROGRAM_TYPE; index += 2; i = vpd[index]; index += 1; j = 0; Length -= (3+i); while(i--) { phba->ProgramType[j++] = vpd[index++]; if (j == 255) break; } phba->ProgramType[j] = 0; continue; } else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { phba->vpd_flag |= VPD_PORT; index += 2; i = vpd[index]; index += 1; j = 0; Length -= (3+i); while(i--) { phba->Port[j++] = vpd[index++]; if (j == 19) break; } phba->Port[j] = 0; continue; } else { index += 2; i = vpd[index]; index += 1; index += i; Length -= (3 + i); } } finished = 0; break; case 0x78: finished = 1; break; default: index ++; break; } } return(1); } /** * lpfc_get_hba_model_desc - Retrieve HBA device model name and description * @phba: pointer to lpfc hba data structure. * @mdp: pointer to the data structure to hold the derived model name. * @descp: pointer to the data structure to hold the derived description. * * This routine retrieves HBA's description based on its registered PCI device * ID. The @descp passed into this function points to an array of 256 chars. It * shall be returned with the model name, maximum speed, and the host bus type. * The @mdp passed into this function points to an array of 80 chars. When the * function returns, the @mdp will be filled with the model name. 
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;			/* set for FCoE (GigE) models */
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char * name;
		int    max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	/* Both strings already populated: nothing to derive. */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	/* Derive the maximum link speed from the link-media-type bits. */
	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	/* Map the PCI device ID to model name and host bus type. */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/* oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, FCoE Initiator, Port %s",
				m.name,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, m.max_speed,
				(GE) ? "GE" : "Gb",
				m.bus,
				(GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	/* Include buffers that could not be posted on earlier attempts. */
	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for  command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			/* kfree(NULL) is a no-op, so this covers both
			 * allocation failures. */
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		/* Fill in the buffer descriptor entries (BDEs). */
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			/* Issue failed: free both buffers, put the counts
			 * back, and report the shortfall. */
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

/* 32-bit left-rotate by N, used by the SHA-1 style hash below. */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	/* Standard SHA-1 initial hash values (H0..H4). */
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are putting back to the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	/* Message schedule expansion: words 16..79 from words 0..15. */
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	/* 80 rounds in four groups of 20, each with its own round
	 * function and additive constant. */
	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	/* 80-word working array for the hash iteration below. */
	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	/* Seed the working array with the node WWN words. */
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	/* Mix in the host challenge random numbers. */
	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		/* Inactive nodes are revived just long enough to mark them
		 * free-requested and drop the reference that releases them.
		 */
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Fabric nodes get a recovery event first so the state
		 * machine can unwind outstanding logins before removal.
		 */
		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					     NLP_EVT_DEVICE_RM);

	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {

		/* ~30 seconds of 10ms sleeps before giving up and logging
		 * the nodes that refused to go away. */
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport.
This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	/* Cancels the discovery timeout via the vport helper. */
	lpfc_can_disctmo(vport);
	return;
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	/* Stop the physical port's timers first, then the HBA-wide ones. */
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
* * This routine initializes the HBA and brings a HBA online. During this * process, the management interface is blocked to prevent user space access * to the HBA interfering with the driver initialization. * * Return codes * 0 - successful * 1 - failed **/ int lpfc_online(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; int i; if (!phba) return 0; vport = phba->pport; if (!(vport->fc_flag & FC_OFFLINE_MODE)) return 0; lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0458 Bring Adapter online\n"); lpfc_block_mgmt_io(phba); if (!lpfc_sli_queue_setup(phba)) { lpfc_unblock_mgmt_io(phba); return 1; } if (phba->sli_rev == LPFC_SLI_REV4) { if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; if (phba->sli_rev == LPFC_SLI_REV4) vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); lpfc_unblock_mgmt_io(phba); return 0; } /** * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked * @phba: pointer to lpfc hba data structure. * * This routine marks a HBA's management interface as not blocked. Once the * HBA's management interface is marked as not blocked, all the user space * access to the HBA, whether they are from sysfs interface or libdfc * interface will be allowed. 
The HBA is set to block the management interface * when the driver prepares the HBA interface for online or offline and then * set to unblock the management interface afterwards. **/ void lpfc_unblock_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; spin_unlock_irqrestore(&phba->hbalock, iflag); } /** * lpfc_offline_prep - Prepare a HBA to be brought offline * @phba: pointer to lpfc hba data structure. * * This routine is invoked to prepare a HBA to be brought offline. It performs * unregistration login to all the nodes on all vports and flushes the mailbox * queue to make it ready to be brought offline. **/ void lpfc_offline_prep(struct lpfc_hba * phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; int i; if (vport->fc_flag & FC_OFFLINE_MODE) return; lpfc_block_mgmt_io(phba); lpfc_linkdown(phba); /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; if (vports[i]->load_flag & FC_UNLOADING) continue; vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RM); } spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); } } } lpfc_destroy_vport_work_array(phba, vports); lpfc_sli_mbox_sys_shutdown(phba); } /** * lpfc_offline - Bring a HBA offline * @phba: pointer to lpfc hba data structure. 
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	/* A non-PCI parent device means this is an NPIV virtual port */
	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	/* DMA is always done against the physical function's device */
	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* Never hold off the scan longer than 30 seconds */
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	/* Give up waiting after 15 seconds if the link never came up */
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	/* Give an empty map a short grace period before declaring done */
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	/* FC-4 type bits: word 2 bit = FCP, word 7 bit = vendor unique */
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
	/* Hard clear it for now, shall have more graceful way to wait later */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}

/**
 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the driver default fcf record from
 * the port. This routine currently acts on FCF Index 0.
 *
 **/
void
lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
	uint32_t mbox_tmo, req_len;
	uint32_t shdr_status, shdr_add_status;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2020 Failed to allocate mbox for ADD_FCF cmd\n");
		return;
	}

	/* Payload length excludes the common SLI4 config header */
	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
		  sizeof(struct lpfc_sli4_cfg_mhdr);
	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
			      req_len, LPFC_SLI4_MBX_EMBED);
	/*
	 * In phase 1, there is a single FCF index, 0. In phase2, the driver
	 * supports multiple FCF indices.
	 */
	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
	       phba->fcf.fcf_indx);

	/* Poll for completion when interrupts are not yet enabled */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &del_fcf_record->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &del_fcf_record->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2516 DEL FCF of default FCF Index failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
	}
	/* On timeout the mailbox still belongs to the firmware; do not free */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
* * Return: Link-attention status in terms of base driver's coding. **/ static uint16_t lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { uint16_t latt_fault; switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { case LPFC_ASYNC_LINK_FAULT_NONE: case LPFC_ASYNC_LINK_FAULT_LOCAL: case LPFC_ASYNC_LINK_FAULT_REMOTE: latt_fault = 0; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0398 Invalid link fault code: x%x\n", bf_get(lpfc_acqe_link_fault, acqe_link)); latt_fault = MBXERR_ERROR; break; } return latt_fault; } /** * lpfc_sli4_parse_latt_type - Parse sli4 link attention type * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to parse the SLI4 link attention type and translate it * into the base driver's link attention type coding. * * Return: Link attention type in terms of base driver's coding. **/ static uint8_t lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { uint8_t att_type; switch (bf_get(lpfc_acqe_link_status, acqe_link)) { case LPFC_ASYNC_LINK_STATUS_DOWN: case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: att_type = AT_LINK_DOWN; break; case LPFC_ASYNC_LINK_STATUS_UP: /* Ignore physical link up events - wait for logical link up */ att_type = AT_RESERVED; break; case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: att_type = AT_LINK_UP; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0399 Invalid link attention type: x%x\n", bf_get(lpfc_acqe_link_status, acqe_link)); att_type = AT_RESERVED; break; } return att_type; } /** * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to parse the SLI4 link-attention link speed and translate * it into the base driver's link-attention link speed coding. * * Return: Link-attention link speed in terms of base driver's coding. 
 **/
static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
		link_speed = LA_UNKNW_LINK;
		break;
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
		link_speed = LA_UNKNW_LINK;
		break;
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LA_UNKNW_LINK;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LA_1GHZ_LINK;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LA_10GHZ_LINK;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LA_UNKNW_LINK;
		break;
	}
	return link_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous link event. It builds a
 * pseudo READ_LA mailbox completion from the ACQE contents and hands it to
 * the common (SLI3-style) link attention handler.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	READ_LA_VAR *la;
	uint8_t att_type;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE.
	 * Ownership of mp passes to pmb here; the completion path frees it.
	 */
	lpfc_read_la(phba, pmb, mp);
	pmb->vport = phba->pport;

	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
	la->eventTag = acqe_link->event_tag;
	la->attType = att_type;
	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);

	/* Fake the following irrelevant fields */
	la->topology = TOPOLOGY_PT_PT;
	la->granted_AL_PA = 0;
	la->il = 0;
	la->pb = 0;
	la->fa = 0;
	la->mm = 0;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.physical =
				bf_get(lpfc_acqe_link_physical, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_la(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_fcoe *acqe_fcoe)
{
	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
	int rc;

	phba->fcoe_eventtag = acqe_fcoe->event_tag;
	switch (event_type) {
	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2546 New FCF found index 0x%x tag 0x%x\n",
			acqe_fcoe->fcf_index,
			acqe_fcoe->event_tag);
		/*
		 * If the current FCF is in discovered state, or
		 * FCF discovery is in progress do nothing.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Read the FCF table and re-discover SAN. */
		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"2547 Read FCF record failed 0x%x\n",
					rc);
		break;

	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
			acqe_fcoe->event_tag);
		break;

	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
		/* note: "fron" typo is in the original message text; kept
		 * byte-identical so the numbered message stays greppable */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2549 FCF disconnected fron network index 0x%x"
			" tag 0x%x\n", acqe_fcoe->fcf_index,
			acqe_fcoe->event_tag);
		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
			break;
		/*
		 * Currently, driver support only one FCF - so treat this as
		 * a link down.
		 */
		lpfc_linkdown(phba);
		/* Unregister FCF if no devices connected to it */
		lpfc_unregister_unused_fcf(phba);
		break;

	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fcoe->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fcoe_evt(phba,
						 &cq_event->cqe.acqe_fcoe);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchrous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
* * Return: 0 if success, otherwise -ENODEV **/ int lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { int rc; /* Set up lpfc PCI-device group */ phba->pci_dev_grp = dev_grp; /* The LPFC_PCI_DEV_OC uses SLI4 */ if (dev_grp == LPFC_PCI_DEV_OC) phba->sli_rev = LPFC_SLI_REV4; /* Set up device INIT API function jump table */ rc = lpfc_init_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up SCSI API function jump table */ rc = lpfc_scsi_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up SLI API function jump table */ rc = lpfc_sli_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up MBOX API function jump table */ rc = lpfc_mbox_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; return 0; } /** * lpfc_log_intr_mode - Log the active interrupt mode * @phba: pointer to lpfc hba data structure. * @intr_mode: active interrupt mode adopted. * * This routine it invoked to log the currently used active interrupt mode * to the device. **/ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) { switch (intr_mode) { case 0: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0470 Enable INTx interrupt mode.\n"); break; case 1: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0481 Enabled MSI interrupt mode.\n"); break; case 2: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0480 Enabled MSI-X interrupt mode.\n"); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0482 Illegal interrupt mode.\n"); break; } return; } /** * lpfc_enable_pci_dev - Enable a generic PCI device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the PCI device that is common to all * PCI devices. 
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	if (phba->cfg_enable_bg) {
		/* BlockGuard needs room for the protection scatter list too */
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
* * Return codes * 0 - sucessful * other values - error **/ static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { struct lpfc_sli *psli; int rc; int i, hbq_count; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); if (rc) return -ENODEV; /* * Initialize timers used by driver */ /* Heartbeat timer */ init_timer(&phba->hb_tmofunc); phba->hb_tmofunc.function = lpfc_hb_timeout; phba->hb_tmofunc.data = (unsigned long)phba; psli = &phba->sli; /* MBOX heartbeat timer */ init_timer(&psli->mbox_tmo); psli->mbox_tmo.function = lpfc_mbox_timeout; psli->mbox_tmo.data = (unsigned long) phba; /* Fabric block timer */ init_timer(&phba->fabric_block_timer); phba->fabric_block_timer.function = lpfc_fabric_block_timeout; phba->fabric_block_timer.data = (unsigned long) phba; /* EA polling mode timer */ init_timer(&phba->eratt_poll); phba->eratt_poll.function = lpfc_poll_eratt; phba->eratt_poll.data = (unsigned long) phba; /* * We need to do a READ_CONFIG mailbox command here before * calling lpfc_get_cfgparam. For VFs this will report the * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. * All of the resources allocated * for this Port are tied to these values. */ /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after the read_config mbox */ phba->max_vports = 0; /* Program the default value of vlan_id and fc_map */ phba->valid_vlan = 0; phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; /* * Since the sg_tablesize is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. * 2 segments are added since the IOCB needs a command and response bde. * To insure that the scsi sgl does not cross a 4k page boundary only * sgl sizes of 1k, 2k, 4k, and 8k are supported. 
* Table of sgl sizes and seg_cnt: * sgl size, sg_seg_cnt total seg * 1k 50 52 * 2k 114 116 * 4k 242 244 * 8k 498 500 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 */ if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) phba->cfg_sg_seg_cnt = 50; else if (phba->cfg_sg_seg_cnt <= 114) phba->cfg_sg_seg_cnt = 114; else if (phba->cfg_sg_seg_cnt <= 242) phba->cfg_sg_seg_cnt = 242; else phba->cfg_sg_seg_cnt = 498; phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); phba->cfg_sg_dma_buf_size += ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); for (i = 0; i < hbq_count; ++i) INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); INIT_LIST_HEAD(&phba->rb_pend_list); phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
*/ /* Initialize the Abort scsi buffer list used by driver */ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); /* This abort list used by worker thread */ spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); /* * Initialize dirver internal slow-path work queues */ /* Driver internel slow-path CQ Event pool */ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); /* Response IOCB work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); /* Asynchronous event CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); /* Fast-path XRI aborted CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); /* Slow-path XRI aborted CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Receive queue CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); /* Initialize the driver internal SLI layer lists. */ lpfc_sli_setup(phba); lpfc_sli_queue_setup(phba); /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) return -ENOMEM; /* Create the bootstrap mailbox command */ rc = lpfc_create_bootstrap_mbox(phba); if (unlikely(rc)) goto out_free_mem; /* Set up the host's endian order with the device. */ rc = lpfc_setup_endian_order(phba); if (unlikely(rc)) goto out_free_bsmbx; /* Set up the hba's configuration parameters. 
*/ rc = lpfc_sli4_read_config(phba); if (unlikely(rc)) goto out_free_bsmbx; /* Perform a function reset */ rc = lpfc_pci_function_reset(phba); if (unlikely(rc)) goto out_free_bsmbx; /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) goto out_free_bsmbx; /* Create driver internal CQE event pool */ rc = lpfc_sli4_cq_event_pool_create(phba); if (rc) goto out_destroy_queue; /* Initialize and populate the iocb list per host */ rc = lpfc_init_sgl_list(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1400 Failed to initialize sgl list.\n"); goto out_destroy_cq_event_pool; } rc = lpfc_init_active_sgl_array(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1430 Failed to initialize sgl list.\n"); goto out_free_sgl_list; } rc = lpfc_sli4_init_rpi_hdrs(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1432 Failed to initialize rpi headers.\n"); goto out_free_active_sgl; } phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * phba->cfg_fcp_eq_count), GFP_KERNEL); if (!phba->sli4_hba.fcp_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for fast-path " "per-EQ handle array\n"); goto out_remove_rpi_hdrs; } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * phba->sli4_hba.cfg_eqn), GFP_KERNEL); if (!phba->sli4_hba.msix_entries) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2573 Failed allocate memory for msi-x " "interrupt vector entries\n"); goto out_free_fcp_eq_hdl; } return rc; out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: lpfc_free_active_sgl(phba); out_free_sgl_list: lpfc_free_sgl_list(phba); out_destroy_cq_event_pool: lpfc_sli4_cq_event_pool_destroy(phba); out_destroy_queue: lpfc_sli4_queue_destroy(phba); out_free_bsmbx: lpfc_destroy_bootstrap_mbox(phba); out_free_mem: lpfc_mem_free(phba); return rc; } /** * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for 
SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Teardown runs roughly in reverse order of
	 * lpfc_sli4_driver_resource_setup().
	 */

	/* unregister default FCFI from the HBA */
	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);

	/* Free the default FCR table */
	lpfc_sli_remove_dflt_fcf(phba);

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_sgl_list(phba);

	/* Free the SCSI sgl management array */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);

	/* Free the SLI4 queues */
	lpfc_sli4_queue_destroy(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list)
		kfree(conn_entry);

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
**/ int lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; phba->lpfc_stop_port = lpfc_stop_port_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; phba->lpfc_stop_port = lpfc_stop_port_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1431 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; break; } return 0; } /** * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources before the * device specific resource setup to support the HBA device it attached to. * * Return codes * 0 - sucessful * other values - error **/ static int lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) { /* * Driver resources common to all SLI revisions */ atomic_set(&phba->fast_event_count, 0); spin_lock_init(&phba->hbalock); /* Initialize ndlp management spinlock */ spin_lock_init(&phba->ndlp_lock); INIT_LIST_HEAD(&phba->port_list); INIT_LIST_HEAD(&phba->work_list); init_waitqueue_head(&phba->wait_4_mlo_m_q); /* Initialize the wait queue head for the kernel thread */ init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ spin_lock_init(&phba->scsi_buf_list_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); /* Initialize list to save ELS buffers */ INIT_LIST_HEAD(&phba->elsbuf); /* Initialize FCF connection rec list */ INIT_LIST_HEAD(&phba->fcf_conn_rec_list); return 0; } /** * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
				       "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	/* Walk under hbalock so entries cannot be added or removed while the
	 * list is being torn down.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate and add to the list.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.
*/ INIT_LIST_HEAD(&phba->lpfc_iocb_list); for (i = 0; i < iocb_count; i++) { iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count. Unloading driver.\n", __func__, i, LPFC_IOCB_LIST_CNT); goto out_free_iocbq; } iotag = lpfc_sli_next_iotag(phba, iocbq_entry); if (iotag == 0) { kfree(iocbq_entry); printk(KERN_ERR "%s: failed to allocate IOTAG. " "Unloading driver.\n", __func__); goto out_free_iocbq; } iocbq_entry->sli4_xritag = NO_XRI; spin_lock_irq(&phba->hbalock); list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); phba->total_iocbq_bufs++; spin_unlock_irq(&phba->hbalock); } return 0; out_free_iocbq: lpfc_free_iocb_list(phba); return -ENOMEM; } /** * lpfc_free_sgl_list - Free sgl list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver's sgl list and memory. **/ static void lpfc_free_sgl_list(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; LIST_HEAD(sglq_list); int rc = 0; spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { list_del(&sglq_entry->list); lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); phba->sli4_hba.total_sglq_bufs--; } rc = lpfc_sli4_remove_all_sgl_pages(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2005 Unable to deregister pages from HBA: %x", rc); } kfree(phba->sli4_hba.lpfc_els_sgl_array); } /** * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate the driver's active sgl memory. * This array will hold the sglq_entry's for active IOs. 
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	/* One slot per possible XRI; indexed by xritag when IOs are active */
	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 lpfc_init_sgl_list els %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF.
*/ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); /* Sanity check on XRI management */ if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2562 No room left for SCSI XRI allocation: " "max_xri=%d, els_xri=%d\n", phba->sli4_hba.max_cfg_param.max_xri, els_xri_cnt); return -ENOMEM; } /* Allocate memory for the ELS XRI management array */ phba->sli4_hba.lpfc_els_sgl_array = kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), GFP_KERNEL); if (!phba->sli4_hba.lpfc_els_sgl_array) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2401 Failed to allocate memory for ELS " "XRI management array of size %d.\n", els_xri_cnt); return -ENOMEM; } /* Keep the SCSI XRI into the XRI management array */ phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; phba->sli4_hba.scsi_xri_cnt = 0; phba->sli4_hba.lpfc_scsi_psb_array = kzalloc((sizeof(struct lpfc_scsi_buf *) * phba->sli4_hba.scsi_xri_max), GFP_KERNEL); if (!phba->sli4_hba.lpfc_scsi_psb_array) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2563 Failed to allocate memory for SCSI " "XRI management array of size %d.\n", phba->sli4_hba.scsi_xri_max); kfree(phba->sli4_hba.lpfc_els_sgl_array); return -ENOMEM; } for (i = 0; i < els_xri_cnt; i++) { sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); if (sglq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d sgls of " "expected %d count. 
Unloading driver.\n", __func__, i, els_xri_cnt); goto out_free_mem; } sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); if (sglq_entry->sli4_xritag == NO_XRI) { kfree(sglq_entry); printk(KERN_ERR "%s: failed to allocate XRI.\n" "Unloading driver.\n", __func__); goto out_free_mem; } sglq_entry->buff_type = GEN_BUFF_TYPE; sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); if (sglq_entry->virt == NULL) { kfree(sglq_entry); printk(KERN_ERR "%s: failed to allocate mbuf.\n" "Unloading driver.\n", __func__); goto out_free_mem; } sglq_entry->sgl = sglq_entry->virt; memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); /* The list order is used by later block SGL registraton */ spin_lock_irq(&phba->hbalock); list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; phba->sli4_hba.total_sglq_bufs++; spin_unlock_irq(&phba->hbalock); } return 0; out_free_mem: kfree(phba->sli4_hba.lpfc_scsi_psb_array); lpfc_free_sgl_list(phba); return -ENOMEM; } /** * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. This routine * posts a PAGE_SIZE memory region to the port to hold up to * PAGE_SIZE modulo 64 rpi context headers. * No locks are held here because this is an initialization routine * called only from probe or lpfc_online when interrupts are not * enabled and the driver is reinitializing the device. * * Return codes * 0 - sucessful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) { int rc = 0; int longs; uint16_t rpi_count; struct lpfc_rpi_hdr *rpi_hdr; INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); /* * Provision an rpi bitmask range for discovery. The total count * is the difference between max and base + 1. 
*/ rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + phba->sli4_hba.max_cfg_param.max_rpi - 1; longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (!phba->sli4_hba.rpi_bmask) return -ENOMEM; rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0391 Error during rpi post operation\n"); lpfc_sli4_remove_rpis(phba); rc = -ENODEV; } return rc; } /** * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate a single 4KB memory region to * support rpis and stores them in the phba. This single region * provides support for up to 64 rpis. The region is used globally * by the device. * * Returns: * A valid rpi hdr on success. * A NULL pointer on any failure. **/ struct lpfc_rpi_hdr * lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) { uint16_t rpi_limit, curr_rpi_range; struct lpfc_dmabuf *dmabuf; struct lpfc_rpi_hdr *rpi_hdr; rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + phba->sli4_hba.max_cfg_param.max_rpi - 1; spin_lock_irq(&phba->hbalock); curr_rpi_range = phba->sli4_hba.next_rpi; spin_unlock_irq(&phba->hbalock); /* * The port has a limited number of rpis. The increment here * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value * and to allow the full max_rpi range per port. */ if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) return NULL; /* * First allocate the protocol header region for the port. The * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
*/ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) return NULL; dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { rpi_hdr = NULL; goto err_free_dmabuf; } memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { rpi_hdr = NULL; goto err_free_coherent; } /* Save the rpi header data for cleanup later. */ rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); if (!rpi_hdr) goto err_free_coherent; rpi_hdr->dmabuf = dmabuf; rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; rpi_hdr->page_count = 1; spin_lock_irq(&phba->hbalock); rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); /* * The next_rpi stores the next module-64 rpi value to post * in any subsequent rpi memory region postings. */ phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; spin_unlock_irq(&phba->hbalock); return rpi_hdr; err_free_coherent: dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, dmabuf->virt, dmabuf->phys); err_free_dmabuf: kfree(dmabuf); return NULL; } /** * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions * @phba: pointer to lpfc hba data structure. * * This routine is invoked to remove all memory resources allocated * to support rpis. This routine presumes the caller has released all * rpis consumed by fabric or port logins and is prepared to have * the header pages removed. 
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	/* Unlink and release every posted rpi header region */
	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}

	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
	/* NOTE(review): sizeof(*rpi_bmask) is the size of a single
	 * unsigned long, so only the first word of the bitmask is cleared
	 * here, while lpfc_sli4_init_rpi_hdrs() allocated "longs" words --
	 * confirm whether the full allocation should be zeroed.
	 */
	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	/* NOTE(review): the "< 0" test assumes brd_no is a signed field;
	 * verify against the struct lpfc_hba declaration.
	 */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}

	mutex_init(&phba->ct_event_mutex);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
**/ static void lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) { int pagecnt = 10; if (lpfc_prot_mask && lpfc_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " "SCSI layer\n"); scsi_host_set_prot(shost, lpfc_prot_mask); scsi_host_set_guard(shost, lpfc_prot_guard); } if (!_dump_buf_data) { while (pagecnt) { spin_lock_init(&_dump_buf_lock); _dump_buf_data = (char *) __get_free_pages(GFP_KERNEL, pagecnt); if (_dump_buf_data) { printk(KERN_ERR "BLKGRD allocated %d pages for " "_dump_buf_data at 0x%p\n", (1 << pagecnt), _dump_buf_data); _dump_buf_data_order = pagecnt; memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) << pagecnt)); break; } else --pagecnt; } if (!_dump_buf_data_order) printk(KERN_ERR "BLKGRD ERROR unable to allocate " "memory for hexdump\n"); } else printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" "\n", _dump_buf_data); if (!_dump_buf_dif) { while (pagecnt) { _dump_buf_dif = (char *) __get_free_pages(GFP_KERNEL, pagecnt); if (_dump_buf_dif) { printk(KERN_ERR "BLKGRD allocated %d pages for " "_dump_buf_dif at 0x%p\n", (1 << pagecnt), _dump_buf_dif); _dump_buf_dif_order = pagecnt; memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) << pagecnt)); break; } else --pagecnt; } if (!_dump_buf_dif_order) printk(KERN_ERR "BLKGRD ERROR unable to allocate " "memory for hexdump\n"); } else printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", _dump_buf_dif); } /** * lpfc_post_init_setup - Perform necessary device post initialization setup. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to perform all the necessary post initialization * setup for the device. 
**/ static void lpfc_post_init_setup(struct lpfc_hba *phba) { struct Scsi_Host *shost; struct lpfc_adapter_event_header adapter_event; /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); /* * hba setup may have changed the hba_queue_depth so we need to * adjust the value of can_queue. */ shost = pci_get_drvdata(phba->pcidev); shost->can_queue = phba->cfg_hba_queue_depth - 10; if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) lpfc_setup_bg(phba, shost); lpfc_host_attrib_init(shost); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(shost->host_lock); lpfc_poll_start_timer(phba); spin_unlock_irq(shost->host_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0428 Perform SCSI scan\n"); /* Send board arrival event to upper layer */ adapter_event.event_type = FC_REG_ADAPTER_EVENT; adapter_event.subcategory = LPFC_EVENT_ARRIVAL; fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(adapter_event), (char *) &adapter_event, LPFC_NL_VENDOR_ID); return; } /** * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the PCI device memory space for device * with SLI-3 interface spec. * * Return codes * 0 - sucessful * other values - error **/ static int lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) { struct pci_dev *pdev; unsigned long bar0map_len, bar2map_len; int i, hbq_count; void *ptr; int error = -ENODEV; /* Obtain PCI device reference */ if (!phba->pcidev) return error; else pdev = phba->pcidev; /* Set the device DMA mask size */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { return error; } } /* Get the bus address of Bar0 and Bar2 and the number of bytes * required by each mapping. 
*/ phba->pci_bar0_map = pci_resource_start(pdev, 0); bar0map_len = pci_resource_len(pdev, 0); phba->pci_bar2_map = pci_resource_start(pdev, 2); bar2map_len = pci_resource_len(pdev, 2); /* Map HBA SLIM to a kernel virtual address. */ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); if (!phba->slim_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLIM memory.\n"); goto out; } /* Map HBA Control Registers to a kernel virtual address. */ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->ctrl_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for HBA control registers.\n"); goto out_iounmap_slim; } /* Allocate memory for SLI-2 structures */ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, &phba->slim2p.phys, GFP_KERNEL); if (!phba->slim2p.virt) goto out_iounmap; memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); phba->IOCBs = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, IOCBs)); phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, lpfc_sli_hbq_size(), &phba->hbqslimp.phys, GFP_KERNEL); if (!phba->hbqslimp.virt) goto out_free_slim; hbq_count = lpfc_sli_hbq_count(); ptr = phba->hbqslimp.virt; for (i = 0; i < hbq_count; ++i) { phba->hbqs[i].hbq_virt = ptr; INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); ptr += (lpfc_hbq_defs[i]->entry_count * sizeof(struct lpfc_hbq_entry)); } phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); INIT_LIST_HEAD(&phba->rb_pend_list); phba->MBslimaddr = phba->slim_memmap_p; phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; phba->HCregaddr = 
phba->ctrl_regs_memmap_p + HC_REG_OFFSET; return 0; out_free_slim: dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p.virt, phba->slim2p.phys); out_iounmap: iounmap(phba->ctrl_regs_memmap_p); out_iounmap_slim: iounmap(phba->slim_memmap_p); out: return error; } /** * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the PCI device memory space for device * with SLI-3 interface spec. **/ static void lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) { struct pci_dev *pdev; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; /* Free coherent DMA memory allocated */ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, phba->hbqslimp.phys); dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p.virt, phba->slim2p.phys); /* I/O memory unmap */ iounmap(phba->ctrl_regs_memmap_p); iounmap(phba->slim_memmap_p); return; } /** * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status * @phba: pointer to lpfc hba data structure. * * This routine is invoked to wait for SLI4 device Power On Self Test (POST) * done and check status. * * Return 0 if successful, otherwise -ENODEV. 
**/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
	uint32_t onlnreg0, onlnreg1;
	int i, port_error = -ENODEV;

	/* Register map must be set up before POST can be checked */
	if (!phba->sli4_hba.STAregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds (3000 * 10ms) for POST done and ready */
	for (i = 0; i < 3000; i++) {
		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
		/* Encounter fatal POST error, break out */
		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_ARMFW_READY ==
		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
			port_error = 0;
			break;
		}
		msleep(10);
	}

	/* Decode the host status register fields for diagnosis */
	if (port_error)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Failure HBA POST Status: sta_reg=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
			bf_get(lpfc_hst_state_perr, &sta_reg),
			bf_get(lpfc_hst_state_sfi, &sta_reg),
			bf_get(lpfc_hst_state_nip, &sta_reg),
			bf_get(lpfc_hst_state_ipc, &sta_reg),
			bf_get(lpfc_hst_state_xrom, &sta_reg),
			bf_get(lpfc_hst_state_dl, &sta_reg),
			bf_get(lpfc_hst_state_port_status, &sta_reg));

	/* Log device information */
	scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
			"FeatureL1=0x%x, FeatureL2=0x%x\n",
			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
			bf_get(lpfc_scratchpad_slirev, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));

	/* With unrecoverable error, log the error message and return error */
	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1422 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"online0_reg=0x%x, online1_reg=0x%x\n",
					uerrlo_reg.word0, uerrhi_reg.word0,
					onlnreg0, onlnreg1);
		}
		return -ENODEV;
	}

	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map: unrecoverable-error, online-status and scratchpad registers,
 * all as fixed offsets from the mapped config-register BAR.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_LO;
	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_HI;
	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE0;
	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE1;
	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_SCRATCHPAD;
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map: host state, interrupt status/mask and interrupt status clear
 * registers, as fixed offsets from the mapped control-register BAR.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_STATE;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_ISCR0;
	return;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
**/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	/* NOTE(review): boundary check uses '>' — confirm LPFC_VIR_FUNC_MAX
	 * itself is a valid vf index rather than a count.
	 */
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	/* Each VF owns one doorbell page; index into it by page size */
	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.  Over-allocate
	 * by (alignment - 1) so an aligned sub-region always fits.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 * NOTE(review): the high half is taken at bit 34 and the low half at
	 * bit 4 (16-byte aligned) — confirm against the SLI-4 BMBX register
	 * layout.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	/* Zero the whole descriptor so stale pointers cannot be reused */
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	uint32_t rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Issue READ_CONFIG by polling; success populates rd_config */
	lpfc_read_config(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		/* Cache every base/count pair reported by the port */
		rd_config = &pmb->u.mqe.un.rd_config;
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.fcfi_base =
			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		/* Seed next-allocation cursors from the reported bases */
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(B:%d M:%d)\n",
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.fcfi_base,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri;
	return rc;
}

/**
 * lpfc_setup_endian_order - Notify the port of the host's endian order.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the host-side endian order to the
 * HBA consistent with the SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
**/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};
	uint32_t status = 0;
	LPFC_MBOXQ_t *mbox;

	/* Grab a mailbox command context from the mailbox memory pool */
	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0492 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/*
	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
	 * words to contain special data values and no other data.
	 */
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
	memcpy(&mbox->u.mqe, &endian_mb_data, sizeof(endian_mb_data));

	/* Issue the command by polling and translate a mailbox-level
	 * failure into -EIO for the caller.
	 */
	status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	if (status != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
				"status x%x\n",
				status);
		status = -EIO;
	}

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
**/
static int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		/* Clamp to what the pci function can supply after reserving
		 * the slow-path WQs; fail if even the minimum does not fit.
		 */
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters (same clamping) */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The number of FCP EQs (%d) is more "
				"than the number of FCP WQs (%d), take "
				"the number of FCP EQs same as than of "
				"WQs (%d)\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/* Create fast-path FCP Event Queue(s) */
	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
			       phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for fast-path "
				"EQ record array\n");
		goto out_free_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/* Create slow-path Unsolicited Receive Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0502 Failed allocate slow-path USOL RX CQ\n");
		goto out_free_els_cq;
	}
	phba->sli4_hba.rxq_cq = qdesc;

	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_rxq_cq;
	}
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	}

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

	/* Error unwind: free everything allocated so far, newest first.
	 * The pre-decremented index loops also handle the fall-through
	 * case where the whole array was populated (index == count).
	 * NOTE(review): the kfree'd record arrays (fcp_wq/fcp_cq/fp_eq)
	 * are not reset to NULL here — confirm callers treat a failed
	 * create as fatal and never re-read them.
	 */
out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_rxq_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.  It frees every queue allocated by lpfc_sli4_queue_create and
 * resets the corresponding pointers to NULL.
**/
static void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int qidx;

	/* Work queues: mailbox first, then ELS, then the FCP WQ array */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Unsolicited receive queues: header and data halves */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;

	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Completion queues: unsolicited RX, ELS, mailbox, FCP CQ array */
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;

	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[qidx]);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Event queues: the fast-path EQ array, then the slow-path EQ */
	for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path Unsolicited Receive Complete Queue */
	if (!phba->sli4_hba.rxq_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0532 USOL RX CQ not allocated\n");
		goto out_destroy_els_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
			    LPFC_RCQ, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0533 Failed setup of slow-path USOL RX CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.rxq_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path FCP Response Complete Queue, one per fast EQ */
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			goto out_destroy_fcp_cq;
		}
		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
				    phba->sli4_hba.fp_eq[fcp_cqidx],
				    LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent eq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
	}

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		goto out_destroy_fcp_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Set up fast-path FCP Work Queue */
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_cq_index],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
		/* Round robin FCP Work Queue's Completion Queue assignment */
		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.rxq_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.rxq_cq->queue_id);
	return 0;

	/* Error unwind: destroy queues already set up, newest first.
	 * Pre-decremented loops also cover fall-through where the whole
	 * array was set up (index == count).
	 */
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
	return rc;
}

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation, issuing the destroy mailbox command for each queue that was
 * set up by lpfc_sli4_queue_setup.
**/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int qidx;

	/*
	 * Tear down the work-queue side first: mailbox MQ, ELS WQ, the
	 * unsolicited receive queue pair, then the fast-path FCP WQ array.
	 */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);

	/* Then the completion queues those work queues reported into */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
	for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);

	/* Finally the event queues: fast-path array, then slow-path EQ */
	for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[qidx]);
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	/* Pre-allocate 4 events per CQ entry; sized from the CQ entry count */
	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	/* Partial allocations are released by the pool destroy routine */
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that, it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool. The caller is expected to hold
 * the hbalock; see lpfc_sli4_cq_event_alloc() for the locked variant.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	/* list_remove_head() leaves cq_event NULL when the pool is empty */
	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool. It takes and releases the
 * hbalock around the lock-free allocator.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool. The caller is expected to
 * hold the hbalock.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the pending completion-queue events back
 * into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/*
	 * Splice every pending slow-path event list onto a private list
	 * under one lock hold, then release the events without the lock.
	 */
	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		/* Re-acquires the lock internally for each event */
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It will destroy
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      ENOMEM - No available memory
 *      EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	/*
	 * NOTE(review): rc is uint32_t but is assigned -ENXIO below and
	 * returned through an int return type; works on two's complement
	 * but 'int' would be the cleaner type — confirm against callers.
	 */
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0494 Unable to allocate memory for issuing "
				"SLI_FUNCTION_RESET mailbox command\n");
		return -ENOMEM;
	}

	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
			 LPFC_SLI4_MBX_EMBED);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* Pull the status words out of the SLI4 config header response */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the firmware still owns the mailbox; do not free it */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0495 SLI_FUNCTION_RESET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox command and
 * wait for each command to complete.
 *
 * Return: the number of NOP mailbox command completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	/* The same mailbox is reissued for each NOP until one fails */
	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		/* Poll when interrupts are not yet enabled, else wait */
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	/* On timeout the firmware still owns the mailbox; do not free it */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}

/**
 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
 * @phba: pointer to lpfc hba data structure.
 * @fcfi: fcf index.
 *
 * This routine is invoked to unregister a FCFI from device.
**/ void lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) { LPFC_MBOXQ_t *mbox; uint32_t mbox_tmo; int rc; unsigned long flags; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return; lpfc_unreg_fcfi(mbox, fcfi); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2517 Unregister FCFI command failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mbox->u.mqe)); else { spin_lock_irqsave(&phba->hbalock, flags); /* Mark the FCFI is no longer registered */ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); spin_unlock_irqrestore(&phba->hbalock, flags); } } /** * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the PCI device memory space for device * with SLI-4 interface spec. * * Return codes * 0 - sucessful * other values - error **/ static int lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) { struct pci_dev *pdev; unsigned long bar0map_len, bar1map_len, bar2map_len; int error = -ENODEV; /* Obtain PCI device reference */ if (!phba->pcidev) return error; else pdev = phba->pcidev; /* Set the device DMA mask size */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { return error; } } /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the * number of bytes required by each mapping. They are actually * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 
*/ phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ phba->sli4_hba.conf_regs_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); if (!phba->sli4_hba.conf_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config registers.\n"); goto out; } /* Map SLI4 HBA Control Register base to a kernel virtual address. */ phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA control registers.\n"); goto out_iounmap_conf; } /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA doorbell registers.\n"); goto out_iounmap_ctrl; } /* Set up BAR0 PCI config space register memory map */ lpfc_sli4_bar0_register_memmap(phba); /* Set up BAR1 register memory map */ lpfc_sli4_bar1_register_memmap(phba); /* Set up BAR2 register memory map */ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); if (error) goto out_iounmap_all; return 0; out_iounmap_all: iounmap(phba->sli4_hba.drbl_regs_memmap_p); out_iounmap_ctrl: iounmap(phba->sli4_hba.ctrl_regs_memmap_p); out_iounmap_conf: iounmap(phba->sli4_hba.conf_regs_memmap_p); out: return error; } /** * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the PCI device memory space for device * with SLI-4 interface spec. 
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */

	/* Unmap I/O memory space (reverse order of the setup mappings) */
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return;
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with a interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors (SLI-3 uses a fixed count) */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector, &lpfc_sli_sp_intr_handler,
			 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector, &lpfc_sli_fp_intr_handler,
			 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

	/* Error unwind: each label undoes one acquisition, in reverse order */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling the
 * request_irq() to register MSI vector with a interrupt the handler, which
 * is done in this function.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		/* Undo pci_enable_msi() before reporting the failure */
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depends on the interrupt mode configured to the driver, the driver
 * will try to fallback from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device in the order
 * of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* cfg_mode: 2 = try MSI-X first, 1 = try MSI first, 0 = INTx only */
	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	/* Still LPFC_INTR_ERROR here means every mode failed */
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with a interrupt
 * handler, which is done in this function. Note that later when device is
 * unloading, the driver should always call free_irq() on all MSI-X vectors
 * it has done request_irq() on before calling pci_disable_msix(). Failure
 * to do so results in a BUG_ON() and a device will be left with MSI-X
 * enabled and leaks its vectors.
* * Return codes * 0 - sucessful * other values - error **/ static int lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int rc, index; /* Set up MSI-X multi-message vectors */ for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) phba->sli4_hba.msix_entries[index].entry = index; /* Configure MSI-X capability structure */ rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, phba->sli4_hba.cfg_eqn); if (rc) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); goto msi_fail_out; } /* Log MSI-X vector assignment */ for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0489 MSI-X entry[%d]: vector=x%x " "message=%d\n", index, phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); /* * Assign MSI-X vectors to interrupt handlers */ /* The first vector must associated to slow-path handler for MQ */ rc = request_irq(phba->sli4_hba.msix_entries[0].vector, &lpfc_sli4_sp_intr_handler, IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0485 MSI-X slow-path request_irq failed " "(%d)\n", rc); goto msi_fail_out; } /* The rest of the vector(s) are associated to fast-path handler(s) */ for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; rc = request_irq(phba->sli4_hba.msix_entries[index].vector, &lpfc_sli4_fp_intr_handler, IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, &phba->sli4_hba.fcp_eq_hdl[index - 1]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " "request_irq failed (%d)\n", index, rc); goto cfg_fail_out; } } return rc; cfg_fail_out: /* free the irq already requested */ for (--index; index >= 1; index--) free_irq(phba->sli4_hba.msix_entries[index - 1].vector, &phba->sli4_hba.fcp_eq_hdl[index - 1]); /* free the irq already requested */ 
free_irq(phba->sli4_hba.msix_entries[0].vector, phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ pci_disable_msix(phba->pcidev); return rc; } /** * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to release the MSI-X vectors and then disable the * MSI-X interrupt mode to device with SLI-4 interface spec. **/ static void lpfc_sli4_disable_msix(struct lpfc_hba *phba) { int index; /* Free up MSI-X multi-message vectors */ free_irq(phba->sli4_hba.msix_entries[0].vector, phba); for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index - 1]); /* Disable MSI-X */ pci_disable_msix(phba->pcidev); return; } /** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI interrupt mode to device with * SLI-4 interface spec. The kernel function pci_enable_msi() is called * to enable the MSI vector. The device driver is responsible for calling * the request_irq() to register MSI vector with a interrupt the handler, * which is done in this function. 
* * Return codes * 0 - sucessful * other values - error **/ static int lpfc_sli4_enable_msi(struct lpfc_hba *phba) { int rc, index; rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0487 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0488 PCI enable MSI mode failed (%d)\n", rc); return rc; } rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0490 MSI request_irq failed (%d)\n", rc); } for (index = 0; index < phba->cfg_fcp_eq_count; index++) { phba->sli4_hba.fcp_eq_hdl[index].idx = index; phba->sli4_hba.fcp_eq_hdl[index].phba = phba; } return rc; } /** * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable the MSI interrupt mode to device with * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has * done request_irq() on before calling pci_disable_msi(). Failure to do so * results in a BUG_ON() and a device will be left with MSI enabled and leaks * its vector. **/ static void lpfc_sli4_disable_msi(struct lpfc_hba *phba) { free_irq(phba->pcidev->irq, phba); pci_disable_msi(phba->pcidev); return; } /** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable device interrupt and associate driver's * interrupt handler(s) to interrupt vector(s) to device with SLI-4 * interface spec. Depends on the interrupt mode configured to the driver, * the driver will try to fallback from the configured interrupt mode to an * interrupt mode which is supported by the platform, kernel, and device in * the order of: * MSI-X -> MSI -> IRQ. 
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	/* cfg_mode: 2 = try MSI-X first, 1 = try MSI first, 0 = INTx only */
	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		/*
		 * No-op placeholder; mirrors the SLI-3 variant where a
		 * config-port mailbox command is issued at this point.
		 */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			/* All fast-path EQ handles share the one INTx line */
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	/* Mark the port as unloading so new activity is rejected */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	/* Mark the port as unloading so new activity is rejected */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	lpfc_sli4_hba_down(phba);

	lpfc_sli4_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		/* Fail it so its completion handler runs and frees resources */
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Tear down the queues in the HBA */
	lpfc_sli4_queue_unset(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine.
 * If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/*
	 * Now, trying to enable interrupt and bring up the device.
	 * On each pass, if the active-interrupt test fails the mode is
	 * stepped down (MSI-X -> MSI -> INTx) via "cfg_mode = --intr_mode".
	 */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bring the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	if (state == pci_channel_io_perm_failure) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 PCI channel I/O permanent failure\n");
		/* Block all SCSI devices' I/Os on the host */
		lpfc_scsi_dev_block(phba);
		/* Clean up all driver's outstanding SCSI I/Os */
		lpfc_sli_flush_fcp_rings(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error iocb (I/O) on txcmplq and let the SCSI layer
	 * retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	/* Reservation scales with the configured XRI resource count */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 4;
		else if (max_xri <= 256)
			return 8;
		else if (max_xri <= 512)
			return 16;
		else if (max_xri <= 1024)
			return 32;
		else
			return 48;
	} else
		return 0;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;	/* NOTE(review): assigned below but never read */

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba,
				    phba->sli4_hba.max_cfg_param.max_xri);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/*
	 * Now, trying to enable interrupt and bring up the device.
	 * On each failed active-interrupt test the mode is stepped down
	 * (MSI-X -> MSI -> INTx) via "cfg_mode = --intr_mode".
	 */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0298 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	/* NOTE(review): SLI-4 EEH handling is a stub here; no I/O quiesce
	 * is performed before requesting the reset.
	 */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	/* NOTE(review): stub -- reports recovery without re-initializing */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	return;
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem.
 * When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver state that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	/* Read the SLI revision config word to pick the SLI-3/SLI-4 path */
	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
**/ static void lpfc_io_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: lpfc_io_resume_s3(pdev); break; case LPFC_PCI_DEV_OC: lpfc_io_resume_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1429 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return; } static struct pci_device_id lpfc_id_table[] = { {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, 
PCI_DEVICE_ID_ZEPHYR_DCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; MODULE_DEVICE_TABLE(pci, lpfc_id_table); static struct pci_error_handlers lpfc_err_handler = { .error_detected = lpfc_io_error_detected, .slot_reset = lpfc_io_slot_reset, .resume = lpfc_io_resume, }; static struct pci_driver lpfc_driver = { .name = LPFC_DRIVER_NAME, .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, .remove = __devexit_p(lpfc_pci_remove_one), .suspend = lpfc_pci_suspend_one, .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; /** * lpfc_init - lpfc module initialization routine * * This routine is to be invoked when the lpfc module is loaded into the * kernel. 
The special kernel macro module_init() is used to indicate the * role of this routine to the kernel as lpfc module entry point. * * Return codes * 0 - successful * -ENOMEM - FC attach transport failed * all others - failed */ static int __init lpfc_init(void) { int error = 0; printk(LPFC_MODULE_DESC "\n"); printk(LPFC_COPYRIGHT "\n"); if (lpfc_enable_npiv) { lpfc_transport_functions.vport_create = lpfc_vport_create; lpfc_transport_functions.vport_delete = lpfc_vport_delete; } lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) return -ENOMEM; if (lpfc_enable_npiv) { lpfc_vport_transport_template = fc_attach_transport(&lpfc_vport_transport_functions); if (lpfc_vport_transport_template == NULL) { fc_release_transport(lpfc_transport_template); return -ENOMEM; } } error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); } return error; } /** * lpfc_exit - lpfc module removal routine * * This routine is invoked when the lpfc module is removed from the kernel. * The special kernel macro module_exit() is used to indicate the role of * this routine to the kernel as lpfc module exit point. 
*/ static void __exit lpfc_exit(void) { pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); if (_dump_buf_data) { printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " "at 0x%p\n", (1L << _dump_buf_data_order), _dump_buf_data); free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); } if (_dump_buf_dif) { printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " "at 0x%p\n", (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } } module_init(lpfc_init); module_exit(lpfc_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
gpl-2.0
faux123/Nexus-grouper
drivers/rtc/rtc-pl031.c
1444
11389
/* * drivers/rtc/rtc-pl031.c * * Real Time Clock interface for ARM AMBA PrimeCell 031 RTC * * Author: Deepak Saxena <dsaxena@plexity.net> * * Copyright 2006 (c) MontaVista Software, Inc. * * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> * Copyright 2010 (c) ST-Ericsson AB * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/slab.h> /* * Register definitions */ #define RTC_DR 0x00 /* Data read register */ #define RTC_MR 0x04 /* Match register */ #define RTC_LR 0x08 /* Data load register */ #define RTC_CR 0x0c /* Control register */ #define RTC_IMSC 0x10 /* Interrupt mask and set register */ #define RTC_RIS 0x14 /* Raw interrupt status register */ #define RTC_MIS 0x18 /* Masked interrupt status register */ #define RTC_ICR 0x1c /* Interrupt clear register */ /* ST variants have additional timer functionality */ #define RTC_TDR 0x20 /* Timer data read register */ #define RTC_TLR 0x24 /* Timer data load register */ #define RTC_TCR 0x28 /* Timer control register */ #define RTC_YDR 0x30 /* Year data read register */ #define RTC_YMR 0x34 /* Year match register */ #define RTC_YLR 0x38 /* Year data load register */ #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */ #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */ /* Common bit definitions for Interrupt status and control registers */ #define RTC_BIT_AI (1 << 0) /* Alarm interrupt bit */ #define RTC_BIT_PI (1 << 1) /* Periodic interrupt bit. ST variants only. 
*/ /* Common bit definations for ST v2 for reading/writing time */ #define RTC_SEC_SHIFT 0 #define RTC_SEC_MASK (0x3F << RTC_SEC_SHIFT) /* Second [0-59] */ #define RTC_MIN_SHIFT 6 #define RTC_MIN_MASK (0x3F << RTC_MIN_SHIFT) /* Minute [0-59] */ #define RTC_HOUR_SHIFT 12 #define RTC_HOUR_MASK (0x1F << RTC_HOUR_SHIFT) /* Hour [0-23] */ #define RTC_WDAY_SHIFT 17 #define RTC_WDAY_MASK (0x7 << RTC_WDAY_SHIFT) /* Day of Week [1-7] 1=Sunday */ #define RTC_MDAY_SHIFT 20 #define RTC_MDAY_MASK (0x1F << RTC_MDAY_SHIFT) /* Day of Month [1-31] */ #define RTC_MON_SHIFT 25 #define RTC_MON_MASK (0xF << RTC_MON_SHIFT) /* Month [1-12] 1=January */ #define RTC_TIMER_FREQ 32768 struct pl031_local { struct rtc_device *rtc; void __iomem *base; u8 hw_designer; u8 hw_revision:4; }; static int pl031_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct pl031_local *ldata = dev_get_drvdata(dev); unsigned long imsc; /* Clear any pending alarm interrupts. */ writel(RTC_BIT_AI, ldata->base + RTC_ICR); imsc = readl(ldata->base + RTC_IMSC); if (enabled == 1) writel(imsc | RTC_BIT_AI, ldata->base + RTC_IMSC); else writel(imsc & ~RTC_BIT_AI, ldata->base + RTC_IMSC); return 0; } /* * Convert Gregorian date to ST v2 RTC format. 
*/ static int pl031_stv2_tm_to_time(struct device *dev, struct rtc_time *tm, unsigned long *st_time, unsigned long *bcd_year) { int year = tm->tm_year + 1900; int wday = tm->tm_wday; /* wday masking is not working in hardware so wday must be valid */ if (wday < -1 || wday > 6) { dev_err(dev, "invalid wday value %d\n", tm->tm_wday); return -EINVAL; } else if (wday == -1) { /* wday is not provided, calculate it here */ unsigned long time; struct rtc_time calc_tm; rtc_tm_to_time(tm, &time); rtc_time_to_tm(time, &calc_tm); wday = calc_tm.tm_wday; } *bcd_year = (bin2bcd(year % 100) | bin2bcd(year / 100) << 8); *st_time = ((tm->tm_mon + 1) << RTC_MON_SHIFT) | (tm->tm_mday << RTC_MDAY_SHIFT) | ((wday + 1) << RTC_WDAY_SHIFT) | (tm->tm_hour << RTC_HOUR_SHIFT) | (tm->tm_min << RTC_MIN_SHIFT) | (tm->tm_sec << RTC_SEC_SHIFT); return 0; } /* * Convert ST v2 RTC format to Gregorian date. */ static int pl031_stv2_time_to_tm(unsigned long st_time, unsigned long bcd_year, struct rtc_time *tm) { tm->tm_year = bcd2bin(bcd_year) + (bcd2bin(bcd_year >> 8) * 100); tm->tm_mon = ((st_time & RTC_MON_MASK) >> RTC_MON_SHIFT) - 1; tm->tm_mday = ((st_time & RTC_MDAY_MASK) >> RTC_MDAY_SHIFT); tm->tm_wday = ((st_time & RTC_WDAY_MASK) >> RTC_WDAY_SHIFT) - 1; tm->tm_hour = ((st_time & RTC_HOUR_MASK) >> RTC_HOUR_SHIFT); tm->tm_min = ((st_time & RTC_MIN_MASK) >> RTC_MIN_SHIFT); tm->tm_sec = ((st_time & RTC_SEC_MASK) >> RTC_SEC_SHIFT); tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year -= 1900; return 0; } static int pl031_stv2_read_time(struct device *dev, struct rtc_time *tm) { struct pl031_local *ldata = dev_get_drvdata(dev); pl031_stv2_time_to_tm(readl(ldata->base + RTC_DR), readl(ldata->base + RTC_YDR), tm); return 0; } static int pl031_stv2_set_time(struct device *dev, struct rtc_time *tm) { unsigned long time; unsigned long bcd_year; struct pl031_local *ldata = dev_get_drvdata(dev); int ret; ret = pl031_stv2_tm_to_time(dev, tm, &time, &bcd_year); if (ret == 0) { 
writel(bcd_year, ldata->base + RTC_YLR); writel(time, ldata->base + RTC_LR); } return ret; } static int pl031_stv2_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); int ret; ret = pl031_stv2_time_to_tm(readl(ldata->base + RTC_MR), readl(ldata->base + RTC_YMR), &alarm->time); alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI; alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI; return ret; } static int pl031_stv2_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); unsigned long time; unsigned long bcd_year; int ret; /* At the moment, we can only deal with non-wildcarded alarm times. */ ret = rtc_valid_tm(&alarm->time); if (ret == 0) { ret = pl031_stv2_tm_to_time(dev, &alarm->time, &time, &bcd_year); if (ret == 0) { writel(bcd_year, ldata->base + RTC_YMR); writel(time, ldata->base + RTC_MR); pl031_alarm_irq_enable(dev, alarm->enabled); } } return ret; } static irqreturn_t pl031_interrupt(int irq, void *dev_id) { struct pl031_local *ldata = dev_id; unsigned long rtcmis; unsigned long events = 0; rtcmis = readl(ldata->base + RTC_MIS); if (rtcmis) { writel(rtcmis, ldata->base + RTC_ICR); if (rtcmis & RTC_BIT_AI) events |= (RTC_AF | RTC_IRQF); /* Timer interrupt is only available in ST variants */ if ((rtcmis & RTC_BIT_PI) && (ldata->hw_designer == AMBA_VENDOR_ST)) events |= (RTC_PF | RTC_IRQF); rtc_update_irq(ldata->rtc, 1, events); return IRQ_HANDLED; } return IRQ_NONE; } static int pl031_read_time(struct device *dev, struct rtc_time *tm) { struct pl031_local *ldata = dev_get_drvdata(dev); rtc_time_to_tm(readl(ldata->base + RTC_DR), tm); return 0; } static int pl031_set_time(struct device *dev, struct rtc_time *tm) { unsigned long time; struct pl031_local *ldata = dev_get_drvdata(dev); int ret; ret = rtc_tm_to_time(tm, &time); if (ret == 0) writel(time, ldata->base + RTC_LR); return ret; } static int pl031_read_alarm(struct device *dev, 
struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time); alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI; alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI; return 0; } static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); unsigned long time; int ret; /* At the moment, we can only deal with non-wildcarded alarm times. */ ret = rtc_valid_tm(&alarm->time); if (ret == 0) { ret = rtc_tm_to_time(&alarm->time, &time); if (ret == 0) { writel(time, ldata->base + RTC_MR); pl031_alarm_irq_enable(dev, alarm->enabled); } } return ret; } static int pl031_remove(struct amba_device *adev) { struct pl031_local *ldata = dev_get_drvdata(&adev->dev); amba_set_drvdata(adev, NULL); free_irq(adev->irq[0], ldata->rtc); rtc_device_unregister(ldata->rtc); iounmap(ldata->base); kfree(ldata); amba_release_regions(adev); return 0; } static int pl031_probe(struct amba_device *adev, const struct amba_id *id) { int ret; struct pl031_local *ldata; struct rtc_class_ops *ops = id->data; ret = amba_request_regions(adev, NULL); if (ret) goto err_req; ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL); if (!ldata) { ret = -ENOMEM; goto out; } ldata->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!ldata->base) { ret = -ENOMEM; goto out_no_remap; } amba_set_drvdata(adev, ldata); ldata->hw_designer = amba_manf(adev); ldata->hw_revision = amba_rev(adev); dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer); dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision); /* Enable the clockwatch on ST Variants */ if ((ldata->hw_designer == AMBA_VENDOR_ST) && (ldata->hw_revision > 1)) writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, ldata->base + RTC_CR); ldata->rtc = rtc_device_register("pl031", &adev->dev, ops, THIS_MODULE); if (IS_ERR(ldata->rtc)) { ret = PTR_ERR(ldata->rtc); goto out_no_rtc; } if 
(request_irq(adev->irq[0], pl031_interrupt, IRQF_DISABLED, "rtc-pl031", ldata)) { ret = -EIO; goto out_no_irq; } return 0; out_no_irq: rtc_device_unregister(ldata->rtc); out_no_rtc: iounmap(ldata->base); amba_set_drvdata(adev, NULL); out_no_remap: kfree(ldata); out: amba_release_regions(adev); err_req: return ret; } /* Operations for the original ARM version */ static struct rtc_class_ops arm_pl031_ops = { .read_time = pl031_read_time, .set_time = pl031_set_time, .read_alarm = pl031_read_alarm, .set_alarm = pl031_set_alarm, .alarm_irq_enable = pl031_alarm_irq_enable, }; /* The First ST derivative */ static struct rtc_class_ops stv1_pl031_ops = { .read_time = pl031_read_time, .set_time = pl031_set_time, .read_alarm = pl031_read_alarm, .set_alarm = pl031_set_alarm, .alarm_irq_enable = pl031_alarm_irq_enable, }; /* And the second ST derivative */ static struct rtc_class_ops stv2_pl031_ops = { .read_time = pl031_stv2_read_time, .set_time = pl031_stv2_set_time, .read_alarm = pl031_stv2_read_alarm, .set_alarm = pl031_stv2_set_alarm, .alarm_irq_enable = pl031_alarm_irq_enable, }; static struct amba_id pl031_ids[] = { { .id = 0x00041031, .mask = 0x000fffff, .data = &arm_pl031_ops, }, /* ST Micro variants */ { .id = 0x00180031, .mask = 0x00ffffff, .data = &stv1_pl031_ops, }, { .id = 0x00280031, .mask = 0x00ffffff, .data = &stv2_pl031_ops, }, {0, 0}, }; static struct amba_driver pl031_driver = { .drv = { .name = "rtc-pl031", }, .id_table = pl031_ids, .probe = pl031_probe, .remove = pl031_remove, }; static int __init pl031_init(void) { return amba_driver_register(&pl031_driver); } static void __exit pl031_exit(void) { amba_driver_unregister(&pl031_driver); } module_init(pl031_init); module_exit(pl031_exit); MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net"); MODULE_DESCRIPTION("ARM AMBA PL031 RTC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
linuxium/rockchips-kk
arch/sparc/kernel/ds.c
1444
26157
/* ds.c: Domain Services driver for Logical Domains * * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/reboot.h> #include <linux/cpu.h> #include <asm/hypervisor.h> #include <asm/ldc.h> #include <asm/vio.h> #include <asm/mdesc.h> #include <asm/head.h> #include <asm/irq.h> #include "kernel.h" #define DRV_MODULE_NAME "ds" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.0" #define DRV_MODULE_RELDATE "Jul 11, 2007" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM domain services driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); struct ds_msg_tag { __u32 type; #define DS_INIT_REQ 0x00 #define DS_INIT_ACK 0x01 #define DS_INIT_NACK 0x02 #define DS_REG_REQ 0x03 #define DS_REG_ACK 0x04 #define DS_REG_NACK 0x05 #define DS_UNREG_REQ 0x06 #define DS_UNREG_ACK 0x07 #define DS_UNREG_NACK 0x08 #define DS_DATA 0x09 #define DS_NACK 0x0a __u32 len; }; /* Result codes */ #define DS_OK 0x00 #define DS_REG_VER_NACK 0x01 #define DS_REG_DUP 0x02 #define DS_INV_HDL 0x03 #define DS_TYPE_UNKNOWN 0x04 struct ds_version { __u16 major; __u16 minor; }; struct ds_ver_req { struct ds_msg_tag tag; struct ds_version ver; }; struct ds_ver_ack { struct ds_msg_tag tag; __u16 minor; }; struct ds_ver_nack { struct ds_msg_tag tag; __u16 major; }; struct ds_reg_req { struct ds_msg_tag tag; __u64 handle; __u16 major; __u16 minor; char svc_id[0]; }; struct ds_reg_ack { struct ds_msg_tag tag; __u64 handle; __u16 minor; }; struct ds_reg_nack { struct ds_msg_tag tag; __u64 handle; __u16 major; }; struct ds_unreg_req { struct ds_msg_tag tag; __u64 handle; }; struct 
ds_unreg_ack { struct ds_msg_tag tag; __u64 handle; }; struct ds_unreg_nack { struct ds_msg_tag tag; __u64 handle; }; struct ds_data { struct ds_msg_tag tag; __u64 handle; }; struct ds_data_nack { struct ds_msg_tag tag; __u64 handle; __u64 result; }; struct ds_info; struct ds_cap_state { __u64 handle; void (*data)(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); const char *service_id; u8 state; #define CAP_STATE_UNKNOWN 0x00 #define CAP_STATE_REG_SENT 0x01 #define CAP_STATE_REGISTERED 0x02 }; static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); #ifdef CONFIG_HOTPLUG_CPU static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); #endif static void ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void ds_var_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static struct ds_cap_state ds_states_template[] = { { .service_id = "md-update", .data = md_update_data, }, { .service_id = "domain-shutdown", .data = domain_shutdown_data, }, { .service_id = "domain-panic", .data = domain_panic_data, }, #ifdef CONFIG_HOTPLUG_CPU { .service_id = "dr-cpu", .data = dr_cpu_data, }, #endif { .service_id = "pri", .data = ds_pri_data, }, { .service_id = "var-config", .data = ds_var_data, }, { .service_id = "var-config-backup", .data = ds_var_data, }, }; static DEFINE_SPINLOCK(ds_lock); struct ds_info { struct ldc_channel *lp; u8 hs_state; #define DS_HS_START 0x01 #define DS_HS_DONE 0x02 u64 id; void *rcv_buf; int rcv_buf_len; struct ds_cap_state *ds_states; int num_ds_states; struct ds_info *next; }; static struct ds_info *ds_info_list; static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle) { unsigned int index = handle >> 32; if (index 
>= dp->num_ds_states) return NULL; return &dp->ds_states[index]; } static struct ds_cap_state *find_cap_by_string(struct ds_info *dp, const char *name) { int i; for (i = 0; i < dp->num_ds_states; i++) { if (strcmp(dp->ds_states[i].service_id, name)) continue; return &dp->ds_states[i]; } return NULL; } static int __ds_send(struct ldc_channel *lp, void *data, int len) { int err, limit = 1000; err = -EINVAL; while (limit-- > 0) { err = ldc_write(lp, data, len); if (!err || (err != -EAGAIN)) break; udelay(1); } return err; } static int ds_send(struct ldc_channel *lp, void *data, int len) { unsigned long flags; int err; spin_lock_irqsave(&ds_lock, flags); err = __ds_send(lp, data, len); spin_unlock_irqrestore(&ds_lock, flags); return err; } struct ds_md_update_req { __u64 req_num; }; struct ds_md_update_res { __u64 req_num; __u32 result; }; static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_md_update_req *rp; struct { struct ds_data data; struct ds_md_update_res res; } pkt; rp = (struct ds_md_update_req *) (dpkt + 1); printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id); mdesc_update(); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; ds_send(lp, &pkt, sizeof(pkt)); } struct ds_shutdown_req { __u64 req_num; __u32 ms_delay; }; struct ds_shutdown_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_shutdown_req *rp; struct { struct ds_data data; struct ds_shutdown_res res; } pkt; rp = (struct ds_shutdown_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Shutdown request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, 
sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); orderly_poweroff(true); } struct ds_panic_req { __u64 req_num; }; struct ds_panic_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_panic_req *rp; struct { struct ds_data data; struct ds_panic_res res; } pkt; rp = (struct ds_panic_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Panic request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); panic("PANIC requested by LDOM manager."); } #ifdef CONFIG_HOTPLUG_CPU struct dr_cpu_tag { __u64 req_num; __u32 type; #define DR_CPU_CONFIGURE 0x43 #define DR_CPU_UNCONFIGURE 0x55 #define DR_CPU_FORCE_UNCONFIGURE 0x46 #define DR_CPU_STATUS 0x53 /* Responses */ #define DR_CPU_OK 0x6f #define DR_CPU_ERROR 0x65 __u32 num_records; }; struct dr_cpu_resp_entry { __u32 cpu; __u32 result; #define DR_CPU_RES_OK 0x00 #define DR_CPU_RES_FAILURE 0x01 #define DR_CPU_RES_BLOCKED 0x02 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03 #define DR_CPU_RES_NOT_IN_MD 0x04 __u32 stat; #define DR_CPU_STAT_NOT_PRESENT 0x00 #define DR_CPU_STAT_UNCONFIGURED 0x01 #define DR_CPU_STAT_CONFIGURED 0x02 __u32 str_off; }; static void __dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); struct { struct ds_data data; struct dr_cpu_tag tag; } pkt; int msg_len; memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; 
pkt.data.handle = cp->handle; pkt.tag.req_num = tag->req_num; pkt.tag.type = DR_CPU_ERROR; pkt.tag.num_records = 0; msg_len = (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag)); pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag); __ds_send(dp->lp, &pkt, msg_len); } static void dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { unsigned long flags; spin_lock_irqsave(&ds_lock, flags); __dr_cpu_send_error(dp, cp, data); spin_unlock_irqrestore(&ds_lock, flags); } #define CPU_SENTINEL 0xffffffff static void purge_dups(u32 *list, u32 num_ents) { unsigned int i; for (i = 0; i < num_ents; i++) { u32 cpu = list[i]; unsigned int j; if (cpu == CPU_SENTINEL) continue; for (j = i + 1; j < num_ents; j++) { if (list[j] == cpu) list[j] = CPU_SENTINEL; } } } static int dr_cpu_size_response(int ncpus) { return (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag) + (sizeof(struct dr_cpu_resp_entry) * ncpus)); } static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, u64 handle, int resp_len, int ncpus, cpumask_t *mask, u32 default_stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i, cpu; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); resp->tag.type = DS_DATA; resp->tag.len = resp_len - sizeof(struct ds_msg_tag); resp->handle = handle; tag->req_num = req_num; tag->type = DR_CPU_OK; tag->num_records = ncpus; i = 0; for_each_cpu(cpu, mask) { ent[i].cpu = cpu; ent[i].result = DR_CPU_RES_OK; ent[i].stat = default_stat; i++; } BUG_ON(i != ncpus); } static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, u32 res, u32 stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); for (i = 0; i < ncpus; i++) { if (ent[i].cpu != cpu) continue; ent[i].result = res; ent[i].stat = stat; break; } } static int __cpuinit dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, u64 
req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_CONFIGURED); mdesc_populate_present_mask(mask); mdesc_fill_in_cpu_data(mask); for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", dp->id, cpu); err = cpu_up(cpu); if (err) { __u32 res = DR_CPU_RES_FAILURE; __u32 stat = DR_CPU_STAT_UNCONFIGURED; if (!cpu_present(cpu)) { /* CPU not present in MD */ res = DR_CPU_RES_NOT_IN_MD; stat = DR_CPU_STAT_NOT_PRESENT; } else if (err == -ENODEV) { /* CPU did not call in successfully */ res = DR_CPU_RES_CPU_NOT_RESPONDING; } printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n", dp->id, err); dr_cpu_mark(resp, cpu, ncpus, res, stat); } } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); /* Redistribute IRQs, taking into account the new cpus. 
*/ fixup_irqs(); return 0; } static int dr_cpu_unconfigure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_UNCONFIGURED); for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", dp->id, cpu); err = cpu_down(cpu); if (err) dr_cpu_mark(resp, cpu, ncpus, DR_CPU_RES_FAILURE, DR_CPU_STAT_CONFIGURED); } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); return 0; } static void __cpuinit dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *data = buf; struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); u32 *cpu_list = (u32 *) (tag + 1); u64 req_num = tag->req_num; cpumask_t mask; unsigned int i; int err; switch (tag->type) { case DR_CPU_CONFIGURE: case DR_CPU_UNCONFIGURE: case DR_CPU_FORCE_UNCONFIGURE: break; default: dr_cpu_send_error(dp, cp, data); return; } purge_dups(cpu_list, tag->num_records); cpumask_clear(&mask); for (i = 0; i < tag->num_records; i++) { if (cpu_list[i] == CPU_SENTINEL) continue; if (cpu_list[i] < nr_cpu_ids) cpumask_set_cpu(cpu_list[i], &mask); } if (tag->type == DR_CPU_CONFIGURE) err = dr_cpu_configure(dp, cp, req_num, &mask); else err = dr_cpu_unconfigure(dp, cp, req_num, &mask); if (err) dr_cpu_send_error(dp, cp, data); } #endif /* CONFIG_HOTPLUG_CPU */ struct ds_pri_msg { __u64 req_num; __u64 type; #define DS_PRI_REQUEST 0x00 #define DS_PRI_DATA 0x01 #define DS_PRI_UPDATE 0x02 }; static void ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *dpkt = buf; struct ds_pri_msg *rp; rp = (struct ds_pri_msg *) (dpkt + 1); printk(KERN_INFO 
"ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
	       dp->id, rp->req_num, rp->type, len);
}

/*
 * NOTE(review): the fragment above is the tail of a function whose
 * definition begins before this chunk of the file; left byte-identical.
 */

/* Header shared by all "var-config" domain-services messages. */
struct ds_var_hdr {
	__u32			type;
#define DS_VAR_SET_REQ		0x00
#define DS_VAR_DELETE_REQ	0x01
#define DS_VAR_SET_RESP		0x02
#define DS_VAR_DELETE_RESP	0x03
};

/* SET request payload: NUL-terminated name followed by NUL-terminated
 * value, packed back to back in name_and_value[].
 */
struct ds_var_set_msg {
	struct ds_var_hdr	hdr;
	char			name_and_value[0];
};

struct ds_var_delete_msg {
	struct ds_var_hdr	hdr;
	char			name[0];
};

struct ds_var_resp {
	struct ds_var_hdr	hdr;
	__u32			result;
#define DS_VAR_SUCCESS		0x00
#define DS_VAR_NO_SPACE		0x01
#define DS_VAR_INVALID_VAR	0x02
#define DS_VAR_INVALID_VAL	0x03
#define DS_VAR_NOT_PRESENT	0x04
};

/* ds_var_mutex serializes ldom_set_var() callers; the doorbell/response
 * pair is the hand-off from the ds_var_data() reply handler back to the
 * waiter polling in ldom_set_var().
 */
static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;

/* Reply handler for the var-config capability: stash the result code and
 * ring the doorbell so the poll loop in ldom_set_var() can proceed.
 */
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	/* Only SET/DELETE responses are of interest here. */
	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	/* Make the response visible before the doorbell flag. */
	wmb();
	ds_var_doorbell = 1;
}

/* Set an OBP/LDOM variable via the "var-config" (or fall-back
 * "var-config-backup") domain service, then busy-wait (up to ~100ms)
 * for the response.  Logs an error on timeout or non-success.
 */
void ldom_set_var(const char *var, const char *value)
{
	struct ds_cap_state *cp;
	struct ds_info *dp;
	unsigned long flags;

	/* Find a registered var-config capability, preferring the
	 * primary service over the backup.
	 */
	spin_lock_irqsave(&ds_lock, flags);
	cp = NULL;
	for (dp = ds_info_list; dp; dp = dp->next) {
		struct ds_cap_state *tmp;

		tmp = find_cap_by_string(dp, "var-config");
		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
			cp = tmp;
			break;
		}
	}
	if (!cp) {
		for (dp = ds_info_list; dp; dp = dp->next) {
			struct ds_cap_state *tmp;

			tmp = find_cap_by_string(dp, "var-config-backup");
			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
				cp = tmp;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ds_lock, flags);

	if (cp) {
		union {
			struct {
				struct ds_data		data;
				struct ds_var_set_msg	msg;
			} header;
			char			all[512];
		} pkt;
		char *base, *p;
		int msg_len, loops;

		/* Build the SET request: name\0value\0, rounded up to a
		 * 4-byte multiple.
		 */
		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		__ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		/* Poll for the doorbell set by ds_var_data(). */
		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}
		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
			       "failed, response(%d).\n",
			       dp->id, var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}

static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;

/* Reboot the domain, optionally passing "boot <boot_command>" to the
 * firmware either through the sun4v reboot-data hypervisor API (when
 * available) or via the "reboot-command" variable.
 */
void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		unsigned long len;

		strcpy(full_boot_str, "boot ");
		strcpy(full_boot_str + strlen("boot "), boot_command);
		len = strlen(full_boot_str);

		if (reboot_data_supported) {
			unsigned long ra = kimage_addr_to_ra(full_boot_str);
			unsigned long hv_ret;

			hv_ret = sun4v_reboot_data_set(ra, len);
			if (hv_ret != HV_EOK)
				pr_err("SUN4V: Unable to set reboot data "
				       "hv_ret=%lu\n", hv_ret);
		} else {
			ldom_set_var("reboot-command", full_boot_str);
		}
	}
	sun4v_mach_sir();
}

void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}

/* Connection-reset stub: only logs the caller; no actual recovery here. */
static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
	       dp->id, __builtin_return_address(0));
}

/* Send a DS_REG_REQ for every capability that is not yet registered.
 * Handles encode the slot index in the upper 32 bits plus a
 * sched_clock()-derived nonce in the lower 32.
 */
static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &dp->ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;
		strcpy(pbuf.req.svc_id, cp->service_id);

		err = __ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}
	return 0;
}

/* Drive the DS handshake state machine: INIT ACK moves us to DS_HS_DONE
 * and triggers service registration; afterwards REG ACK/NACK updates the
 * matching capability.  Unexpected packets reset the connection.
 */
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{

	if (dp->hs_state == DS_HS_START) {
		if (pkt->type != DS_INIT_ACK)
			goto conn_reset;

		dp->hs_state = DS_HS_DONE;

		return register_services(dp);
	}

	if (dp->hs_state != DS_HS_DONE)
		goto conn_reset;

	if (pkt->type == DS_REG_ACK) {
		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, ap->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG ACK for unknown "
			       "handle %llx\n", dp->id, ap->handle);
			return 0;
		}
		printk(KERN_INFO "ds-%llu: Registered %s service.\n",
		       dp->id, cp->service_id);
		cp->state = CAP_STATE_REGISTERED;
	} else if (pkt->type == DS_REG_NACK) {
		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, np->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG NACK for "
			       "unknown handle %llx\n",
			       dp->id, np->handle);
			return 0;
		}
		cp->state = CAP_STATE_UNKNOWN;
	}

	return 0;

conn_reset:
	ds_conn_reset(dp);
	return -ECONNRESET;
}

/* NACK a data packet whose handle matches no capability.
 * Caller holds ds_lock.
 */
static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
	struct ds_data_nack nack = {
		.tag = {
			.type = DS_NACK,
			.len = (sizeof(struct ds_data_nack) -
				sizeof(struct ds_msg_tag)),
		},
		.handle = handle,
		.result = DS_INV_HDL,
	};

	__ds_send(dp->lp, &nack, sizeof(nack));
}

/* Work queued from interrupt context (ds_data) and consumed by the
 * "kldomd" kernel thread (ds_thread/process_ds_work).
 */
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

struct ds_queue_entry {
	struct list_head	list;
	struct ds_info		*dp;
	int			req_len;
	int			__pad;
	u64			req[0];	/* copied packet follows */
};

/* Splice the pending work off under ds_lock, then dispatch each packet
 * to its capability's data handler outside the lock (handlers may sleep,
 * e.g. ldom_set_var's mutex).
 */
static void process_ds_work(void)
{
	struct ds_queue_entry *qp, *tmp;
	unsigned long flags;
	LIST_HEAD(todo);

	spin_lock_irqsave(&ds_lock, flags);
	list_splice_init(&ds_work_list, &todo);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
		struct ds_data *dpkt = (struct ds_data *) qp->req;
		struct ds_info *dp = qp->dp;
		struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
		int req_len = qp->req_len;

		if (!cp) {
			printk(KERN_ERR "ds-%llu: Data for unknown "
			       "handle %llu\n",
			       dp->id, dpkt->handle);

			spin_lock_irqsave(&ds_lock, flags);
			__send_ds_nack(dp, dpkt->handle);
			spin_unlock_irqrestore(&ds_lock, flags);
		} else {
			cp->data(dp, cp, dpkt, req_len);
		}

		list_del(&qp->list);
		kfree(qp);
	}
}

/* Worker thread: sleep until ds_data() queues work, then process it. */
static int ds_thread(void *__unused)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&ds_work_list))
			schedule();
		finish_wait(&ds_wait, &wait);

		if (kthread_should_stop())
			break;

		process_ds_work();
	}

	return 0;
}

/* Copy an incoming data packet onto the work list (GFP_ATOMIC: called
 * under ds_lock from the LDC event handler); NACK it on allocation
 * failure.
 */
static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
	struct ds_data *dpkt = (struct ds_data *) pkt;
	struct ds_queue_entry *qp;

	qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
	if (!qp) {
		__send_ds_nack(dp, dpkt->handle);
	} else {
		qp->dp = dp;
		memcpy(&qp->req, pkt, len);
		list_add_tail(&qp->list, &ds_work_list);
		wake_up(&ds_wait);
	}
	return 0;
}

/* Channel came up: send DS_INIT_REQ (v1.0) and enter DS_HS_START. */
static void ds_up(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_ver_req req;
	int err;

	req.tag.type = DS_INIT_REQ;
	req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
	req.ver.major = 1;
	req.ver.minor = 0;

	err = __ds_send(lp, &req, sizeof(req));
	if (err > 0)
		dp->hs_state = DS_HS_START;
}

/* Channel reset: drop handshake state and mark every capability unknown. */
static void ds_reset(struct ds_info *dp)
{
	int i;

	dp->hs_state = 0;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct ds_cap_state *cp = &dp->ds_states[i];

		cp->state = CAP_STATE_UNKNOWN;
	}
}

/* LDC event callback.  Runs with ds_lock held for the whole event so the
 * handshake state and receive buffer are not raced against.
 */
static void ds_event(void *arg, int event)
{
	struct ds_info *dp = arg;
	struct ldc_channel *lp = dp->lp;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);

	if (event == LDC_EVENT_UP) {
		ds_up(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event == LDC_EVENT_RESET) {
		ds_reset(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event != LDC_EVENT_DATA_READY) {
		printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
		       dp->id, event);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	err = 0;
	while (1) {
		struct ds_msg_tag *tag;

		/* Read the tag first, then the tag->len payload bytes. */
		err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err == 0)
			break;

		tag = dp->rcv_buf;
		err = ldc_read(lp, tag + 1, tag->len);

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err < tag->len)
			break;

		/* Types below DS_DATA belong to the handshake. */
		if (tag->type < DS_DATA)
			err = ds_handshake(dp, dp->rcv_buf);
		else
			err = ds_data(dp, dp->rcv_buf,
				      sizeof(*tag) + err);
		if (err == -ECONNRESET)
			break;
	}

	spin_unlock_irqrestore(&ds_lock, flags);
}

/* Probe one "domain-services-port" vio device: allocate per-port state,
 * clone the capability template, bind an LDC channel in stream mode and
 * link the port into ds_info_list.
 */
static int __devinit ds_probe(struct vio_dev *vdev,
			      const struct vio_device_id *id)
{
	static int ds_version_printed;
	struct ldc_channel_config ds_cfg = {
		.event		= ds_event,
		.mtu		= 4096,
		.mode		= LDC_MODE_STREAM,
	};
	struct mdesc_handle *hp;
	struct ldc_channel *lp;
	struct ds_info *dp;
	const u64 *val;
	int err, i;

	if (ds_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	err = -ENOMEM;
	if (!dp)
		goto out_err;

	hp = mdesc_grab();
	val = mdesc_get_property(hp, vdev->mp, "id", NULL);
	if (val)
		dp->id = *val;
	mdesc_release(hp);

	dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
	if (!dp->rcv_buf)
		goto out_free_dp;

	dp->rcv_buf_len = 4096;

	dp->ds_states = kzalloc(sizeof(ds_states_template),
				GFP_KERNEL);
	if (!dp->ds_states)
		goto out_free_rcv_buf;

	memcpy(dp->ds_states, ds_states_template,
	       sizeof(ds_states_template));
	dp->num_ds_states = ARRAY_SIZE(ds_states_template);

	for (i = 0; i < dp->num_ds_states; i++)
		dp->ds_states[i].handle = ((u64)i << 32);

	ds_cfg.tx_irq = vdev->tx_irq;
	ds_cfg.rx_irq = vdev->rx_irq;

	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out_free_ds_states;
	}
	dp->lp = lp;

	err = ldc_bind(lp, "DS");
	if (err)
		goto out_free_ldc;

	spin_lock_irq(&ds_lock);
	dp->next = ds_info_list;
	ds_info_list = dp;
	spin_unlock_irq(&ds_lock);

	return err;

out_free_ldc:
	ldc_free(dp->lp);

out_free_ds_states:
	kfree(dp->ds_states);

out_free_rcv_buf:
	kfree(dp->rcv_buf);

out_free_dp:
	kfree(dp);

out_err:
	return err;
}

/* Ports are never torn down; remove is a no-op. */
static int ds_remove(struct vio_dev *vdev)
{
	return 0;
}

static const struct vio_device_id ds_match[] = {
	{
		.type = "domain-services-port",
	},
	{},
};

static struct vio_driver ds_driver = {
	.id_table	= ds_match,
	.probe		= ds_probe,
	.remove		= ds_remove,
	.driver		= {
		.name	= "ds",
		.owner	= THIS_MODULE,
	}
};

/* Detect the sun4v reboot-data hypervisor API, start the worker thread
 * and register the vio driver.
 */
static int __init ds_init(void)
{
	unsigned long hv_ret, major, minor;

	if (tlb_type == hypervisor) {
		hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA,
					   &major, &minor);
		if (hv_ret == HV_EOK) {
			pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
				major, minor);
			reboot_data_supported = 1;
		}
	}
	kthread_run(ds_thread, NULL, "kldomd");

	return vio_register_driver(&ds_driver);
}

fs_initcall(ds_init);
gpl-2.0
rmcc/malata-kernel-smb_a1XXX
fs/posix_acl.c
1700
8652
/* * linux/fs/posix_acl.c * * Copyright (C) 2002 by Andreas Gruenbacher <a.gruenbacher@computer.org> * * Fixes from William Schumacher incorporated on 15 March 2001. * (Reported by Charles Bertsch, <CBertsch@microtest.com>). */ /* * This file contains generic functions for manipulating * POSIX 1003.1e draft standard 17 ACLs. */ #include <linux/kernel.h> #include <linux/slab.h> #include <asm/atomic.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/posix_acl.h> #include <linux/module.h> #include <linux/errno.h> EXPORT_SYMBOL(posix_acl_alloc); EXPORT_SYMBOL(posix_acl_clone); EXPORT_SYMBOL(posix_acl_valid); EXPORT_SYMBOL(posix_acl_equiv_mode); EXPORT_SYMBOL(posix_acl_from_mode); EXPORT_SYMBOL(posix_acl_create_masq); EXPORT_SYMBOL(posix_acl_chmod_masq); EXPORT_SYMBOL(posix_acl_permission); /* * Allocate a new ACL with the specified number of entries. */ struct posix_acl * posix_acl_alloc(int count, gfp_t flags) { const size_t size = sizeof(struct posix_acl) + count * sizeof(struct posix_acl_entry); struct posix_acl *acl = kmalloc(size, flags); if (acl) { atomic_set(&acl->a_refcount, 1); acl->a_count = count; } return acl; } /* * Clone an ACL. */ struct posix_acl * posix_acl_clone(const struct posix_acl *acl, gfp_t flags) { struct posix_acl *clone = NULL; if (acl) { int size = sizeof(struct posix_acl) + acl->a_count * sizeof(struct posix_acl_entry); clone = kmemdup(acl, size, flags); if (clone) atomic_set(&clone->a_refcount, 1); } return clone; } /* * Check if an acl is valid. Returns 0 if it is, or -E... otherwise. 
*/ int posix_acl_valid(const struct posix_acl *acl) { const struct posix_acl_entry *pa, *pe; int state = ACL_USER_OBJ; unsigned int id = 0; /* keep gcc happy */ int needs_mask = 0; FOREACH_ACL_ENTRY(pa, acl, pe) { if (pa->e_perm & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) return -EINVAL; switch (pa->e_tag) { case ACL_USER_OBJ: if (state == ACL_USER_OBJ) { id = 0; state = ACL_USER; break; } return -EINVAL; case ACL_USER: if (state != ACL_USER) return -EINVAL; if (pa->e_id == ACL_UNDEFINED_ID || pa->e_id < id) return -EINVAL; id = pa->e_id + 1; needs_mask = 1; break; case ACL_GROUP_OBJ: if (state == ACL_USER) { id = 0; state = ACL_GROUP; break; } return -EINVAL; case ACL_GROUP: if (state != ACL_GROUP) return -EINVAL; if (pa->e_id == ACL_UNDEFINED_ID || pa->e_id < id) return -EINVAL; id = pa->e_id + 1; needs_mask = 1; break; case ACL_MASK: if (state != ACL_GROUP) return -EINVAL; state = ACL_OTHER; break; case ACL_OTHER: if (state == ACL_OTHER || (state == ACL_GROUP && !needs_mask)) { state = 0; break; } return -EINVAL; default: return -EINVAL; } } if (state == 0) return 0; return -EINVAL; } /* * Returns 0 if the acl can be exactly represented in the traditional * file mode permission bits, or else 1. Returns -E... on error. */ int posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p) { const struct posix_acl_entry *pa, *pe; mode_t mode = 0; int not_equiv = 0; FOREACH_ACL_ENTRY(pa, acl, pe) { switch (pa->e_tag) { case ACL_USER_OBJ: mode |= (pa->e_perm & S_IRWXO) << 6; break; case ACL_GROUP_OBJ: mode |= (pa->e_perm & S_IRWXO) << 3; break; case ACL_OTHER: mode |= pa->e_perm & S_IRWXO; break; case ACL_MASK: mode = (mode & ~S_IRWXG) | ((pa->e_perm & S_IRWXO) << 3); not_equiv = 1; break; case ACL_USER: case ACL_GROUP: not_equiv = 1; break; default: return -EINVAL; } } if (mode_p) *mode_p = (*mode_p & ~S_IRWXUGO) | mode; return not_equiv; } /* * Create an ACL representing the file mode permission bits of an inode. 
*/ struct posix_acl * posix_acl_from_mode(mode_t mode, gfp_t flags) { struct posix_acl *acl = posix_acl_alloc(3, flags); if (!acl) return ERR_PTR(-ENOMEM); acl->a_entries[0].e_tag = ACL_USER_OBJ; acl->a_entries[0].e_id = ACL_UNDEFINED_ID; acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6; acl->a_entries[1].e_tag = ACL_GROUP_OBJ; acl->a_entries[1].e_id = ACL_UNDEFINED_ID; acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3; acl->a_entries[2].e_tag = ACL_OTHER; acl->a_entries[2].e_id = ACL_UNDEFINED_ID; acl->a_entries[2].e_perm = (mode & S_IRWXO); return acl; } /* * Return 0 if current is granted want access to the inode * by the acl. Returns -E... otherwise. */ int posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want) { const struct posix_acl_entry *pa, *pe, *mask_obj; int found = 0; FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: /* (May have been checked already) */ if (inode->i_uid == current_fsuid()) goto check_perm; break; case ACL_USER: if (pa->e_id == current_fsuid()) goto mask; break; case ACL_GROUP_OBJ: if (in_group_p(inode->i_gid)) { found = 1; if ((pa->e_perm & want) == want) goto mask; } break; case ACL_GROUP: if (in_group_p(pa->e_id)) { found = 1; if ((pa->e_perm & want) == want) goto mask; } break; case ACL_MASK: break; case ACL_OTHER: if (found) return -EACCES; else goto check_perm; default: return -EIO; } } return -EIO; mask: for (mask_obj = pa+1; mask_obj != pe; mask_obj++) { if (mask_obj->e_tag == ACL_MASK) { if ((pa->e_perm & mask_obj->e_perm & want) == want) return 0; return -EACCES; } } check_perm: if ((pa->e_perm & want) == want) return 0; return -EACCES; } /* * Modify acl when creating a new inode. The caller must ensure the acl is * only referenced once. * * mode_p initially must contain the mode parameter to the open() / creat() * system calls. All permissions that are not granted by the acl are removed. * The permissions in the acl are changed to reflect the mode_p parameter. 
*/ int posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p) { struct posix_acl_entry *pa, *pe; struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL; mode_t mode = *mode_p; int not_equiv = 0; /* assert(atomic_read(acl->a_refcount) == 1); */ FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: pa->e_perm &= (mode >> 6) | ~S_IRWXO; mode &= (pa->e_perm << 6) | ~S_IRWXU; break; case ACL_USER: case ACL_GROUP: not_equiv = 1; break; case ACL_GROUP_OBJ: group_obj = pa; break; case ACL_OTHER: pa->e_perm &= mode | ~S_IRWXO; mode &= pa->e_perm | ~S_IRWXO; break; case ACL_MASK: mask_obj = pa; not_equiv = 1; break; default: return -EIO; } } if (mask_obj) { mask_obj->e_perm &= (mode >> 3) | ~S_IRWXO; mode &= (mask_obj->e_perm << 3) | ~S_IRWXG; } else { if (!group_obj) return -EIO; group_obj->e_perm &= (mode >> 3) | ~S_IRWXO; mode &= (group_obj->e_perm << 3) | ~S_IRWXG; } *mode_p = (*mode_p & ~S_IRWXUGO) | mode; return not_equiv; } /* * Modify the ACL for the chmod syscall. */ int posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode) { struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL; struct posix_acl_entry *pa, *pe; /* assert(atomic_read(acl->a_refcount) == 1); */ FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: pa->e_perm = (mode & S_IRWXU) >> 6; break; case ACL_USER: case ACL_GROUP: break; case ACL_GROUP_OBJ: group_obj = pa; break; case ACL_MASK: mask_obj = pa; break; case ACL_OTHER: pa->e_perm = (mode & S_IRWXO); break; default: return -EIO; } } if (mask_obj) { mask_obj->e_perm = (mode & S_IRWXG) >> 3; } else { if (!group_obj) return -EIO; group_obj->e_perm = (mode & S_IRWXG) >> 3; } return 0; }
gpl-2.0
AscendG630-DEV/android_kernel_huawei_g630
drivers/hwmon/ad7314.c
3236
3806
/*
 * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 *
 * Conversion to hwmon from IIO done by Jonathan Cameron <jic23@cam.ac.uk>
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

/*
 * AD7314 power mode
 */
#define AD7314_PD		0x2000

/*
 * AD7314 temperature masks
 */
#define AD7314_TEMP_SIGN		0x200
#define AD7314_TEMP_MASK		0x7FE0
#define AD7314_TEMP_OFFSET		5

/*
 * ADT7301 and ADT7302 temperature masks
 */
#define ADT7301_TEMP_SIGN		0x2000
#define ADT7301_TEMP_MASK		0x3FFF

enum ad7314_variant {
	adt7301,
	adt7302,
	ad7314,
};

struct ad7314_data {
	struct spi_device	*spi_dev;
	struct device		*hwmon_dev;
	/* DMA-safe receive word; kept in its own cacheline for SPI. */
	u16 rx ____cacheline_aligned;
};

/* Read one big-endian 16-bit sample from the chip.
 * Returns the host-order raw value, or a negative errno.
 */
static int ad7314_spi_read(struct ad7314_data *chip)
{
	int ret;

	ret = spi_read(chip->spi_dev, (u8 *)&chip->rx, sizeof(chip->rx));
	if (ret < 0) {
		dev_err(&chip->spi_dev->dev, "SPI read error\n");
		return ret;
	}

	return be16_to_cpu(chip->rx);
}

/* sysfs show routine for temp1_input: raw sample converted to
 * millidegrees Celsius, scaled per chip variant.
 */
static ssize_t ad7314_show_temperature(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct ad7314_data *chip = dev_get_drvdata(dev);
	s16 data;
	int ret;

	ret = ad7314_spi_read(chip);
	if (ret < 0)
		return ret;
	switch (spi_get_device_id(chip->spi_dev)->driver_data) {
	case ad7314:
		data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
		/* sign-extend the 10-bit field (resolution: 0.25 deg C) */
		data = (data << 6) >> 6;

		return sprintf(buf, "%d\n", 250 * data);
	case adt7301:
	case adt7302:
		/*
		 * Documented as a 13 bit twos complement register
		 * with a sign bit - which is a 14 bit 2's complement
		 * register.  1lsb - 31.25 milli degrees centigrade
		 */
		data = ret & ADT7301_TEMP_MASK;
		data = (data << 2) >> 2;

		return sprintf(buf, "%d\n",
			       DIV_ROUND_CLOSEST(data * 3125, 100));
	default:
		return -EINVAL;
	}
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  ad7314_show_temperature, NULL, 0);

static struct attribute *ad7314_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad7314_group = {
	.attrs = ad7314_attributes,
};

/* Allocate driver state, expose temp1_input and register with hwmon.
 *
 * Fix: chip->spi_dev must be assigned BEFORE sysfs_create_group();
 * the original code assigned it last, so a reader opening temp1_input
 * between group creation and the end of probe would dereference a
 * NULL chip->spi_dev in ad7314_spi_read().
 */
static int __devinit ad7314_probe(struct spi_device *spi_dev)
{
	int ret;
	struct ad7314_data *chip;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	chip->spi_dev = spi_dev;
	dev_set_drvdata(&spi_dev->dev, chip);

	ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group);
	if (ret < 0)
		goto error_free_chip;

	chip->hwmon_dev = hwmon_device_register(&spi_dev->dev);
	if (IS_ERR(chip->hwmon_dev)) {
		ret = PTR_ERR(chip->hwmon_dev);
		goto error_remove_group;
	}

	return 0;

error_remove_group:
	sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
error_free_chip:
	kfree(chip);
error_ret:
	return ret;
}

/* Tear down in reverse order of probe. */
static int __devexit ad7314_remove(struct spi_device *spi_dev)
{
	struct ad7314_data *chip = dev_get_drvdata(&spi_dev->dev);

	hwmon_device_unregister(chip->hwmon_dev);
	sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
	kfree(chip);

	return 0;
}

static const struct spi_device_id ad7314_id[] = {
	{ "adt7301", adt7301 },
	{ "adt7302", adt7302 },
	{ "ad7314", ad7314 },
	{ }
};
MODULE_DEVICE_TABLE(spi, ad7314_id);

static struct spi_driver ad7314_driver = {
	.driver = {
		.name = "ad7314",
		.owner = THIS_MODULE,
	},
	.probe = ad7314_probe,
	.remove = __devexit_p(ad7314_remove),
	.id_table = ad7314_id,
};

module_spi_driver(ad7314_driver);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital"
			" temperature sensor driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
jstotero/Cucciolone-Rewrite
drivers/video/omap/lcd_inn1510.c
4260
3027
/*
 * LCD panel support for the TI OMAP1510 Innovator board
 *
 * Copyright (C) 2004 Nokia Corporation
 * Author: Imre Deak <imre.deak@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <plat/fpga.h>

#include "omapfb.h"

/* No panel-specific setup is needed on this board. */
static int innovator1510_panel_init(struct lcd_panel *panel,
					struct omapfb_device *fbdev)
{
	return 0;
}

static void innovator1510_panel_cleanup(struct lcd_panel *panel)
{
}

/* Power the panel via the board FPGA's LCD control register. */
static int innovator1510_panel_enable(struct lcd_panel *panel)
{
	fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
	return 0;
}

static void innovator1510_panel_disable(struct lcd_panel *panel)
{
	fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
}

/* No optional capabilities are advertised. */
static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 240x320 16bpp TFT panel timings for the Innovator 1510. */
struct lcd_panel innovator1510_panel = {
	.name		= "inn1510",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 12500,
	.hsw		= 40,
	.hfp		= 40,
	.hbp		= 72,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 12,

	.init		= innovator1510_panel_init,
	.cleanup	= innovator1510_panel_cleanup,
	.enable		= innovator1510_panel_enable,
	.disable	= innovator1510_panel_disable,
	.get_caps	= innovator1510_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int innovator1510_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&innovator1510_panel);
	return 0;
}

static int innovator1510_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Suspend/resume: nothing to save or restore for this panel. */
static int innovator1510_panel_suspend(struct platform_device *pdev,
					pm_message_t mesg)
{
	return 0;
}

static int innovator1510_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver innovator1510_panel_driver = {
	.probe		= innovator1510_panel_probe,
	.remove		= innovator1510_panel_remove,
	.suspend	= innovator1510_panel_suspend,
	.resume		= innovator1510_panel_resume,
	.driver		= {
		.name	= "lcd_inn1510",
		.owner	= THIS_MODULE,
	},
};

static int __init innovator1510_panel_drv_init(void)
{
	return platform_driver_register(&innovator1510_panel_driver);
}

static void __exit innovator1510_panel_drv_cleanup(void)
{
	platform_driver_unregister(&innovator1510_panel_driver);
}

module_init(innovator1510_panel_drv_init);
module_exit(innovator1510_panel_drv_cleanup);
gpl-2.0
GuneetAtwal/kernel_mt6589
arch/arm/mach-exynos/cpuidle.c
4516
5985
/* linux/arch/arm/mach-exynos4/cpuidle.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/time.h>

#include <asm/proc-fns.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <asm/unified.h>
#include <mach/regs-pmu.h>
#include <mach/pmu.h>

#include <plat/cpu.h>

/* Where the resume address/flag live depends on the silicon revision:
 * rev 1.1 uses INFORM6/7, rev 1.0 uses SYSRAM offsets, others INFORM0/1.
 */
#define REG_DIRECTGO_ADDR	(samsung_rev() == EXYNOS4210_REV_1_1 ? \
			S5P_INFORM7 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \
			(S5P_VA_SYSRAM + 0x24) : S5P_INFORM0))
#define REG_DIRECTGO_FLAG	(samsung_rev() == EXYNOS4210_REV_1_1 ? \
			S5P_INFORM6 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \
			(S5P_VA_SYSRAM + 0x20) : S5P_INFORM1))

/* Magic value telling the boot code this is an AFTR wakeup. */
#define S5P_CHECK_AFTR	0xFCBA0D10

static int exynos4_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			      int index);
static int exynos4_enter_lowpower(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				  int index);

/* C0: plain WFI.  C1: AFTR (ARM off, top running) power-down. */
static struct cpuidle_state exynos4_cpuidle_set[] __initdata = {
	[0] = {
		.enter			= exynos4_enter_idle,
		.exit_latency		= 1,
		.target_residency	= 100000,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "C0",
		.desc			= "ARM clock gating(WFI)",
	},
	[1] = {
		.enter			= exynos4_enter_lowpower,
		.exit_latency		= 300,
		.target_residency	= 100000,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "C1",
		.desc			= "ARM power down",
	},
};

static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);

static struct cpuidle_driver exynos4_idle_driver = {
	.name		= "exynos4_idle",
	.owner		= THIS_MODULE,
};

/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */
static void exynos4_set_wakeupmask(void)
{
	__raw_writel(0x0000ff3e, S5P_WAKEUP_MASK);
}

static unsigned int g_pwr_ctrl, g_diag_reg;

/* Save the Cortex-A9 power-control and diagnostic cp15 registers, which
 * are lost while the core is powered down in AFTR.
 */
static void save_cpu_arch_register(void)
{
	/*read power control register*/
	asm("mrc p15, 0, %0, c15, c0, 0" : "=r"(g_pwr_ctrl) : : "cc");
	/*read diagnostic register*/
	asm("mrc p15, 0, %0, c15, c0, 1" : "=r"(g_diag_reg) : : "cc");
	return;
}

static void restore_cpu_arch_register(void)
{
	/*write power control register*/
	asm("mcr p15, 0, %0, c15, c0, 0" : : "r"(g_pwr_ctrl) : "cc");
	/*write diagnostic register*/
	asm("mcr p15, 0, %0, c15, c0, 1" : : "r"(g_diag_reg) : "cc");
	return;
}

/* Finisher run by cpu_suspend(): execute WFI with state already saved. */
static int idle_finisher(unsigned long flags)
{
	cpu_do_idle();
	return 1;
}

/* Enter AFTR ("ARM off, top running") on CPU0 and account the time
 * spent as the state's residency.
 */
static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	struct timeval before, after;
	int idle_time;
	unsigned long tmp;

	local_irq_disable();
	do_gettimeofday(&before);

	exynos4_set_wakeupmask();

	/* Set value of power down register for aftr mode */
	exynos4_sys_powerdown_conf(SYS_AFTR);

	/* Tell the boot code where to resume and flag it as AFTR. */
	__raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
	__raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);

	save_cpu_arch_register();

	/* Setting Central Sequence Register for power down mode */
	tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION);
	tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
	__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);

	cpu_pm_enter();
	cpu_suspend(0, idle_finisher);

#ifdef CONFIG_SMP
	scu_enable(S5P_VA_SCU);
#endif
	cpu_pm_exit();

	restore_cpu_arch_register();

	/*
	 * If PMU failed while entering sleep mode, WFI will be
	 * ignored by PMU and then exiting cpu_do_idle().
	 * S5P_CENTRAL_LOWPWR_CFG bit will not be set automatically
	 * in this situation.
	 */
	tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION);
	if (!(tmp & S5P_CENTRAL_LOWPWR_CFG)) {
		tmp |= S5P_CENTRAL_LOWPWR_CFG;
		__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
	}

	/* Clear wakeup state register */
	__raw_writel(0x0, S5P_WAKEUP_STAT);

	do_gettimeofday(&after);

	local_irq_enable();
	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);

	dev->last_residency = idle_time;
	return index;
}

/* C0: simple clock-gated WFI, timed the same way as AFTR. */
static int exynos4_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	struct timeval before, after;
	int idle_time;

	local_irq_disable();
	do_gettimeofday(&before);

	cpu_do_idle();

	do_gettimeofday(&after);
	local_irq_enable();
	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);

	dev->last_residency = idle_time;
	return index;
}

/* Dispatch to AFTR when possible, falling back to WFI when other
 * cores are online (AFTR requires all secondaries offline).
 */
static int exynos4_enter_lowpower(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	int new_index = index;

	/* This mode only can be entered when other core's are offline */
	if (num_online_cpus() > 1)
		new_index = drv->safe_state_index;

	if (new_index == 0)
		return exynos4_enter_idle(dev, drv, new_index);
	else
		return exynos4_enter_core0_aftr(dev, drv, new_index);
}

/* Copy the state table into the driver, register it, then register one
 * cpuidle device per online CPU.  Only CPU0 gets both states; the
 * secondaries are restricted to WFI.
 */
static int __init exynos4_init_cpuidle(void)
{
	int i, max_cpuidle_state, cpu_id;
	struct cpuidle_device *device;
	struct cpuidle_driver *drv = &exynos4_idle_driver;

	/* Setup cpuidle driver */
	drv->state_count = (sizeof(exynos4_cpuidle_set) /
				       sizeof(struct cpuidle_state));
	max_cpuidle_state = drv->state_count;
	for (i = 0; i < max_cpuidle_state; i++) {
		memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
				sizeof(struct cpuidle_state));
	}
	drv->safe_state_index = 0;
	cpuidle_register_driver(&exynos4_idle_driver);

	for_each_cpu(cpu_id, cpu_online_mask) {
		device = &per_cpu(exynos4_cpuidle_device, cpu_id);
		device->cpu = cpu_id;

		if (cpu_id == 0)
			device->state_count = (sizeof(exynos4_cpuidle_set) /
					       sizeof(struct cpuidle_state));
		else
			device->state_count = 1;	/* Support IDLE only */

		if (cpuidle_register_device(device)) {
			printk(KERN_ERR "CPUidle register device failed\n,");
			return -EIO;
		}
	}

	return 0;
}
device_initcall(exynos4_init_cpuidle);
gpl-2.0
A2109devs/lenovo_a2109a_kernel
arch/ia64/sn/kernel/huberror.c
4772
6371
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/interrupt.h> #include <asm/delay.h> #include <asm/sn/sn_sal.h> #include "ioerror.h" #include <asm/sn/addrs.h> #include <asm/sn/shubio.h> #include <asm/sn/geo.h> #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" #include <asm/sn/bte.h> void hubiio_crb_error_handler(struct hubdev_info *hubdev_info); extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *, int); static irqreturn_t hub_eint_handler(int irq, void *arg) { struct hubdev_info *hubdev_info; struct ia64_sal_retval ret_stuff; nasid_t nasid; ret_stuff.status = 0; ret_stuff.v0 = 0; hubdev_info = (struct hubdev_info *)arg; nasid = hubdev_info->hdi_nasid; if (is_shub1()) { SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, (u64) nasid, 0, 0, 0, 0, 0, 0); if ((int)ret_stuff.v0) panic("%s: Fatal %s Error", __func__, ((nasid & 1) ? "TIO" : "HUBII")); if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ (void)hubiio_crb_error_handler(hubdev_info); } else if (nasid & 1) { /* TIO errors */ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, (u64) nasid, 0, 0, 0, 0, 0, 0); if ((int)ret_stuff.v0) panic("%s: Fatal TIO Error", __func__); } else bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); return IRQ_HANDLED; } /* * Free the hub CRB "crbnum" which encountered an error. * Assumption is, error handling was successfully done, * and we now want to return the CRB back to Hub for normal usage. * * In order to free the CRB, all that's needed is to de-allocate it * * Assumption: * No other processor is mucking around with the hub control register. * So, upper layer has to single thread this. 
*/ void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum) { ii_icrb0_b_u_t icrbb; /* * The hardware does NOT clear the mark bit, so it must get cleared * here to be sure the error is not processed twice. */ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum)); icrbb.b_mark = 0; REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum), icrbb.ii_icrb0_b_regval); /* * Deallocate the register wait till hub indicates it's done. */ REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum)); while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND) cpu_relax(); } /* * hubiio_crb_error_handler * * This routine gets invoked when a hub gets an error * interrupt. So, the routine is running in interrupt context * at error interrupt level. * Action: * It's responsible for identifying ALL the CRBs that are marked * with error, and process them. * * If you find the CRB that's marked with error, map this to the * reason it caused error, and invoke appropriate error handler. * * XXX Be aware of the information in the context register. * * NOTE: * Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt * handler can be run on any node. (not necessarily the node * corresponding to the hub that encountered error). */ void hubiio_crb_error_handler(struct hubdev_info *hubdev_info) { nasid_t nasid; ii_icrb0_a_u_t icrba; /* II CRB Register A */ ii_icrb0_b_u_t icrbb; /* II CRB Register B */ ii_icrb0_c_u_t icrbc; /* II CRB Register C */ ii_icrb0_d_u_t icrbd; /* II CRB Register D */ ii_icrb0_e_u_t icrbe; /* II CRB Register D */ int i; int num_errors = 0; /* Num of errors handled */ ioerror_t ioerror; nasid = hubdev_info->hdi_nasid; /* * XXX - Add locking for any recovery actions */ /* * Scan through all CRBs in the Hub, and handle the errors * in any of the CRBs marked. */ for (i = 0; i < IIO_NUM_CRBS; i++) { /* Check this crb entry to see if it is in error. 
*/ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i)); if (icrbb.b_mark == 0) { continue; } icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i)); IOERROR_INIT(&ioerror); /* read other CRB error registers. */ icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i)); icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i)); icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i)); IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode); /* Check if this error is due to BTE operation, * and handle it separately. */ if (icrbd.d_bteop || ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 || icrbb.b_initiator == IIO_ICRB_INIT_BTE1) && (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE || icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) { int bte_num; if (icrbd.d_bteop) bte_num = icrbc.c_btenum; else /* b_initiator bit 2 gives BTE number */ bte_num = (icrbb.b_initiator & 0x4) >> 2; hubiio_crb_free(hubdev_info, i); bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num, i, &ioerror, icrbd.d_bteop); num_errors++; continue; } } } /* * Function : hub_error_init * Purpose : initialize the error handling requirements for a given hub. * Parameters : cnode, the compact nodeid. * Assumptions : Called only once per hub, either by a local cpu. Or by a * remote cpu, when this hub is headless.(cpuless) * Returns : None */ void hub_error_init(struct hubdev_info *hubdev_info) { if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED, "SN_hub_error", hubdev_info)) { printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n", hubdev_info); return; } sn_set_err_irq_affinity(SGI_II_ERROR); } /* * Function : ice_error_init * Purpose : initialize the error handling requirements for a given tio. * Parameters : cnode, the compact nodeid. * Assumptions : Called only once per tio. 
* Returns : None */ void ice_error_init(struct hubdev_info *hubdev_info) { if (request_irq (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error", (void *)hubdev_info)) { printk("ice_error_init: request_irq() error hubdev_info 0x%p\n", hubdev_info); return; } sn_set_err_irq_affinity(SGI_TIO_ERROR); }
gpl-2.0
sakuraba001/android_kernel_samsung_d2
net/core/netprio_cgroup.c
4772
7551
/* * net/core/netprio_cgroup.c Priority Control Group * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Neil Horman <nhorman@tuxdriver.com> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/cgroup.h> #include <linux/rcupdate.h> #include <linux/atomic.h> #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> #include <net/netprio_cgroup.h> static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp); static void cgrp_destroy(struct cgroup *cgrp); static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); struct cgroup_subsys net_prio_subsys = { .name = "net_prio", .create = cgrp_create, .destroy = cgrp_destroy, .populate = cgrp_populate, #ifdef CONFIG_NETPRIO_CGROUP .subsys_id = net_prio_subsys_id, #endif .module = THIS_MODULE }; #define PRIOIDX_SZ 128 static unsigned long prioidx_map[PRIOIDX_SZ]; static DEFINE_SPINLOCK(prioidx_map_lock); static atomic_t max_prioidx = ATOMIC_INIT(0); static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id), struct cgroup_netprio_state, css); } static int get_prioidx(u32 *prio) { unsigned long flags; u32 prioidx; spin_lock_irqsave(&prioidx_map_lock, flags); prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ); if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) { spin_unlock_irqrestore(&prioidx_map_lock, flags); return -ENOSPC; } set_bit(prioidx, prioidx_map); spin_unlock_irqrestore(&prioidx_map_lock, flags); atomic_set(&max_prioidx, prioidx); *prio = prioidx; return 0; } static void put_prioidx(u32 idx) { unsigned long flags; 
spin_lock_irqsave(&prioidx_map_lock, flags); clear_bit(idx, prioidx_map); spin_unlock_irqrestore(&prioidx_map_lock, flags); } static void extend_netdev_table(struct net_device *dev, u32 new_len) { size_t new_size = sizeof(struct netprio_map) + ((sizeof(u32) * new_len)); struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL); struct netprio_map *old_priomap; int i; old_priomap = rtnl_dereference(dev->priomap); if (!new_priomap) { printk(KERN_WARNING "Unable to alloc new priomap!\n"); return; } for (i = 0; old_priomap && (i < old_priomap->priomap_len); i++) new_priomap->priomap[i] = old_priomap->priomap[i]; new_priomap->priomap_len = new_len; rcu_assign_pointer(dev->priomap, new_priomap); if (old_priomap) kfree_rcu(old_priomap, rcu); } static void update_netdev_tables(void) { struct net_device *dev; u32 max_len = atomic_read(&max_prioidx) + 1; struct netprio_map *map; rtnl_lock(); for_each_netdev(&init_net, dev) { map = rtnl_dereference(dev->priomap); if ((!map) || (map->priomap_len < max_len)) extend_netdev_table(dev, max_len); } rtnl_unlock(); } static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) { struct cgroup_netprio_state *cs; int ret; cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return ERR_PTR(-ENOMEM); if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { kfree(cs); return ERR_PTR(-EINVAL); } ret = get_prioidx(&cs->prioidx); if (ret != 0) { printk(KERN_WARNING "No space in priority index array\n"); kfree(cs); return ERR_PTR(ret); } return &cs->css; } static void cgrp_destroy(struct cgroup *cgrp) { struct cgroup_netprio_state *cs; struct net_device *dev; struct netprio_map *map; cs = cgrp_netprio_state(cgrp); rtnl_lock(); for_each_netdev(&init_net, dev) { map = rtnl_dereference(dev->priomap); if (map) map->priomap[cs->prioidx] = 0; } rtnl_unlock(); put_prioidx(cs->prioidx); kfree(cs); } static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) { return (u64)cgrp_netprio_state(cgrp)->prioidx; } static int 
read_priomap(struct cgroup *cont, struct cftype *cft, struct cgroup_map_cb *cb) { struct net_device *dev; u32 prioidx = cgrp_netprio_state(cont)->prioidx; u32 priority; struct netprio_map *map; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { map = rcu_dereference(dev->priomap); priority = map ? map->priomap[prioidx] : 0; cb->fill(cb, dev->name, priority); } rcu_read_unlock(); return 0; } static int write_priomap(struct cgroup *cgrp, struct cftype *cft, const char *buffer) { char *devname = kstrdup(buffer, GFP_KERNEL); int ret = -EINVAL; u32 prioidx = cgrp_netprio_state(cgrp)->prioidx; unsigned long priority; char *priostr; struct net_device *dev; struct netprio_map *map; if (!devname) return -ENOMEM; /* * Minimally sized valid priomap string */ if (strlen(devname) < 3) goto out_free_devname; priostr = strstr(devname, " "); if (!priostr) goto out_free_devname; /* *Separate the devname from the associated priority *and advance the priostr poitner to the priority value */ *priostr = '\0'; priostr++; /* * If the priostr points to NULL, we're at the end of the passed * in string, and its not a valid write */ if (*priostr == '\0') goto out_free_devname; ret = kstrtoul(priostr, 10, &priority); if (ret < 0) goto out_free_devname; ret = -ENODEV; dev = dev_get_by_name(&init_net, devname); if (!dev) goto out_free_devname; update_netdev_tables(); ret = 0; rcu_read_lock(); map = rcu_dereference(dev->priomap); if (map) map->priomap[prioidx] = priority; rcu_read_unlock(); dev_put(dev); out_free_devname: kfree(devname); return ret; } static struct cftype ss_files[] = { { .name = "prioidx", .read_u64 = read_prioidx, }, { .name = "ifpriomap", .read_map = read_priomap, .write_string = write_priomap, }, }; static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) { return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); } static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct 
netprio_map *old; /* * Note this is called with rtnl_lock held so we have update side * protection on our rcu assignments */ switch (event) { case NETDEV_UNREGISTER: old = rtnl_dereference(dev->priomap); RCU_INIT_POINTER(dev->priomap, NULL); if (old) kfree_rcu(old, rcu); break; } return NOTIFY_DONE; } static struct notifier_block netprio_device_notifier = { .notifier_call = netprio_device_event }; static int __init init_cgroup_netprio(void) { int ret; ret = cgroup_load_subsys(&net_prio_subsys); if (ret) goto out; #ifndef CONFIG_NETPRIO_CGROUP smp_wmb(); net_prio_subsys_id = net_prio_subsys.subsys_id; #endif register_netdevice_notifier(&netprio_device_notifier); out: return ret; } static void __exit exit_cgroup_netprio(void) { struct netprio_map *old; struct net_device *dev; unregister_netdevice_notifier(&netprio_device_notifier); cgroup_unload_subsys(&net_prio_subsys); #ifndef CONFIG_NETPRIO_CGROUP net_prio_subsys_id = -1; synchronize_rcu(); #endif rtnl_lock(); for_each_netdev(&init_net, dev) { old = rtnl_dereference(dev->priomap); RCU_INIT_POINTER(dev->priomap, NULL); if (old) kfree_rcu(old, rcu); } rtnl_unlock(); } module_init(init_cgroup_netprio); module_exit(exit_cgroup_netprio); MODULE_LICENSE("GPL v2");
gpl-2.0
tjarnold/jewel_3.4.49
drivers/hv/connection.c
4772
8274
/* * * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/hyperv.h> #include <asm/hyperv.h> #include "hyperv_vmbus.h" struct vmbus_connection vmbus_connection = { .conn_state = DISCONNECTED, .next_gpadl_handle = ATOMIC_INIT(0xE1E10), }; /* * vmbus_connect - Sends a connect request on the partition service connection */ int vmbus_connect(void) { int ret = 0; int t; struct vmbus_channel_msginfo *msginfo = NULL; struct vmbus_channel_initiate_contact *msg; unsigned long flags; /* Initialize the vmbus connection */ vmbus_connection.conn_state = CONNECTING; vmbus_connection.work_queue = create_workqueue("hv_vmbus_con"); if (!vmbus_connection.work_queue) { ret = -ENOMEM; goto cleanup; } INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); INIT_LIST_HEAD(&vmbus_connection.chn_list); spin_lock_init(&vmbus_connection.channel_lock); /* * Setup the vmbus event connection for channel interrupt * abstraction stuff */ vmbus_connection.int_page = (void 
*)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; } vmbus_connection.recv_int_page = vmbus_connection.int_page; vmbus_connection.send_int_page = (void *)((unsigned long)vmbus_connection.int_page + (PAGE_SIZE >> 1)); /* * Setup the monitor notification facility. The 1st page for * parent->child and the 2nd page for child->parent */ vmbus_connection.monitor_pages = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1); if (vmbus_connection.monitor_pages == NULL) { ret = -ENOMEM; goto cleanup; } msginfo = kzalloc(sizeof(*msginfo) + sizeof(struct vmbus_channel_initiate_contact), GFP_KERNEL); if (msginfo == NULL) { ret = -ENOMEM; goto cleanup; } init_completion(&msginfo->waitevent); msg = (struct vmbus_channel_initiate_contact *)msginfo->msg; msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT; msg->vmbus_version_requested = VMBUS_REVISION_NUMBER; msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages); msg->monitor_page2 = virt_to_phys( (void *)((unsigned long)vmbus_connection.monitor_pages + PAGE_SIZE)); /* * Add to list before we send the request since we may * receive the response before returning from this routine */ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_add_tail(&msginfo->msglistentry, &vmbus_connection.chn_msg_list); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_initiate_contact)); if (ret != 0) { spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); goto cleanup; } /* Wait for the connection response */ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); if (t == 0) { spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); 
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ret = -ETIMEDOUT; goto cleanup; } spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); /* Check if successful */ if (msginfo->response.version_response.version_supported) { vmbus_connection.conn_state = CONNECTED; } else { pr_err("Unable to connect, " "Version %d not supported by Hyper-V\n", VMBUS_REVISION_NUMBER); ret = -ECONNREFUSED; goto cleanup; } kfree(msginfo); return 0; cleanup: vmbus_connection.conn_state = DISCONNECTED; if (vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); vmbus_connection.int_page = NULL; } if (vmbus_connection.monitor_pages) { free_pages((unsigned long)vmbus_connection.monitor_pages, 1); vmbus_connection.monitor_pages = NULL; } kfree(msginfo); return ret; } /* * relid2channel - Get the channel object given its * child relative id (ie channel id) */ struct vmbus_channel *relid2channel(u32 relid) { struct vmbus_channel *channel; struct vmbus_channel *found_channel = NULL; unsigned long flags; spin_lock_irqsave(&vmbus_connection.channel_lock, flags); list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (channel->offermsg.child_relid == relid) { found_channel = channel; break; } } spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); return found_channel; } /* * process_chn_event - Process a channel event notification */ static void process_chn_event(u32 relid) { struct vmbus_channel *channel; unsigned long flags; /* * Find the channel based on this relid and invokes the * channel callback to process the event */ channel = relid2channel(relid); if (!channel) { pr_err("channel not found for relid - %u\n", relid); return; } /* * A channel once created is persistent even when there * is no driver handling the device. 
An unloading driver * sets the onchannel_callback to NULL under the * protection of the channel inbound_lock. Thus, checking * and invoking the driver specific callback takes care of * orderly unloading of the driver. */ spin_lock_irqsave(&channel->inbound_lock, flags); if (channel->onchannel_callback != NULL) channel->onchannel_callback(channel->channel_callback_context); else pr_err("no channel callback for relid - %u\n", relid); spin_unlock_irqrestore(&channel->inbound_lock, flags); } /* * vmbus_on_event - Handler for events */ void vmbus_on_event(unsigned long data) { u32 dword; u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5; int bit; u32 relid; u32 *recv_int_page = vmbus_connection.recv_int_page; /* Check events */ if (!recv_int_page) return; for (dword = 0; dword < maxdword; dword++) { if (!recv_int_page[dword]) continue; for (bit = 0; bit < 32; bit++) { if (sync_test_and_clear_bit(bit, (unsigned long *)&recv_int_page[dword])) { relid = (dword << 5) + bit; if (relid == 0) /* * Special case - vmbus * channel protocol msg */ continue; process_chn_event(relid); } } } } /* * vmbus_post_msg - Send a msg on the vmbus's message connection */ int vmbus_post_msg(void *buffer, size_t buflen) { union hv_connection_id conn_id; int ret = 0; int retries = 0; conn_id.asu32 = 0; conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; /* * hv_post_message() can have transient failures because of * insufficient resources. Retry the operation a couple of * times before giving up. */ while (retries < 3) { ret = hv_post_message(conn_id, 1, buffer, buflen); if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) return ret; retries++; msleep(100); } return ret; } /* * vmbus_set_event - Send an event notification to the parent */ int vmbus_set_event(u32 child_relid) { /* Each u32 represents 32 channels */ sync_set_bit(child_relid & 31, (unsigned long *)vmbus_connection.send_int_page + (child_relid >> 5)); return hv_signal_event(); }
gpl-2.0
MoKee/android_kernel_lge_g3
drivers/input/touchscreen/cyttsp_i2c.c
5028
3390
/* * Source for: * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver. * For use with Cypress Txx3xx parts. * Supported parts include: * CY8CTST341 * CY8CTMA340 * * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Contact Cypress Semiconductor at www.cypress.com <kev@cypress.com> * */ #include "cyttsp_core.h" #include <linux/i2c.h> #include <linux/input.h> #define CY_I2C_DATA_SIZE 128 static int cyttsp_i2c_read_block_data(struct cyttsp *ts, u8 addr, u8 length, void *values) { struct i2c_client *client = to_i2c_client(ts->dev); struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, .buf = values, }, }; int retval; retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (retval < 0) return retval; return retval != ARRAY_SIZE(msgs) ? -EIO : 0; } static int cyttsp_i2c_write_block_data(struct cyttsp *ts, u8 addr, u8 length, const void *values) { struct i2c_client *client = to_i2c_client(ts->dev); int retval; ts->xfer_buf[0] = addr; memcpy(&ts->xfer_buf[1], values, length); retval = i2c_master_send(client, ts->xfer_buf, length + 1); return retval < 0 ? 
retval : 0; } static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = { .bustype = BUS_I2C, .write = cyttsp_i2c_write_block_data, .read = cyttsp_i2c_read_block_data, }; static int __devinit cyttsp_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct cyttsp *ts; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "I2C functionality not Supported\n"); return -EIO; } ts = cyttsp_probe(&cyttsp_i2c_bus_ops, &client->dev, client->irq, CY_I2C_DATA_SIZE); if (IS_ERR(ts)) return PTR_ERR(ts); i2c_set_clientdata(client, ts); return 0; } static int __devexit cyttsp_i2c_remove(struct i2c_client *client) { struct cyttsp *ts = i2c_get_clientdata(client); cyttsp_remove(ts); return 0; } static const struct i2c_device_id cyttsp_i2c_id[] = { { CY_I2C_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, cyttsp_i2c_id); static struct i2c_driver cyttsp_i2c_driver = { .driver = { .name = CY_I2C_NAME, .owner = THIS_MODULE, .pm = &cyttsp_pm_ops, }, .probe = cyttsp_i2c_probe, .remove = __devexit_p(cyttsp_i2c_remove), .id_table = cyttsp_i2c_id, }; module_i2c_driver(cyttsp_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver"); MODULE_AUTHOR("Cypress"); MODULE_ALIAS("i2c:cyttsp");
gpl-2.0
Jackeagle/android_kernel_lge_d838
arch/avr32/mm/init.c
7588
4472
/* * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/init.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/bootmem.h> #include <linux/pagemap.h> #include <linux/nodemask.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/setup.h> #include <asm/sections.h> pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; struct page *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); /* * Cache of MMU context last used. */ unsigned long mmu_context_cache = NO_CONTEXT; /* * paging_init() sets up the page tables * * This routine also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { extern unsigned long _evba; void *zero_page; int nid; /* * Make sure we can handle exceptions before enabling * paging. Not that we should ever _get_ any exceptions this * early, but you never know... */ printk("Exception vectors start at %p\n", &_evba); sysreg_write(EVBA, (unsigned long)&_evba); /* * Since we are ready to handle exceptions now, we should let * the CPU generate them... */ __asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT)); /* * Allocate the zero page. The allocator will panic if it * can't satisfy the request, so no need to check. 
*/ zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE); sysreg_write(PTBR, (unsigned long)swapper_pg_dir); enable_mmu(); printk ("CPU: Paging enabled\n"); for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long zones_size[MAX_NR_ZONES]; unsigned long low, start_pfn; start_pfn = pgdat->bdata->node_min_pfn; low = pgdat->bdata->node_low_pfn; memset(zones_size, 0, sizeof(zones_size)); zones_size[ZONE_NORMAL] = low - start_pfn; printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", nid, start_pfn, low); free_area_init_node(nid, zones_size, start_pfn, NULL); printk("Node %u: mem_map starts at %p\n", pgdat->node_id, pgdat->node_mem_map); } mem_map = NODE_DATA(0)->node_mem_map; empty_zero_page = virt_to_page(zero_page); flush_dcache_page(empty_zero_page); } void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; int nid, i; reservedpages = 0; high_memory = NULL; /* this will put all low memory onto the freelists */ for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long node_pages = 0; void *node_high_memory; num_physpages += pgdat->node_present_pages; if (pgdat->node_spanned_pages != 0) node_pages = free_all_bootmem_node(pgdat); totalram_pages += node_pages; for (i = 0; i < node_pages; i++) if (PageReserved(pgdat->node_mem_map + i)) reservedpages++; node_high_memory = (void *)((pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT); if (node_high_memory > high_memory) high_memory = node_high_memory; } max_mapnr = MAP_NR(high_memory); codesize = (unsigned long)_etext - (unsigned long)_text; datasize = (unsigned long)_edata - (unsigned long)_data; initsize = (unsigned long)__init_end - (unsigned long)__init_begin; printk ("Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init)\n", nr_free_pages() << (PAGE_SHIFT - 10), totalram_pages << (PAGE_SHIFT - 10), codesize >> 10, reservedpages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); } static inline void 
free_area(unsigned long addr, unsigned long end, char *s) { unsigned int size = (end - addr) >> 10; for (; addr < end; addr += PAGE_SIZE) { struct page *page = virt_to_page(addr); ClearPageReserved(page); init_page_count(page); free_page(addr); totalram_pages++; } if (size && s) printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n", s, size, end - (size << 10), end); } void free_initmem(void) { free_area((unsigned long)__init_begin, (unsigned long)__init_end, "init"); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { free_area(start, end, "initrd"); } #endif
gpl-2.0
TeamFreedom/mecha_2.6.35
drivers/input/touchscreen/touchit213.c
9892
6060
/* * Sahara TouchIT-213 serial touchscreen driver * * Copyright (c) 2007-2008 Claudio Nieder <private@claudio.ch> * * Based on Touchright driver (drivers/input/touchscreen/touchright.c) * Copyright (c) 2006 Rick Koch <n1gp@hotmail.com> * Copyright (c) 2004 Vojtech Pavlik * and Dan Streetman <ddstreet@ieee.org> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Sahara TouchIT-213 serial touchscreen driver" MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ /* * Data is received through COM1 at 9600bit/s,8bit,no parity in packets * of 5 byte each. * * +--------+ +--------+ +--------+ +--------+ +--------+ * |1000000p| |0xxxxxxx| |0xxxxxxx| |0yyyyyyy| |0yyyyyyy| * +--------+ +--------+ +--------+ +--------+ +--------+ * MSB LSB MSB LSB * * The value of p is 1 as long as the screen is touched and 0 when * reporting the location where touching stopped, e.g. where the pen was * lifted from the screen. * * When holding the screen in landscape mode as the BIOS text output is * presented, x is the horizontal axis with values growing from left to * right and y is the vertical axis with values growing from top to * bottom. * * When holding the screen in portrait mode with the Sahara logo in its * correct position, x ist the vertical axis with values growing from * top to bottom and y is the horizontal axis with values growing from * right to left. 
*/ #define T213_FORMAT_TOUCH_BIT 0x01 #define T213_FORMAT_STATUS_BYTE 0x80 #define T213_FORMAT_STATUS_MASK ~T213_FORMAT_TOUCH_BIT /* * On my Sahara Touch-IT 213 I have observed x values from 0 to 0x7f0 * and y values from 0x1d to 0x7e9, so the actual measurement is * probably done with an 11 bit precision. */ #define T213_MIN_XC 0 #define T213_MAX_XC 0x07ff #define T213_MIN_YC 0 #define T213_MAX_YC 0x07ff /* * Per-touchscreen data. */ struct touchit213 { struct input_dev *dev; struct serio *serio; int idx; unsigned char csum; unsigned char data[5]; char phys[32]; }; static irqreturn_t touchit213_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct touchit213 *touchit213 = serio_get_drvdata(serio); struct input_dev *dev = touchit213->dev; touchit213->data[touchit213->idx] = data; switch (touchit213->idx++) { case 0: if ((touchit213->data[0] & T213_FORMAT_STATUS_MASK) != T213_FORMAT_STATUS_BYTE) { pr_debug("unsynchronized data: 0x%02x\n", data); touchit213->idx = 0; } break; case 4: touchit213->idx = 0; input_report_abs(dev, ABS_X, (touchit213->data[1] << 7) | touchit213->data[2]); input_report_abs(dev, ABS_Y, (touchit213->data[3] << 7) | touchit213->data[4]); input_report_key(dev, BTN_TOUCH, touchit213->data[0] & T213_FORMAT_TOUCH_BIT); input_sync(dev); break; } return IRQ_HANDLED; } /* * touchit213_disconnect() is the opposite of touchit213_connect() */ static void touchit213_disconnect(struct serio *serio) { struct touchit213 *touchit213 = serio_get_drvdata(serio); input_get_device(touchit213->dev); input_unregister_device(touchit213->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(touchit213->dev); kfree(touchit213); } /* * touchit213_connect() is the routine that is called when someone adds a * new serio device that supports the Touchright protocol and registers it as * an input device. 
*/ static int touchit213_connect(struct serio *serio, struct serio_driver *drv) { struct touchit213 *touchit213; struct input_dev *input_dev; int err; touchit213 = kzalloc(sizeof(struct touchit213), GFP_KERNEL); input_dev = input_allocate_device(); if (!touchit213 || !input_dev) { err = -ENOMEM; goto fail1; } touchit213->serio = serio; touchit213->dev = input_dev; snprintf(touchit213->phys, sizeof(touchit213->phys), "%s/input0", serio->phys); input_dev->name = "Sahara Touch-iT213 Serial TouchScreen"; input_dev->phys = touchit213->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_TOUCHIT213; input_dev->id.product = 0; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(touchit213->dev, ABS_X, T213_MIN_XC, T213_MAX_XC, 0, 0); input_set_abs_params(touchit213->dev, ABS_Y, T213_MIN_YC, T213_MAX_YC, 0, 0); serio_set_drvdata(serio, touchit213); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(touchit213->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(touchit213); return err; } /* * The serio driver structure. */ static struct serio_device_id touchit213_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_TOUCHIT213, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, touchit213_serio_ids); static struct serio_driver touchit213_drv = { .driver = { .name = "touchit213", }, .description = DRIVER_DESC, .id_table = touchit213_serio_ids, .interrupt = touchit213_interrupt, .connect = touchit213_connect, .disconnect = touchit213_disconnect, }; /* * The functions for inserting/removing us as a module. 
*/ static int __init touchit213_init(void) { return serio_register_driver(&touchit213_drv); } static void __exit touchit213_exit(void) { serio_unregister_driver(&touchit213_drv); } module_init(touchit213_init); module_exit(touchit213_exit);
gpl-2.0
airidosas252/linux-allwinner-a10
fs/hfs/extent.c
9892
13850
/*
 *  linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * build_key
 *
 * Fill in an extents-tree search key for (file id, fork type, first
 * allocation block of the extent record).
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
	key->key_len = 7;
	key->ext.FkType = type;
	key->ext.FNum = cpu_to_be32(cnid);
	key->ext.FABN = cpu_to_be16(block);
}

/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned ints).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects
 */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
	__be32 fnum1, fnum2;
	__be16 block1, block2;

	fnum1 = key1->ext.FNum;
	fnum2 = key2->ext.FNum;
	if (fnum1 != fnum2)
		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
	if (key1->ext.FkType != key2->ext.FkType)
		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

	block1 = key1->ext.FABN;
	block2 = key2->ext.FABN;
	if (block1 == block2)
		return 0;
	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

/*
 * hfs_ext_find_block
 *
 * Map the fork-relative block 'off' through a three-entry extent
 * record and return the corresponding on-disk allocation block.
 * Returns 0 if 'off' lies beyond the record (callers treat the
 * record as covering the block, so this is a "can't happen" path).
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
	int i;
	u16 count;

	for (i = 0; i < 3; ext++, i++) {
		count = be16_to_cpu(ext->count);
		if (off < count)
			return be16_to_cpu(ext->block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

/* Total number of allocation blocks covered by one extent record. */
static int hfs_ext_block_count(struct hfs_extent *ext)
{
	int i;
	u16 count = 0;

	for (i = 0; i < 3; ext++, i++)
		count += be16_to_cpu(ext->count);
	return count;
}

/*
 * Return the allocation block just past the last block described by
 * the record; used as the search goal when extending a file.  Scans
 * backwards from entry 2 for the last non-empty entry (entry 0 is
 * reached when the first two tests fail).
 */
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
	int i;

	ext += 2;
	for (i = 0; i < 2; ext--, i++)
		if (ext->count)
			break;
	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}

/*
 * Flush the cached (dirty) extent record back to the extents tree:
 * insert a brand-new record (HFS_FLG_EXT_NEW) or overwrite the
 * existing one in place.  Caller must hold fd's tree lock context.
 */
static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
}

/* Write the cached extent record out if (and only if) it is dirty. */
void hfs_ext_write_extent(struct inode *inode)
{
	struct hfs_find_data fd;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
		__hfs_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

/*
 * Look up the extent record covering 'block' of (cnid, type) and copy
 * it into 'extent'.  FNum is zeroed in the found key first so the
 * subsequent comparison cannot spuriously match on a stale key when
 * hfs_brec_find() returns -ENOENT with the cursor on a neighbour.
 */
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}

/*
 * Make HFS_I(inode)->cached_extents cover 'block': flush the old
 * cached record if dirty, then read the covering record from the
 * tree.  On failure the cache is invalidated.
 */
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY)
		__hfs_ext_write_extent(inode, fd);

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}

/*
 * Ensure the cached extent record covers 'block'; fast-path out when
 * the current cache already covers it.  Caller holds extents_lock.
 */
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFS_I(inode)->cached_start &&
	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
		return 0;

	hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfs_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

/* Debug helper: print a record as "start:count start:count start:count". */
static void hfs_dump_extent(struct hfs_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, "   ");
	for (i = 0; i < 3; i++)
		dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block),
				 be16_to_cpu(extent[i].count));
	dprint(DBG_EXTENT, "\n");
}

/*
 * Append [alloc_block, alloc_block + block_count) at fork-relative
 * offset 'offset' within a record: either grow the entry it abuts
 * (when the new run is physically contiguous) or start the next
 * entry.  Returns -ENOSPC when all three entries are in use, which
 * callers turn into a new overflow record.
 */
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

/*
 * Free the last 'block_nr' allocation blocks of the record, walking
 * the entries backwards from the one ending at 'offset', clearing
 * volume-bitmap bits and truncating/zeroing entries as it goes.
 */
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}

/*
 * Free every allocation block owned by one fork of an on-disk catalog
 * file record: first the three extents stored in the catalog record
 * itself, then any overflow records in the extents tree (removed from
 * the tree as they are freed, walking backwards from the end).
 */
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	/*
	 * Sum the three entries of the in-catalog record.  The old loop
	 * advanced 'extent' *and* indexed with 'i', so it read entries
	 * 0, 2 and 4 (the last one past the record) and then handed the
	 * advanced pointer to hfs_free_extents() and reused it below as
	 * the __hfs_ext_read_extent() buffer.  Index only; keep 'extent'
	 * at the start of the record.
	 */
	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

/*
 * hfs_get_block
 *
 * Block-mapping callback for the page cache: map logical file block
 * 'block' to a disk block in bh_result, allocating a new allocation
 * block when 'create' is set and the block is one past EOF.
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;

	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		/* Only sequential appends are supported */
		if (block > HFS_I(inode)->fs_blocks || !create)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

/*
 * Grow the fork by (up to) one clump of allocation blocks, preferring
 * blocks contiguous with the current last extent; falls back to a new
 * overflow extent record when the current record is full.
 */
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		/* B-tree files also live in the alternate MDB */
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		sb->s_dirt = 1;
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfs_ext_write_extent(inode);

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}

/*
 * Bring the fork's allocation in line with inode->i_size: zero-extend
 * through the page cache when growing, or free trailing allocation
 * blocks (and empty overflow records) when shrinking.
 */
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (long long)HFS_I(inode)->phys_size, inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
					page, fsdata);
		}
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;

	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		/* record is now empty: drop it from the tree and keep going */
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}
gpl-2.0
asce1062/android_kernel_lge_msm7x27a-common
drivers/firmware/edd.c
9892
20462
/* * linux/drivers/firmware/edd.c * Copyright (C) 2002, 2003, 2004 Dell Inc. * by Matt Domsch <Matt_Domsch@dell.com> * disk signature by Matt Domsch, Andrew Wilks, and Sandeep K. Shandilya * legacy CHS by Patrick J. LoPresti <patl@users.sourceforge.net> * * BIOS Enhanced Disk Drive Services (EDD) * conformant to T13 Committee www.t13.org * projects 1572D, 1484D, 1386D, 1226DT * * This code takes information provided by BIOS EDD calls * fn41 - Check Extensions Present and * fn48 - Get Device Parameters with EDD extensions * made in setup.S, copied to safe structures in setup.c, * and presents it in sysfs. * * Please see http://linux.dell.com/edd/results.html for * the list of BIOSs which have been reported to implement EDD. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by * the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* */ #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/limits.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/edd.h> #define EDD_VERSION "0.16" #define EDD_DATE "2004-Jun-25" MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>"); MODULE_DESCRIPTION("sysfs interface to BIOS EDD information"); MODULE_LICENSE("GPL"); MODULE_VERSION(EDD_VERSION); #define left (PAGE_SIZE - (p - buf) - 1) struct edd_device { unsigned int index; unsigned int mbr_signature; struct edd_info *info; struct kobject kobj; }; struct edd_attribute { struct attribute attr; ssize_t(*show) (struct edd_device * edev, char *buf); int (*test) (struct edd_device * edev); }; /* forward declarations */ static int edd_dev_is_type(struct edd_device *edev, const char *type); static struct pci_dev *edd_get_pci_dev(struct edd_device *edev); static struct edd_device *edd_devices[EDD_MBR_SIG_MAX]; #define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \ struct edd_attribute edd_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .test = _test, \ }; static int edd_has_mbr_signature(struct edd_device *edev) { return edev->index < min_t(unsigned char, edd.mbr_signature_nr, EDD_MBR_SIG_MAX); } static int edd_has_edd_info(struct edd_device *edev) { return edev->index < min_t(unsigned char, edd.edd_info_nr, EDDMAXNR); } static inline struct edd_info * edd_dev_get_info(struct edd_device *edev) { return edev->info; } static inline void edd_dev_set_info(struct edd_device *edev, int i) { edev->index = i; if (edd_has_mbr_signature(edev)) edev->mbr_signature = edd.mbr_signature[i]; if (edd_has_edd_info(edev)) edev->info = &edd.edd_info[i]; } #define to_edd_attr(_attr) container_of(_attr,struct edd_attribute,attr) #define to_edd_device(obj) container_of(obj,struct edd_device,kobj) static 
ssize_t edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf) { struct edd_device *dev = to_edd_device(kobj); struct edd_attribute *edd_attr = to_edd_attr(attr); ssize_t ret = -EIO; if (edd_attr->show) ret = edd_attr->show(dev, buf); return ret; } static const struct sysfs_ops edd_attr_ops = { .show = edd_attr_show, }; static ssize_t edd_show_host_bus(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; int i; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; for (i = 0; i < 4; i++) { if (isprint(info->params.host_bus_type[i])) { p += scnprintf(p, left, "%c", info->params.host_bus_type[i]); } else { p += scnprintf(p, left, " "); } } if (!strncmp(info->params.host_bus_type, "ISA", 3)) { p += scnprintf(p, left, "\tbase_address: %x\n", info->params.interface_path.isa.base_address); } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) || !strncmp(info->params.host_bus_type, "PCI", 3) || !strncmp(info->params.host_bus_type, "XPRS", 4)) { p += scnprintf(p, left, "\t%02x:%02x.%d channel: %u\n", info->params.interface_path.pci.bus, info->params.interface_path.pci.slot, info->params.interface_path.pci.function, info->params.interface_path.pci.channel); } else if (!strncmp(info->params.host_bus_type, "IBND", 4) || !strncmp(info->params.host_bus_type, "HTPT", 4)) { p += scnprintf(p, left, "\tTBD: %llx\n", info->params.interface_path.ibnd.reserved); } else { p += scnprintf(p, left, "\tunknown: %llx\n", info->params.interface_path.unknown.reserved); } return (p - buf); } static ssize_t edd_show_interface(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; int i; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; for (i = 0; i < 8; i++) { if (isprint(info->params.interface_type[i])) { p += scnprintf(p, left, "%c", info->params.interface_type[i]); } else { p += scnprintf(p, left, " "); } } if 
(!strncmp(info->params.interface_type, "ATAPI", 5)) { p += scnprintf(p, left, "\tdevice: %u lun: %u\n", info->params.device_path.atapi.device, info->params.device_path.atapi.lun); } else if (!strncmp(info->params.interface_type, "ATA", 3)) { p += scnprintf(p, left, "\tdevice: %u\n", info->params.device_path.ata.device); } else if (!strncmp(info->params.interface_type, "SCSI", 4)) { p += scnprintf(p, left, "\tid: %u lun: %llu\n", info->params.device_path.scsi.id, info->params.device_path.scsi.lun); } else if (!strncmp(info->params.interface_type, "USB", 3)) { p += scnprintf(p, left, "\tserial_number: %llx\n", info->params.device_path.usb.serial_number); } else if (!strncmp(info->params.interface_type, "1394", 4)) { p += scnprintf(p, left, "\teui: %llx\n", info->params.device_path.i1394.eui); } else if (!strncmp(info->params.interface_type, "FIBRE", 5)) { p += scnprintf(p, left, "\twwid: %llx lun: %llx\n", info->params.device_path.fibre.wwid, info->params.device_path.fibre.lun); } else if (!strncmp(info->params.interface_type, "I2O", 3)) { p += scnprintf(p, left, "\tidentity_tag: %llx\n", info->params.device_path.i2o.identity_tag); } else if (!strncmp(info->params.interface_type, "RAID", 4)) { p += scnprintf(p, left, "\tidentity_tag: %x\n", info->params.device_path.raid.array_number); } else if (!strncmp(info->params.interface_type, "SATA", 4)) { p += scnprintf(p, left, "\tdevice: %u\n", info->params.device_path.sata.device); } else { p += scnprintf(p, left, "\tunknown: %llx %llx\n", info->params.device_path.unknown.reserved1, info->params.device_path.unknown.reserved2); } return (p - buf); } /** * edd_show_raw_data() - copies raw data to buffer for userspace to parse * @edev: target edd_device * @buf: output buffer * * Returns: number of bytes written, or -EINVAL on failure */ static ssize_t edd_show_raw_data(struct edd_device *edev, char *buf) { struct edd_info *info; ssize_t len = sizeof (info->params); if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if 
(!info || !buf) return -EINVAL; if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) len = info->params.length; /* In case of buggy BIOSs */ if (len > (sizeof(info->params))) len = sizeof(info->params); memcpy(buf, &info->params, len); return len; } static ssize_t edd_show_version(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += scnprintf(p, left, "0x%02x\n", info->version); return (p - buf); } static ssize_t edd_show_mbr_signature(struct edd_device *edev, char *buf) { char *p = buf; p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature); return (p - buf); } static ssize_t edd_show_extensions(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) { p += scnprintf(p, left, "Fixed disk access\n"); } if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) { p += scnprintf(p, left, "Device locking and ejecting\n"); } if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) { p += scnprintf(p, left, "Enhanced Disk Drive support\n"); } if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) { p += scnprintf(p, left, "64-bit extensions\n"); } return (p - buf); } static ssize_t edd_show_info_flags(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; if (info->params.info_flags & EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT) p += scnprintf(p, left, "DMA boundary error transparent\n"); if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID) p += scnprintf(p, left, "geometry valid\n"); if (info->params.info_flags & EDD_INFO_REMOVABLE) p += scnprintf(p, left, "removable\n"); if (info->params.info_flags & EDD_INFO_WRITE_VERIFY) p += scnprintf(p, 
left, "write verify\n"); if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION) p += scnprintf(p, left, "media change notification\n"); if (info->params.info_flags & EDD_INFO_LOCKABLE) p += scnprintf(p, left, "lockable\n"); if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT) p += scnprintf(p, left, "no media present\n"); if (info->params.info_flags & EDD_INFO_USE_INT13_FN50) p += scnprintf(p, left, "use int13 fn50\n"); return (p - buf); } static ssize_t edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += snprintf(p, left, "%u\n", info->legacy_max_cylinder); return (p - buf); } static ssize_t edd_show_legacy_max_head(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += snprintf(p, left, "%u\n", info->legacy_max_head); return (p - buf); } static ssize_t edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += snprintf(p, left, "%u\n", info->legacy_sectors_per_track); return (p - buf); } static ssize_t edd_show_default_cylinders(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders); return (p - buf); } static ssize_t edd_show_default_heads(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += scnprintf(p, left, "%u\n", info->params.num_default_heads); return (p - buf); } static ssize_t edd_show_default_sectors_per_track(struct edd_device 
*edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += scnprintf(p, left, "%u\n", info->params.sectors_per_track); return (p - buf); } static ssize_t edd_show_sectors(struct edd_device *edev, char *buf) { struct edd_info *info; char *p = buf; if (!edev) return -EINVAL; info = edd_dev_get_info(edev); if (!info || !buf) return -EINVAL; p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors); return (p - buf); } /* * Some device instances may not have all the above attributes, * or the attribute values may be meaningless (i.e. if * the device is < EDD 3.0, it won't have host_bus and interface * information), so don't bother making files for them. Likewise * if the default_{cylinders,heads,sectors_per_track} values * are zero, the BIOS doesn't provide sane values, don't bother * creating files for them either. */ static int edd_has_legacy_max_cylinder(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->legacy_max_cylinder > 0; } static int edd_has_legacy_max_head(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->legacy_max_head > 0; } static int edd_has_legacy_sectors_per_track(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->legacy_sectors_per_track > 0; } static int edd_has_default_cylinders(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->params.num_default_cylinders > 0; } static int edd_has_default_heads(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->params.num_default_heads > 0; } static int 
edd_has_default_sectors_per_track(struct edd_device *edev) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; return info->params.sectors_per_track > 0; } static int edd_has_edd30(struct edd_device *edev) { struct edd_info *info; int i; u8 csum = 0; if (!edev) return 0; info = edd_dev_get_info(edev); if (!info) return 0; if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) { return 0; } /* We support only T13 spec */ if (info->params.device_path_info_length != 44) return 0; for (i = 30; i < info->params.device_path_info_length + 30; i++) csum += *(((u8 *)&info->params) + i); if (csum) return 0; return 1; } static EDD_DEVICE_ATTR(raw_data, 0444, edd_show_raw_data, edd_has_edd_info); static EDD_DEVICE_ATTR(version, 0444, edd_show_version, edd_has_edd_info); static EDD_DEVICE_ATTR(extensions, 0444, edd_show_extensions, edd_has_edd_info); static EDD_DEVICE_ATTR(info_flags, 0444, edd_show_info_flags, edd_has_edd_info); static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info); static EDD_DEVICE_ATTR(legacy_max_cylinder, 0444, edd_show_legacy_max_cylinder, edd_has_legacy_max_cylinder); static EDD_DEVICE_ATTR(legacy_max_head, 0444, edd_show_legacy_max_head, edd_has_legacy_max_head); static EDD_DEVICE_ATTR(legacy_sectors_per_track, 0444, edd_show_legacy_sectors_per_track, edd_has_legacy_sectors_per_track); static EDD_DEVICE_ATTR(default_cylinders, 0444, edd_show_default_cylinders, edd_has_default_cylinders); static EDD_DEVICE_ATTR(default_heads, 0444, edd_show_default_heads, edd_has_default_heads); static EDD_DEVICE_ATTR(default_sectors_per_track, 0444, edd_show_default_sectors_per_track, edd_has_default_sectors_per_track); static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30); static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30); static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature); /* These are default attributes that 
are added for every edd * device discovered. There are none. */ static struct attribute * def_attrs[] = { NULL, }; /* These attributes are conditional and only added for some devices. */ static struct edd_attribute * edd_attrs[] = { &edd_attr_raw_data, &edd_attr_version, &edd_attr_extensions, &edd_attr_info_flags, &edd_attr_sectors, &edd_attr_legacy_max_cylinder, &edd_attr_legacy_max_head, &edd_attr_legacy_sectors_per_track, &edd_attr_default_cylinders, &edd_attr_default_heads, &edd_attr_default_sectors_per_track, &edd_attr_interface, &edd_attr_host_bus, &edd_attr_mbr_signature, NULL, }; /** * edd_release - free edd structure * @kobj: kobject of edd structure * * This is called when the refcount of the edd structure * reaches 0. This should happen right after we unregister, * but just in case, we use the release callback anyway. */ static void edd_release(struct kobject * kobj) { struct edd_device * dev = to_edd_device(kobj); kfree(dev); } static struct kobj_type edd_ktype = { .release = edd_release, .sysfs_ops = &edd_attr_ops, .default_attrs = def_attrs, }; static struct kset *edd_kset; /** * edd_dev_is_type() - is this EDD device a 'type' device? * @edev: target edd_device * @type: a host bus or interface identifier string per the EDD spec * * Returns 1 (TRUE) if it is a 'type' device, 0 otherwise. 
*/ static int edd_dev_is_type(struct edd_device *edev, const char *type) { struct edd_info *info; if (!edev) return 0; info = edd_dev_get_info(edev); if (type && info) { if (!strncmp(info->params.host_bus_type, type, strlen(type)) || !strncmp(info->params.interface_type, type, strlen(type))) return 1; } return 0; } /** * edd_get_pci_dev() - finds pci_dev that matches edev * @edev: edd_device * * Returns pci_dev if found, or NULL */ static struct pci_dev * edd_get_pci_dev(struct edd_device *edev) { struct edd_info *info = edd_dev_get_info(edev); if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) { return pci_get_bus_and_slot(info->params.interface_path.pci.bus, PCI_DEVFN(info->params.interface_path.pci.slot, info->params.interface_path.pci. function)); } return NULL; } static int edd_create_symlink_to_pcidev(struct edd_device *edev) { struct pci_dev *pci_dev = edd_get_pci_dev(edev); int ret; if (!pci_dev) return 1; ret = sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev"); pci_dev_put(pci_dev); return ret; } static inline void edd_device_unregister(struct edd_device *edev) { kobject_put(&edev->kobj); } static void edd_populate_dir(struct edd_device * edev) { struct edd_attribute * attr; int error = 0; int i; for (i = 0; (attr = edd_attrs[i]) && !error; i++) { if (!attr->test || (attr->test && attr->test(edev))) error = sysfs_create_file(&edev->kobj,&attr->attr); } if (!error) { edd_create_symlink_to_pcidev(edev); } } static int edd_device_register(struct edd_device *edev, int i) { int error; if (!edev) return 1; edd_dev_set_info(edev, i); edev->kobj.kset = edd_kset; error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL, "int13_dev%02x", 0x80 + i); if (!error) { edd_populate_dir(edev); kobject_uevent(&edev->kobj, KOBJ_ADD); } return error; } static inline int edd_num_devices(void) { return max_t(unsigned char, min_t(unsigned char, EDD_MBR_SIG_MAX, edd.mbr_signature_nr), min_t(unsigned char, EDDMAXNR, edd.edd_info_nr)); } /** * edd_init() 
- creates sysfs tree of EDD data */ static int __init edd_init(void) { int i; int rc=0; struct edd_device *edev; printk(KERN_INFO "BIOS EDD facility v%s %s, %d devices found\n", EDD_VERSION, EDD_DATE, edd_num_devices()); if (!edd_num_devices()) { printk(KERN_INFO "EDD information not available.\n"); return -ENODEV; } edd_kset = kset_create_and_add("edd", NULL, firmware_kobj); if (!edd_kset) return -ENOMEM; for (i = 0; i < edd_num_devices(); i++) { edev = kzalloc(sizeof (*edev), GFP_KERNEL); if (!edev) { rc = -ENOMEM; goto out; } rc = edd_device_register(edev, i); if (rc) { kfree(edev); goto out; } edd_devices[i] = edev; } return 0; out: while (--i >= 0) edd_device_unregister(edd_devices[i]); kset_unregister(edd_kset); return rc; } static void __exit edd_exit(void) { int i; struct edd_device *edev; for (i = 0; i < edd_num_devices(); i++) { if ((edev = edd_devices[i])) edd_device_unregister(edev); } kset_unregister(edd_kset); } late_initcall(edd_init); module_exit(edd_exit);
gpl-2.0