repo_name
string
path
string
copies
string
size
string
content
string
license
string
chongzi865458/android4.04_kernel
drivers/watchdog/ar7_wdt.c
3256
8962
/* * drivers/watchdog/ar7_wdt.c * * Copyright (C) 2007 Nicolas Thill <nico@openwrt.org> * Copyright (c) 2005 Enrik Berkhan <Enrik.Berkhan@akk.org> * * Some code taken from: * National Semiconductor SCx200 Watchdog support * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/clk.h> #include <asm/addrspace.h> #include <asm/mach-ar7/ar7.h> #define DRVNAME "ar7_wdt" #define LONGNAME "TI AR7 Watchdog Timer" MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>"); MODULE_DESCRIPTION(LONGNAME); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); static int margin = 60; module_param(margin, int, 0); MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); #define READ_REG(x) readl((void __iomem *)&(x)) #define WRITE_REG(x, v) writel((v), (void __iomem *)&(x)) struct ar7_wdt { u32 kick_lock; u32 kick; u32 change_lock; u32 
change; u32 disable_lock; u32 disable; u32 prescale_lock; u32 prescale; }; static unsigned long wdt_is_open; static spinlock_t wdt_lock; static unsigned expect_close; /* XXX currently fixed, allows max margin ~68.72 secs */ #define prescale_value 0xffff /* Resource of the WDT registers */ static struct resource *ar7_regs_wdt; /* Pointer to the remapped WDT IO space */ static struct ar7_wdt *ar7_wdt; static struct clk *vbus_clk; static void ar7_wdt_kick(u32 value) { WRITE_REG(ar7_wdt->kick_lock, 0x5555); if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) { WRITE_REG(ar7_wdt->kick_lock, 0xaaaa); if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) { WRITE_REG(ar7_wdt->kick, value); return; } } printk(KERN_ERR DRVNAME ": failed to unlock WDT kick reg\n"); } static void ar7_wdt_prescale(u32 value) { WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a); if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) { WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5); if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) { WRITE_REG(ar7_wdt->prescale, value); return; } } printk(KERN_ERR DRVNAME ": failed to unlock WDT prescale reg\n"); } static void ar7_wdt_change(u32 value) { WRITE_REG(ar7_wdt->change_lock, 0x6666); if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) { WRITE_REG(ar7_wdt->change_lock, 0xbbbb); if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) { WRITE_REG(ar7_wdt->change, value); return; } } printk(KERN_ERR DRVNAME ": failed to unlock WDT change reg\n"); } static void ar7_wdt_disable(u32 value) { WRITE_REG(ar7_wdt->disable_lock, 0x7777); if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) { WRITE_REG(ar7_wdt->disable_lock, 0xcccc); if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) { WRITE_REG(ar7_wdt->disable_lock, 0xdddd); if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) { WRITE_REG(ar7_wdt->disable, value); return; } } } printk(KERN_ERR DRVNAME ": failed to unlock WDT disable reg\n"); } static void ar7_wdt_update_margin(int new_margin) { u32 change; u32 vbus_rate; vbus_rate = clk_get_rate(vbus_clk); change = new_margin 
* (vbus_rate / prescale_value); if (change < 1) change = 1; if (change > 0xffff) change = 0xffff; ar7_wdt_change(change); margin = change * prescale_value / vbus_rate; printk(KERN_INFO DRVNAME ": timer margin %d seconds (prescale %d, change %d, freq %d)\n", margin, prescale_value, change, vbus_rate); } static void ar7_wdt_enable_wdt(void) { printk(KERN_DEBUG DRVNAME ": enabling watchdog timer\n"); ar7_wdt_disable(1); ar7_wdt_kick(1); } static void ar7_wdt_disable_wdt(void) { printk(KERN_DEBUG DRVNAME ": disabling watchdog timer\n"); ar7_wdt_disable(0); } static int ar7_wdt_open(struct inode *inode, struct file *file) { /* only allow one at a time */ if (test_and_set_bit(0, &wdt_is_open)) return -EBUSY; ar7_wdt_enable_wdt(); expect_close = 0; return nonseekable_open(inode, file); } static int ar7_wdt_release(struct inode *inode, struct file *file) { if (!expect_close) printk(KERN_WARNING DRVNAME ": watchdog device closed unexpectedly," "will not disable the watchdog timer\n"); else if (!nowayout) ar7_wdt_disable_wdt(); clear_bit(0, &wdt_is_open); return 0; } static ssize_t ar7_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { /* check for a magic close character */ if (len) { size_t i; spin_lock(&wdt_lock); ar7_wdt_kick(1); spin_unlock(&wdt_lock); expect_close = 0; for (i = 0; i < len; ++i) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 1; } } return len; } static long ar7_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { static const struct watchdog_info ident = { .identity = LONGNAME, .firmware_version = 1, .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE), }; int new_margin; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident))) return -EFAULT; return 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: if (put_user(0, (int *)arg)) return -EFAULT; return 0; case WDIOC_KEEPALIVE: ar7_wdt_kick(1); return 0; case 
WDIOC_SETTIMEOUT: if (get_user(new_margin, (int *)arg)) return -EFAULT; if (new_margin < 1) return -EINVAL; spin_lock(&wdt_lock); ar7_wdt_update_margin(new_margin); ar7_wdt_kick(1); spin_unlock(&wdt_lock); case WDIOC_GETTIMEOUT: if (put_user(margin, (int *)arg)) return -EFAULT; return 0; default: return -ENOTTY; } } static const struct file_operations ar7_wdt_fops = { .owner = THIS_MODULE, .write = ar7_wdt_write, .unlocked_ioctl = ar7_wdt_ioctl, .open = ar7_wdt_open, .release = ar7_wdt_release, .llseek = no_llseek, }; static struct miscdevice ar7_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ar7_wdt_fops, }; static int __devinit ar7_wdt_probe(struct platform_device *pdev) { int rc; spin_lock_init(&wdt_lock); ar7_regs_wdt = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!ar7_regs_wdt) { printk(KERN_ERR DRVNAME ": could not get registers resource\n"); rc = -ENODEV; goto out; } if (!request_mem_region(ar7_regs_wdt->start, resource_size(ar7_regs_wdt), LONGNAME)) { printk(KERN_WARNING DRVNAME ": watchdog I/O region busy\n"); rc = -EBUSY; goto out; } ar7_wdt = ioremap(ar7_regs_wdt->start, resource_size(ar7_regs_wdt)); if (!ar7_wdt) { printk(KERN_ERR DRVNAME ": could not ioremap registers\n"); rc = -ENXIO; goto out_mem_region; } vbus_clk = clk_get(NULL, "vbus"); if (IS_ERR(vbus_clk)) { printk(KERN_ERR DRVNAME ": could not get vbus clock\n"); rc = PTR_ERR(vbus_clk); goto out_mem_region; } ar7_wdt_disable_wdt(); ar7_wdt_prescale(prescale_value); ar7_wdt_update_margin(margin); rc = misc_register(&ar7_wdt_miscdev); if (rc) { printk(KERN_ERR DRVNAME ": unable to register misc device\n"); goto out_alloc; } goto out; out_alloc: iounmap(ar7_wdt); out_mem_region: release_mem_region(ar7_regs_wdt->start, resource_size(ar7_regs_wdt)); out: return rc; } static int __devexit ar7_wdt_remove(struct platform_device *pdev) { misc_deregister(&ar7_wdt_miscdev); iounmap(ar7_wdt); release_mem_region(ar7_regs_wdt->start, resource_size(ar7_regs_wdt)); 
return 0; } static void ar7_wdt_shutdown(struct platform_device *pdev) { if (!nowayout) ar7_wdt_disable_wdt(); } static struct platform_driver ar7_wdt_driver = { .probe = ar7_wdt_probe, .remove = __devexit_p(ar7_wdt_remove), .shutdown = ar7_wdt_shutdown, .driver = { .owner = THIS_MODULE, .name = "ar7_wdt", }, }; static int __init ar7_wdt_init(void) { return platform_driver_register(&ar7_wdt_driver); } static void __exit ar7_wdt_cleanup(void) { platform_driver_unregister(&ar7_wdt_driver); } module_init(ar7_wdt_init); module_exit(ar7_wdt_cleanup);
gpl-2.0
cozybit/aosp-omap-kernel
net/ax25/ax25_dev.c
4280
4891
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/spinlock.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> ax25_dev *ax25_dev_list; DEFINE_SPINLOCK(ax25_dev_lock); ax25_dev *ax25_addr_ax25dev(ax25_address *addr) { ax25_dev *ax25_dev, *res = NULL; spin_lock_bh(&ax25_dev_lock); for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; } spin_unlock_bh(&ax25_dev_lock); return res; } /* * This is called when an interface is brought up. These are * reasonable defaults. 
*/ void ax25_dev_device_up(struct net_device *dev) { ax25_dev *ax25_dev; if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); return; } ax25_unregister_sysctl(); dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); ax25_dev->forward = NULL; ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE; ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE; ax25_dev->values[AX25_VALUES_BACKOFF] = AX25_DEF_BACKOFF; ax25_dev->values[AX25_VALUES_CONMODE] = AX25_DEF_CONMODE; ax25_dev->values[AX25_VALUES_WINDOW] = AX25_DEF_WINDOW; ax25_dev->values[AX25_VALUES_EWINDOW] = AX25_DEF_EWINDOW; ax25_dev->values[AX25_VALUES_T1] = AX25_DEF_T1; ax25_dev->values[AX25_VALUES_T2] = AX25_DEF_T2; ax25_dev->values[AX25_VALUES_T3] = AX25_DEF_T3; ax25_dev->values[AX25_VALUES_IDLE] = AX25_DEF_IDLE; ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2; ax25_dev->values[AX25_VALUES_PACLEN] = AX25_DEF_PACLEN; ax25_dev->values[AX25_VALUES_PROTOCOL] = AX25_DEF_PROTOCOL; ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT; #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_ds_setup_timer(ax25_dev); #endif spin_lock_bh(&ax25_dev_lock); ax25_dev->next = ax25_dev_list; ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); ax25_register_sysctl(); } void ax25_dev_device_down(struct net_device *dev) { ax25_dev *s, *ax25_dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; ax25_unregister_sysctl(); spin_lock_bh(&ax25_dev_lock); #ifdef CONFIG_AX25_DAMA_SLAVE ax25_ds_del_timer(ax25_dev); #endif /* * Remove any packet forwarding that points to this device. 
*/ for (s = ax25_dev_list; s != NULL; s = s->next) if (s->forward == dev) s->forward = NULL; if ((s = ax25_dev_list) == ax25_dev) { ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); ax25_register_sysctl(); return; } while (s != NULL && s->next != NULL) { if (s->next == ax25_dev) { s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); ax25_register_sysctl(); return; } s = s->next; } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; ax25_register_sysctl(); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) { ax25_dev *ax25_dev, *fwd_dev; if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL) return -EINVAL; switch (cmd) { case SIOCAX25ADDFWD: if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL) return -EINVAL; if (ax25_dev->forward != NULL) return -EINVAL; ax25_dev->forward = fwd_dev->dev; break; case SIOCAX25DELFWD: if (ax25_dev->forward == NULL) return -EINVAL; ax25_dev->forward = NULL; break; default: return -EINVAL; } return 0; } struct net_device *ax25_fwd_dev(struct net_device *dev) { ax25_dev *ax25_dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return dev; if (ax25_dev->forward == NULL) return dev; return ax25_dev->forward; } /* * Free all memory associated with device structures. */ void __exit ax25_dev_free(void) { ax25_dev *s, *ax25_dev; spin_lock_bh(&ax25_dev_lock); ax25_dev = ax25_dev_list; while (ax25_dev != NULL) { s = ax25_dev; dev_put(ax25_dev->dev); ax25_dev = ax25_dev->next; kfree(s); } ax25_dev_list = NULL; spin_unlock_bh(&ax25_dev_lock); }
gpl-2.0
gearslam/Ak-xGenesis-gee-SLIMPort
drivers/net/ethernet/amd/pcnet32.c
4536
82553
/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ /* * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. * * Copyright 1993 United States Government as represented by the * Director, National Security Agency. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver is for PCnet32 and PCnetPCI based ethercards */ /************************************************************************** * 23 Oct, 2000. * Fixed a few bugs, related to running the controller in 32bit mode. * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. * *************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "pcnet32" #define DRV_VERSION "1.35" #define DRV_RELDATE "21.Apr.2008" #define PFX DRV_NAME ": " static const char *const version = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/dma.h> #include <asm/irq.h> /* * PCI device identifiers for "new style" Linux PCI Device Drivers */ static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, /* 
* Adapters that were sold with IBM's RS/6000 or pSeries hardware have * the incorrect vendor id. */ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); static int cards_found; /* * VLB I/O addresses */ static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug; static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ static int pcnet32vlb; /* check for VLB cards ? */ static struct net_device *pcnet32_dev; static int max_interrupt_work = 2; static int rx_copybreak = 200; #define PCNET32_PORT_AUI 0x00 #define PCNET32_PORT_10BT 0x01 #define PCNET32_PORT_GPSI 0x02 #define PCNET32_PORT_MII 0x03 #define PCNET32_PORT_PORTSEL 0x03 #define PCNET32_PORT_ASEL 0x04 #define PCNET32_PORT_100 0x40 #define PCNET32_PORT_FD 0x80 #define PCNET32_DMA_MASK 0xffffffff #define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ)) #define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4)) /* * table to translate option values from tulip * to internal options */ static const unsigned char options_mapping[] = { PCNET32_PORT_ASEL, /* 0 Auto-select */ PCNET32_PORT_AUI, /* 1 BNC/AUI */ PCNET32_PORT_AUI, /* 2 AUI/BNC */ PCNET32_PORT_ASEL, /* 3 not supported */ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ PCNET32_PORT_ASEL, /* 5 not supported */ PCNET32_PORT_ASEL, /* 6 not supported */ PCNET32_PORT_ASEL, /* 7 not supported */ PCNET32_PORT_ASEL, /* 8 not supported */ PCNET32_PORT_MII, /* 9 MII 10baseT */ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ PCNET32_PORT_MII, /* 11 MII (autosel) */ PCNET32_PORT_10BT, /* 12 10BaseT */ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ /* 14 MII 100BaseTx-FD */ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, PCNET32_PORT_ASEL /* 15 not supported */ }; static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = 
{ "Loopback test (offline)" }; #define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test) #define PCNET32_NUM_REGS 136 #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS]; static int full_duplex[MAX_UNITS]; static int homepna[MAX_UNITS]; /* * Theory of Operation * * This driver uses the same software structure as the normal lance * driver. So look for a verbose description in lance.c. The differences * to the normal lance driver is the use of the 32bit mode of PCnet32 * and PCnetPCI chips. Because these chips are 32bit chips, there is no * 16MB limitation and we don't need bounce buffers. */ /* * Set the number of Tx and Rx buffers, using Log_2(# buffers). * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). */ #ifndef PCNET32_LOG_TX_BUFFERS #define PCNET32_LOG_TX_BUFFERS 4 #define PCNET32_LOG_RX_BUFFERS 5 #define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ #define PCNET32_LOG_MAX_RX_BUFFERS 9 #endif #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) #define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) #define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) #define PKT_BUF_SKB 1544 /* actual buffer length after being aligned */ #define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) /* chip wants twos complement of the (aligned) buffer length */ #define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) /* Offsets from base I/O address. 
*/ #define PCNET32_WIO_RDP 0x10 #define PCNET32_WIO_RAP 0x12 #define PCNET32_WIO_RESET 0x14 #define PCNET32_WIO_BDP 0x16 #define PCNET32_DWIO_RDP 0x10 #define PCNET32_DWIO_RAP 0x14 #define PCNET32_DWIO_RESET 0x18 #define PCNET32_DWIO_BDP 0x1C #define PCNET32_TOTAL_SIZE 0x20 #define CSR0 0 #define CSR0_INIT 0x1 #define CSR0_START 0x2 #define CSR0_STOP 0x4 #define CSR0_TXPOLL 0x8 #define CSR0_INTEN 0x40 #define CSR0_IDON 0x0100 #define CSR0_NORMAL (CSR0_START | CSR0_INTEN) #define PCNET32_INIT_LOW 1 #define PCNET32_INIT_HIGH 2 #define CSR3 3 #define CSR4 4 #define CSR5 5 #define CSR5_SUSPEND 0x0001 #define CSR15 15 #define PCNET32_MC_FILTER 8 #define PCNET32_79C970A 0x2621 /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { __le32 base; __le16 buf_length; /* two`s complement of length */ __le16 status; __le32 msg_length; __le32 reserved; }; struct pcnet32_tx_head { __le32 base; __le16 length; /* two`s complement of length */ __le16 status; __le32 misc; __le32 reserved; }; /* The PCNET32 32-Bit initialization block, described in databook. */ struct pcnet32_init_block { __le16 mode; __le16 tlen_rlen; u8 phys_addr[6]; __le16 reserved; __le32 filter[2]; /* Receive and transmit ring base, along with extra bits. */ __le32 rx_ring; __le32 tx_ring; }; /* PCnet32 access functions */ struct pcnet32_access { u16 (*read_csr) (unsigned long, int); void (*write_csr) (unsigned long, int, u16); u16 (*read_bcr) (unsigned long, int); void (*write_bcr) (unsigned long, int, u16); u16 (*read_rap) (unsigned long); void (*write_rap) (unsigned long, u16); void (*reset) (unsigned long); }; /* * The first field of pcnet32_private is read by the ethernet device * so the structure should be allocated using pci_alloc_consistent(). */ struct pcnet32_private { struct pcnet32_init_block *init_block; /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. 
*/ struct pcnet32_rx_head *rx_ring; struct pcnet32_tx_head *tx_ring; dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, returned by pci_alloc_consistent */ struct pci_dev *pci_dev; const char *name; /* The saved address of a sent-in-place packet/buffer, for skfree(). */ struct sk_buff **tx_skbuff; struct sk_buff **rx_skbuff; dma_addr_t *tx_dma_addr; dma_addr_t *rx_dma_addr; const struct pcnet32_access *a; spinlock_t lock; /* Guard lock */ unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int rx_ring_size; /* current rx ring size */ unsigned int tx_ring_size; /* current tx ring size */ unsigned int rx_mod_mask; /* rx ring modular mask */ unsigned int tx_mod_mask; /* tx ring modular mask */ unsigned short rx_len_bits; unsigned short tx_len_bits; dma_addr_t rx_ring_dma_addr; dma_addr_t tx_ring_dma_addr; unsigned int dirty_rx, /* ring entries to be freed. */ dirty_tx; struct net_device *dev; struct napi_struct napi; char tx_full; char phycount; /* number of phys found */ int options; unsigned int shared_irq:1, /* shared irq possible */ dxsuflo:1, /* disable transmit stop on uflo */ mii:1; /* mii port available */ struct net_device *next; struct mii_if_info mii_if; struct timer_list watchdog_timer; u32 msg_enable; /* debug message level */ /* each bit indicates an available PHY */ u32 phymask; unsigned short chip_version; /* which variant this is */ /* saved registers during ethtool blink */ u16 save_regs[4]; }; static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); static int pcnet32_probe1(unsigned long, int, struct pci_dev *); static int pcnet32_open(struct net_device *); static int pcnet32_init_ring(struct net_device *); static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, struct net_device *); static void pcnet32_tx_timeout(struct net_device *dev); static irqreturn_t pcnet32_interrupt(int, void *); static int pcnet32_close(struct net_device *); static struct net_device_stats *pcnet32_get_stats(struct 
net_device *); static void pcnet32_load_multicast(struct net_device *dev); static void pcnet32_set_multicast_list(struct net_device *); static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); static void pcnet32_watchdog(struct net_device *); static int mdio_read(struct net_device *dev, int phy_id, int reg_num); static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); static void pcnet32_ethtool_test(struct net_device *dev, struct ethtool_test *eth_test, u64 * data); static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); static int pcnet32_get_regs_len(struct net_device *dev); static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *ptr); static void pcnet32_purge_tx_ring(struct net_device *dev); static int pcnet32_alloc_ring(struct net_device *dev, const char *name); static void pcnet32_free_ring(struct net_device *dev); static void pcnet32_check_media(struct net_device *dev, int verbose); static u16 pcnet32_wio_read_csr(unsigned long addr, int index) { outw(index, addr + PCNET32_WIO_RAP); return inw(addr + PCNET32_WIO_RDP); } static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) { outw(index, addr + PCNET32_WIO_RAP); outw(val, addr + PCNET32_WIO_RDP); } static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) { outw(index, addr + PCNET32_WIO_RAP); return inw(addr + PCNET32_WIO_BDP); } static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) { outw(index, addr + PCNET32_WIO_RAP); outw(val, addr + PCNET32_WIO_BDP); } static u16 pcnet32_wio_read_rap(unsigned long addr) { return inw(addr + PCNET32_WIO_RAP); } static void pcnet32_wio_write_rap(unsigned long addr, u16 val) { outw(val, addr + PCNET32_WIO_RAP); } static void pcnet32_wio_reset(unsigned long addr) { inw(addr + PCNET32_WIO_RESET); } static int pcnet32_wio_check(unsigned long addr) { outw(88, addr + 
PCNET32_WIO_RAP); return inw(addr + PCNET32_WIO_RAP) == 88; } static const struct pcnet32_access pcnet32_wio = { .read_csr = pcnet32_wio_read_csr, .write_csr = pcnet32_wio_write_csr, .read_bcr = pcnet32_wio_read_bcr, .write_bcr = pcnet32_wio_write_bcr, .read_rap = pcnet32_wio_read_rap, .write_rap = pcnet32_wio_write_rap, .reset = pcnet32_wio_reset }; static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) { outl(index, addr + PCNET32_DWIO_RAP); return inl(addr + PCNET32_DWIO_RDP) & 0xffff; } static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) { outl(index, addr + PCNET32_DWIO_RAP); outl(val, addr + PCNET32_DWIO_RDP); } static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) { outl(index, addr + PCNET32_DWIO_RAP); return inl(addr + PCNET32_DWIO_BDP) & 0xffff; } static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) { outl(index, addr + PCNET32_DWIO_RAP); outl(val, addr + PCNET32_DWIO_BDP); } static u16 pcnet32_dwio_read_rap(unsigned long addr) { return inl(addr + PCNET32_DWIO_RAP) & 0xffff; } static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) { outl(val, addr + PCNET32_DWIO_RAP); } static void pcnet32_dwio_reset(unsigned long addr) { inl(addr + PCNET32_DWIO_RESET); } static int pcnet32_dwio_check(unsigned long addr) { outl(88, addr + PCNET32_DWIO_RAP); return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88; } static const struct pcnet32_access pcnet32_dwio = { .read_csr = pcnet32_dwio_read_csr, .write_csr = pcnet32_dwio_write_csr, .read_bcr = pcnet32_dwio_read_bcr, .write_bcr = pcnet32_dwio_write_bcr, .read_rap = pcnet32_dwio_read_rap, .write_rap = pcnet32_dwio_write_rap, .reset = pcnet32_dwio_reset }; static void pcnet32_netif_stop(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); dev->trans_start = jiffies; /* prevent tx timeout */ napi_disable(&lp->napi); netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { struct pcnet32_private *lp = 
netdev_priv(dev); ulong ioaddr = dev->base_addr; u16 val; netif_wake_queue(dev); val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a->write_csr(ioaddr, CSR3, val); napi_enable(&lp->napi); } /* * Allocate space for the new sized tx ring. * Free old resources * Save new resources. * Any failure keeps old resources. * Must be called with lp->lock held. */ static void pcnet32_realloc_tx_ring(struct net_device *dev, struct pcnet32_private *lp, unsigned int size) { dma_addr_t new_ring_dma_addr; dma_addr_t *new_dma_addr_list; struct pcnet32_tx_head *new_tx_ring; struct sk_buff **new_skb_list; pcnet32_purge_tx_ring(dev); new_tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * (1 << size), &new_ring_dma_addr); if (new_tx_ring == NULL) { netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); return; } memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_tx_ring; } new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_lists; } kfree(lp->tx_skbuff); kfree(lp->tx_dma_addr); pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring_size = (1 << size); lp->tx_mod_mask = lp->tx_ring_size - 1; lp->tx_len_bits = (size << 12); lp->tx_ring = new_tx_ring; lp->tx_ring_dma_addr = new_ring_dma_addr; lp->tx_dma_addr = new_dma_addr_list; lp->tx_skbuff = new_skb_list; return; free_new_lists: kfree(new_dma_addr_list); free_new_tx_ring: pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * (1 << size), new_tx_ring, new_ring_dma_addr); } /* * Allocate space for the new sized rx ring. * Re-use old receive buffers. 
* alloc extra buffers * free unneeded buffers * free unneeded buffers * Save new resources. * Any failure keeps old resources. * Must be called with lp->lock held. */ static void pcnet32_realloc_rx_ring(struct net_device *dev, struct pcnet32_private *lp, unsigned int size) { dma_addr_t new_ring_dma_addr; dma_addr_t *new_dma_addr_list; struct pcnet32_rx_head *new_rx_ring; struct sk_buff **new_skb_list; int new, overlap; new_rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * (1 << size), &new_ring_dma_addr); if (new_rx_ring == NULL) { netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); return; } memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_rx_ring; } new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_lists; } /* first copy the current receive buffers */ overlap = min(size, lp->rx_ring_size); for (new = 0; new < overlap; new++) { new_rx_ring[new] = lp->rx_ring[new]; new_dma_addr_list[new] = lp->rx_dma_addr[new]; new_skb_list[new] = lp->rx_skbuff[new]; } /* now allocate any new buffers needed */ for (; new < size; new++) { struct sk_buff *rx_skbuff; new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB); rx_skbuff = new_skb_list[new]; if (!rx_skbuff) { /* keep the original lists and buffers */ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", __func__); goto free_all_new; } skb_reserve(rx_skbuff, NET_IP_ALIGN); new_dma_addr_list[new] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); new_rx_ring[new].status = cpu_to_le16(0x8000); } /* and free any unneeded buffers */ for (; 
new < lp->rx_ring_size; new++) { if (lp->rx_skbuff[new]) { pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(lp->rx_skbuff[new]); } } kfree(lp->rx_skbuff); kfree(lp->rx_dma_addr); pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring_size = (1 << size); lp->rx_mod_mask = lp->rx_ring_size - 1; lp->rx_len_bits = (size << 4); lp->rx_ring = new_rx_ring; lp->rx_ring_dma_addr = new_ring_dma_addr; lp->rx_dma_addr = new_dma_addr_list; lp->rx_skbuff = new_skb_list; return; free_all_new: while (--new >= lp->rx_ring_size) { if (new_skb_list[new]) { pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(new_skb_list[new]); } } kfree(new_skb_list); free_new_lists: kfree(new_dma_addr_list); free_new_rx_ring: pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * (1 << size), new_rx_ring, new_ring_dma_addr); } static void pcnet32_purge_rx_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; /* free all allocated skbuffs */ for (i = 0; i < lp->rx_ring_size; i++) { lp->rx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->rx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(lp->rx_skbuff[i]); } lp->rx_skbuff[i] = NULL; lp->rx_dma_addr[i] = 0; } } #ifdef CONFIG_NET_POLL_CONTROLLER static void pcnet32_poll_controller(struct net_device *dev) { disable_irq(dev->irq); pcnet32_interrupt(0, dev); enable_irq(dev->irq); } #endif static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; if (lp->mii) { spin_lock_irqsave(&lp->lock, flags); mii_ethtool_gset(&lp->mii_if, cmd); spin_unlock_irqrestore(&lp->lock, flags); r = 0; } return r; } static int 
pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r = -EOPNOTSUPP;

	/* only supported when an MII PHY is attached */
	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		r = mii_ethtool_sset(&lp->mii_if, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return r;
}

/* ethtool: fill in driver name/version and bus location (PCI or VLB). */
static void pcnet32_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	if (lp->pci_dev)
		strlcpy(info->bus_info, pci_name(lp->pci_dev),
			sizeof(info->bus_info));
	else
		snprintf(info->bus_info, sizeof(info->bus_info),
			"VLB 0x%lx", dev->base_addr);
}

/* ethtool: link state from MII, from BCR4 on 970A+, or assumed up. */
static u32 pcnet32_get_link(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&lp->lock, flags);
	if (lp->mii) {
		r = mii_link_ok(&lp->mii_if);
	} else if (lp->chip_version >= PCNET32_79C970A) {
		ulong ioaddr = dev->base_addr;	/* card base I/O address */
		r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
	} else {	/* can not detect link on really old chips */
		r = 1;
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	return r;
}

static u32 pcnet32_get_msglevel(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	lp->msg_enable = value;
}

/* ethtool: restart autonegotiation (MII PHYs only). */
static int pcnet32_nway_reset(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int r = -EOPNOTSUPP;

	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		r = mii_nway_restart(&lp->mii_if);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return r;
}

/* ethtool: report current and maximum rx/tx ring sizes. */
static void pcnet32_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	ering->tx_max_pending = TX_MAX_RING_SIZE;
	ering->tx_pending = lp->tx_ring_size;
	ering->rx_max_pending = RX_MAX_RING_SIZE;
	ering->rx_pending = lp->rx_ring_size;
}

/* ethtool: resize the rx/tx rings (rounded up to a power of two),
 * stopping and restarting the chip around the reallocation. */
static int pcnet32_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ering)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	unsigned int size;
	ulong ioaddr = dev->base_addr;
	int i;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(dev))
		pcnet32_netif_stop(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */

	size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);

	/* set the minimum ring size to 4, to allow the loopback test to work
	 * unchanged.
	 */
	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	/* the realloc helpers take the log2 of the new size */
	if ((1 << i) != lp->tx_ring_size)
		pcnet32_realloc_tx_ring(dev, lp, i);

	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	if ((1 << i) != lp->rx_ring_size)
		pcnet32_realloc_rx_ring(dev, lp, i);

	lp->napi.weight = lp->rx_ring_size / 2;

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
		   lp->rx_ring_size, lp->tx_ring_size);

	return 0;
}

/* ethtool: names of the self-test results. */
static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
				u8 *data)
{
	memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}

static int pcnet32_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return PCNET32_TEST_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool self-test entry point: only an offline loopback test exists. */
static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *test, u64 * data)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int rc;

	if (test->flags == ETH_TEST_FL_OFFLINE) {
		rc = pcnet32_loopback_test(dev, data);
		if (rc) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test failed\n");
			test->flags |= ETH_TEST_FL_FAILED;
		} else
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test passed\n");
	} else
		netif_printk(lp, hw, KERN_DEBUG, dev,
			     "No tests to run (specify 'Offline' on ethtool)\n");
}				/* end pcnet32_ethtool_test */

/* Internal loopback self-test: transmit numbuffs packets with loopback
 * enabled in BCR32/CSR15 and compare the received data byte-for-byte.
 * Returns 0 on success, 1 on failure; result also stored in *data1. */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;	/* access to registers */
	ulong ioaddr = dev->base_addr;	/* card base I/O address */
	struct sk_buff *skb;	/* sk buff */
	int x, i;		/* counters */
	int numbuffs = 4;	/* number of TX/RX buffers and descs */
	u16 status = 0x8300;	/* TX ring status */
	__le16 teststatus;	/* test of ring status */
	int rc;			/* return code */
	int size;		/* size of packets */
	unsigned char *packet;	/* source packet data */
	static const int data_len = 60;	/* length of source packets */
	unsigned long flags;
	unsigned long ticks;

	rc = 1;			/* default to fail */

	if (netif_running(dev))
		pcnet32_netif_stop(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */

	numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));

	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);
	lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	/* purge & init rings but don't actually restart */
	pcnet32_restart(dev, 0x0000);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */

	/* Initialize Transmit buffers. */
	size = data_len + 15;
	for (x = 0; x < numbuffs; x++) {
		skb = netdev_alloc_skb(dev, size);
		if (!skb) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Cannot allocate skb at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		packet = skb->data;
		skb_put(skb, size);	/* create space for data */
		lp->tx_skbuff[x] = skb;
		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
		lp->tx_ring[x].misc = 0;

		/* put DA and SA into the skb */
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		/* type */
		*packet++ = 0x08;
		*packet++ = 0x06;
		/* packet number */
		*packet++ = x;
		/* fill packet with data */
		for (i = 0; i < data_len; i++)
			*packet++ = i;

		lp->tx_dma_addr[x] =
			pci_map_single(lp->pci_dev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
		wmb();	/* Make sure owner changes after all others are visible */
		lp->tx_ring[x].status = cpu_to_le16(status);
	}

	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BCR32 */
	a->write_bcr(ioaddr, 32, x | 0x0002);

	/* set int loopback in CSR15 */
	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
	lp->a->write_csr(ioaddr, CSR15, x | 0x0044);

	teststatus = cpu_to_le16(0x8000);
	lp->a->write_csr(ioaddr, CSR0, CSR0_START);	/* Set STRT bit */

	/* Check status of descriptors: poll for ownership to flip back to
	 * the CPU, dropping the lock while sleeping */
	for (x = 0; x < numbuffs; x++) {
		ticks = 0;
		rmb();
		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
			spin_unlock_irqrestore(&lp->lock, flags);
			msleep(1);
			spin_lock_irqsave(&lp->lock, flags);
			rmb();
			ticks++;
		}
		if (ticks == 200) {
			netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
			break;
		}
	}

	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
	wmb();
	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
		netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");

		for (x = 0; x < numbuffs; x++) {
			netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
			skb = lp->rx_skbuff[x];
			for (i = 0; i < size; i++)
				pr_cont(" %02x", *(skb->data + i));
			pr_cont("\n");
		}
	}

	/* compare every received buffer against what was sent */
	x = 0;
	rc = 0;
	while (x < numbuffs && !rc) {
		skb = lp->rx_skbuff[x];
		packet = lp->tx_skbuff[x]->data;
		for (i = 0; i < size; i++) {
			if (*(skb->data + i) != packet[i]) {
				netif_printk(lp, hw, KERN_DEBUG, dev,
					     "Error in compare! %2x - %02x %02x\n",
					     i, *(skb->data + i), packet[i]);
				rc = 1;
				break;
			}
		}
		x++;
	}

clean_up:
	*data1 = rc;
	pcnet32_purge_tx_ring(dev);

	x = a->read_csr(ioaddr, CSR15);
	a->write_csr(ioaddr, CSR15, (x & ~0x0044));	/* reset bits 6 and 2 */

	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
	a->write_bcr(ioaddr, 32, (x & ~0x0002));

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	} else {
		pcnet32_purge_rx_ring(dev);
		lp->a->write_bcr(ioaddr, 20, 4);	/* return to 16bit mode */
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	return rc;
}				/* end pcnet32_loopback_test */

/* ethtool: blink the adapter LEDs by toggling bit 14 in BCR4-7,
 * saving and restoring the original register values. */
static int pcnet32_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;
	int i;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
		spin_unlock_irqrestore(&lp->lock, flags);
		return 2;	/* cycle on/off twice per second */

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_OFF:
		/* Blink the led */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
		spin_unlock_irqrestore(&lp->lock, flags);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the original value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return 0;
}

/*
 * lp->lock must be held.
 */
/* Put the chip into SUSPEND via CSR5 bit 0, polling (with the lock
 * dropped) until the bit reads back set. Returns 1 if suspended,
 * 0 if the chip is too old or the suspend timed out. */
static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
			   int can_sleep)
{
	int csr5;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	int ticks;

	/* really old chips have to be stopped. */
	if (lp->chip_version < PCNET32_79C970A)
		return 0;

	/* set SUSPEND (SPND) - CSR5 bit 0 */
	csr5 = a->read_csr(ioaddr, CSR5);
	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);

	/* poll waiting for bit to be set */
	ticks = 0;
	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
		spin_unlock_irqrestore(&lp->lock, *flags);
		if (can_sleep)
			msleep(1);
		else
			mdelay(1);
		spin_lock_irqsave(&lp->lock, *flags);
		ticks++;
		if (ticks > 200) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Error getting into suspend!\n");
			return 0;
		}
	}
	return 1;
}

/*
 * process one receive descriptor entry
 */

static void pcnet32_rx_entry(struct net_device *dev,
			     struct pcnet32_private *lp,
			     struct pcnet32_rx_head *rxp,
			     int entry)
{
	int status = (short)le16_to_cpu(rxp->status) >> 8;
	int rx_in_place = 0;
	struct sk_buff *skb;
	short pkt_len;

	if (status != 0x03) {	/* There was an error. */
		/*
		 * There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with full-sized
		 * buffers it's possible for a jabber packet to use two
		 * buffers, with only the last correctly noting the error.
		 */
		if (status & 0x01)	/* Only count a general error at the */
			dev->stats.rx_errors++;	/* end of a packet. */
		if (status & 0x20)
			dev->stats.rx_frame_errors++;
		if (status & 0x10)
			dev->stats.rx_over_errors++;
		if (status & 0x08)
			dev->stats.rx_crc_errors++;
		if (status & 0x04)
			dev->stats.rx_fifo_errors++;
		return;
	}

	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;

	/* Discard oversize frames. */
	if (unlikely(pkt_len > PKT_BUF_SIZE)) {
		netif_err(lp, drv, dev, "Impossible packet size %d!\n",
			  pkt_len);
		dev->stats.rx_errors++;
		return;
	}
	if (pkt_len < 60) {
		netif_err(lp, rx_err, dev, "Runt packet!\n");
		dev->stats.rx_errors++;
		return;
	}

	/* large packet: hand the ring buffer itself up the stack and
	 * replace it with a freshly allocated one */
	if (pkt_len > rx_copybreak) {
		struct sk_buff *newskb;

		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
		if (newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);
			skb = lp->rx_skbuff[entry];
			pci_unmap_single(lp->pci_dev,
					 lp->rx_dma_addr[entry],
					 PKT_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
			lp->rx_skbuff[entry] = newskb;
			lp->rx_dma_addr[entry] =
					    pci_map_single(lp->pci_dev,
							   newskb->data,
							   PKT_BUF_SIZE,
							   PCI_DMA_FROMDEVICE);
			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
			rx_in_place = 1;
		} else
			skb = NULL;
	} else
		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

	if (skb == NULL) {
		netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
		dev->stats.rx_dropped++;
		return;
	}
	if (!rx_in_place) {
		/* small packet: copy out of the ring buffer, syncing the
		 * DMA mapping around the copy */
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, pkt_len);	/* Make room */
		pci_dma_sync_single_for_cpu(lp->pci_dev,
					    lp->rx_dma_addr[entry],
					    pkt_len,
					    PCI_DMA_FROMDEVICE);
		skb_copy_to_linear_data(skb,
				 (unsigned char *)(lp->rx_skbuff[entry]->data),
				 pkt_len);
		pci_dma_sync_single_for_device(lp->pci_dev,
					       lp->rx_dma_addr[entry],
					       pkt_len,
					       PCI_DMA_FROMDEVICE);
	}
	dev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
	dev->stats.rx_packets++;
}

/* Receive up to "budget" packets from the ring; returns packets handled. */
static int pcnet32_rx(struct net_device *dev, int budget)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int entry = lp->cur_rx & lp->rx_mod_mask;
	struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
	int npackets = 0;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
		pcnet32_rx_entry(dev, lp, rxp, entry);
		npackets += 1;
		/*
		 * The docs say that the buffer length isn't touched, but Andrew
		 * Boyd of QNX reports that some revs of the 79C965 clear it.
		 */
		rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after others are visible */
		rxp->status = cpu_to_le16(0x8000);	/* give back to chip */
		entry = (++lp->cur_rx) & lp->rx_mod_mask;
		rxp = &lp->rx_ring[entry];
	}

	return npackets;
}

/* Reap completed transmit descriptors, update stats, and free skbs.
 * Returns nonzero when a Tx FIFO error requires a chip restart. */
static int pcnet32_tx(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned int dirty_tx = lp->dirty_tx;
	int delta;
	int must_restart = 0;

	while (dirty_tx != lp->cur_tx) {
		int entry = dirty_tx & lp->tx_mod_mask;
		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

		if (status < 0)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[entry].base = 0;

		if (status & 0x4000) {
			/* There was a major error, log it. */
			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
			dev->stats.tx_errors++;
			netif_err(lp, tx_err, dev,
				  "Tx error status=%04x err_status=%08x\n",
				  status, err_status);
			if (err_status & 0x04000000)
				dev->stats.tx_aborted_errors++;
			if (err_status & 0x08000000)
				dev->stats.tx_carrier_errors++;
			if (err_status & 0x10000000)
				dev->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				/* Ackk! On FIFO errors the Tx unit is turned off! */
				/* Remove this verbosity later! */
				netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
				must_restart = 1;
			}
#else
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
					/* Ackk! On FIFO errors the Tx unit is turned off! */
					/* Remove this verbosity later! */
					netif_err(lp, tx_err, dev,
						  "Tx FIFO error!\n");
					must_restart = 1;
				}
			}
#endif
		} else {
			if (status & 0x1800)
				dev->stats.collisions++;
			dev->stats.tx_packets++;
		}

		/* We must free the original skb */
		if (lp->tx_skbuff[entry]) {
			pci_unmap_single(lp->pci_dev,
					 lp->tx_dma_addr[entry],
					 lp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[entry]);
			lp->tx_skbuff[entry] = NULL;
			lp->tx_dma_addr[entry] = 0;
		}
		dirty_tx++;
	}

	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
	if (delta > lp->tx_ring_size) {
		netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
			  dirty_tx, lp->cur_tx, lp->tx_full);
		dirty_tx += lp->tx_ring_size;
		delta -= lp->tx_ring_size;
	}

	if (lp->tx_full &&
	    netif_queue_stopped(dev) &&
	    delta < lp->tx_ring_size - 2) {
		/* The ring is no longer full, clear tbusy. */
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}
	lp->dirty_tx = dirty_tx;

	return must_restart;
}

/* NAPI poll: receive up to "budget" packets, reap tx completions
 * (restarting the chip after a Tx FIFO error), and re-enable
 * interrupts once the budget is not exhausted. */
static int pcnet32_poll(struct napi_struct *napi, int budget)
{
	struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
	struct net_device *dev = lp->dev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int work_done;
	u16 val;

	work_done = pcnet32_rx(dev, budget);

	spin_lock_irqsave(&lp->lock, flags);
	if (pcnet32_tx(dev)) {
		/* reset the chip to clear the error condition, then restart */
		lp->a->reset(ioaddr);
		lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (work_done < budget) {
		spin_lock_irqsave(&lp->lock, flags);

		__napi_complete(napi);

		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
		lp->a->write_csr(ioaddr, CSR3, val);

		/* Set interrupt enable.
		 */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);

		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return work_done;
}

#define PCNET32_REGS_PER_PHY	32
#define PCNET32_MAX_PHYS	32
/* ethtool: size in bytes of the register dump produced below. */
static int pcnet32_get_regs_len(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int j = lp->phycount * PCNET32_REGS_PER_PHY;

	return (PCNET32_NUM_REGS + j) * sizeof(u16);
}

/* ethtool: dump address PROM, CSRs, BCRs and all PHY registers,
 * suspending the chip (if running) around the reads. */
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr)
{
	int i, csr0;
	u16 *buff = ptr;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	csr0 = a->read_csr(ioaddr, CSR0);
	if (!(csr0 & CSR0_STOP))	/* If not stopped */
		pcnet32_suspend(dev, &flags, 1);

	/* read address PROM */
	for (i = 0; i < 16; i += 2)
		*buff++ = inw(ioaddr + i);

	/* read control and status registers */
	for (i = 0; i < 90; i++)
		*buff++ = a->read_csr(ioaddr, i);

	*buff++ = a->read_csr(ioaddr, 112);
	*buff++ = a->read_csr(ioaddr, 114);

	/* read bus configuration registers */
	for (i = 0; i < 30; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	*buff++ = 0;	/* skip bcr30 so as not to hang 79C976 */

	for (i = 31; i < 36; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	/* read mii phy registers */
	if (lp->mii) {
		int j;
		for (j = 0; j < PCNET32_MAX_PHYS; j++) {
			if (lp->phymask & (1 << j)) {
				for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
					/* BCR33 selects phy/reg, BCR34 is data */
					lp->a->write_bcr(ioaddr, 33,
							 (j << 5) | i);
					*buff++ = lp->a->read_bcr(ioaddr, 34);
				}
			}
		}
	}

	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
		int csr5;

		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = a->read_csr(ioaddr, CSR5);
		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

static const struct ethtool_ops pcnet32_ethtool_ops = {
	.get_settings		= pcnet32_get_settings,
	.set_settings		= pcnet32_set_settings,
	.get_drvinfo		= pcnet32_get_drvinfo,
	.get_msglevel		= pcnet32_get_msglevel,
	.set_msglevel		= pcnet32_set_msglevel,
	.nway_reset		= pcnet32_nway_reset,
	.get_link		= pcnet32_get_link,
	.get_ringparam		= pcnet32_get_ringparam,
	.set_ringparam		= pcnet32_set_ringparam,
	.get_strings		= pcnet32_get_strings,
	.self_test		= pcnet32_ethtool_test,
	.set_phys_id		= pcnet32_set_phys_id,
	.get_regs_len		= pcnet32_get_regs_len,
	.get_regs		= pcnet32_get_regs,
	.get_sset_count		= pcnet32_get_sset_count,
};

/* only probes for non-PCI devices, the rest are handled by
 * pci_register_driver via pcnet32_probe_pci */

static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
	unsigned int *port, ioaddr;

	/* search for PCnet32 VLB cards at known addresses */
	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
		if (request_region
		    (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
			/* check if there is really a pcnet chip on that ioaddr */
			if ((inb(ioaddr + 14) == 0x57) &&
			    (inb(ioaddr + 15) == 0x57)) {
				pcnet32_probe1(ioaddr, 0, NULL);
			} else {
				release_region(ioaddr, PCNET32_TOTAL_SIZE);
			}
		}
	}
}

/* PCI probe callback: enable the device, claim its I/O region and
 * hand off to the common pcnet32_probe1(). */
static int __devinit
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("failed to enable device -- err=%d\n", err);
		return err;
	}
	pci_set_master(pdev);

	ioaddr = pci_resource_start(pdev, 0);
	if (!ioaddr) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
		return -ENODEV;
	}

	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
		return -ENODEV;
	}
	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
		return -EBUSY;
	}

	err = pcnet32_probe1(ioaddr, 1, pdev);
	if (err < 0)
		pci_disable_device(pdev);

	return err;
}

static const struct net_device_ops pcnet32_netdev_ops = {
	.ndo_open		= pcnet32_open,
	.ndo_stop		= pcnet32_close,
	.ndo_start_xmit		=
				  pcnet32_start_xmit,
	.ndo_tx_timeout		= pcnet32_tx_timeout,
	.ndo_get_stats		= pcnet32_get_stats,
	.ndo_set_rx_mode	= pcnet32_set_multicast_list,
	.ndo_do_ioctl		= pcnet32_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= pcnet32_poll_controller,
#endif
};

/* pcnet32_probe1
 *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
 *  pdev will be NULL when called from pcnet32_probe_vlbus.
 */
static int __devinit
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
	struct pcnet32_private *lp;
	int i, media;
	int fdx, mii, fset, dxsuflo;
	int chip_version;
	char *chipname;
	struct net_device *dev;
	const struct pcnet32_access *a = NULL;
	u8 promaddr[6];
	int ret = -ENODEV;

	/* reset the chip */
	pcnet32_wio_reset(ioaddr);

	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
		a = &pcnet32_wio;
	} else {
		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
		    pcnet32_dwio_check(ioaddr)) {
			a = &pcnet32_dwio;
		} else {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_err("No access methods\n");
			goto err_release_region;
		}
	}

	/* chip id lives in CSR88/CSR89 */
	chip_version =
	    a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
	if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
		pr_info(" PCnet chip version is %#x\n", chip_version);
	if ((chip_version & 0xfff) != 0x003) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("Unsupported chip version\n");
		goto err_release_region;
	}

	/* initialize variables */
	fdx = mii = fset = dxsuflo = 0;
	chip_version = (chip_version >> 12) & 0xffff;

	/* per-variant feature flags: fdx = full duplex capable,
	 * mii = external MII PHY, fset = enable FIFO tweaks below */
	switch (chip_version) {
	case 0x2420:
		chipname = "PCnet/PCI 79C970";	/* PCI */
		break;
	case 0x2430:
		if (shared)
			chipname = "PCnet/PCI 79C970";	/* 970 gives the wrong chip id back */
		else
			chipname = "PCnet/32 79C965";	/* 486/VL bus */
		break;
	case 0x2621:
		chipname = "PCnet/PCI II 79C970A";	/* PCI */
		fdx = 1;
		break;
	case 0x2623:
		chipname = "PCnet/FAST 79C971";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2624:
		chipname = "PCnet/FAST+ 79C972";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2625:
		chipname = "PCnet/FAST III 79C973";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2626:
		chipname = "PCnet/Home 79C978";	/* PCI */
		fdx = 1;
		/*
		 * This is based on specs published at www.amd.com. This section
		 * assumes that a card with a 79C978 wants to go into standard
		 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
		 * and the module option homepna=1 can select this instead.
		 */
		media = a->read_bcr(ioaddr, 49);
		media &= ~3;	/* default to 10Mb ethernet */
		if (cards_found < MAX_UNITS && homepna[cards_found])
			media |= 1;	/* switch to home wiring mode */
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
			       (media & 1) ? "1" : "10");
		a->write_bcr(ioaddr, 49, media);
		break;
	case 0x2627:
		chipname = "PCnet/FAST III 79C975";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2628:
		chipname = "PCnet/PRO 79C976";
		fdx = 1;
		mii = 1;
		break;
	default:
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("PCnet version %#x, no PCnet32 chip\n",
				chip_version);
		goto err_release_region;
	}

	/*
	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
	 * starting until the packet is loaded. Strike one for reliability, lose
	 * one for latency - although on PCI this isn't a big loss. Older chips
	 * have FIFO's smaller than a packet, so you can't do this.
	 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
	 */
	if (fset) {
		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
		a->write_csr(ioaddr, 80,
			     (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
		dxsuflo = 1;
	}

	dev = alloc_etherdev(sizeof(*lp));
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s at %#3lx,", chipname, ioaddr);

	/* In most chips, after a chip reset, the ethernet address is read from the
	 * station address PROM at the base address and programmed into the
	 * "Physical Address Registers" CSR12-14.
	 * As a precautionary measure, we read the PROM values and complain if
	 * they disagree with the CSRs. If they miscompare, and the PROM addr
	 * is valid, then the PROM addr is used.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int val;
		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
		/* There may be endianness issues here. */
		dev->dev_addr[2 * i] = val & 0x0ff;
		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
	}

	/* read PROM address and compare with CSR address */
	for (i = 0; i < 6; i++)
		promaddr[i] = inb(ioaddr + i);

	if (memcmp(promaddr, dev->dev_addr, 6) ||
	    !is_valid_ether_addr(dev->dev_addr)) {
		if (is_valid_ether_addr(promaddr)) {
			if (pcnet32_debug & NETIF_MSG_PROBE) {
				pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using instead PROM address of");
			}
			memcpy(dev->dev_addr, promaddr, 6);
		}
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
	if (!is_valid_ether_addr(dev->perm_addr))
		memset(dev->dev_addr, 0, ETH_ALEN);

	if (pcnet32_debug & NETIF_MSG_PROBE) {
		pr_cont(" %pM", dev->dev_addr);

		/* Version 0x2623 and 0x2624 */
		if (((chip_version + 1) & 0xfffe) == 0x2624) {
			i = a->read_csr(ioaddr, 80) & 0x0C00;	/* Check tx_start_pt */
			pr_info(" tx_start_pt(0x%04x):", i);
			switch (i >> 10) {
			case 0:
				pr_cont(" 20 bytes,");
				break;
			case 1:
				pr_cont(" 64 bytes,");
				break;
			case 2:
				pr_cont(" 128 bytes,");
				break;
			case 3:
				pr_cont("~220 bytes,");
				break;
			}
			i = a->read_bcr(ioaddr, 18);	/* Check Burst/Bus control */
			pr_cont(" BCR18(%x):", i & 0xffff);
			if (i & (1 << 5))
				pr_cont("BurstWrEn ");
			if (i & (1 << 6))
				pr_cont("BurstRdEn ");
			if (i & (1 << 7))
				pr_cont("DWordIO ");
			if (i & (1 << 11))
				pr_cont("NoUFlow ");
			i = a->read_bcr(ioaddr, 25);
			pr_info(" SRAMSIZE=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 26);
			pr_cont(" SRAM_BND=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 27);
			if (i & (1 << 14))
				pr_cont("LowLatRx");
		}
	}

	dev->base_addr = ioaddr;
	lp = netdev_priv(dev);
	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
					      &lp->init_dma_addr);
	if (!lp->init_block) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("Consistent memory allocation failed\n");
		ret = -ENOMEM;
		goto err_free_netdev;
	}
	lp->pci_dev = pdev;

	lp->dev = dev;

	spin_lock_init(&lp->lock);

	lp->name = chipname;
	lp->shared_irq = shared;
	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
	lp->mii_if.full_duplex = fdx;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;
	lp->dxsuflo = dxsuflo;
	lp->mii = mii;
	lp->chip_version = chip_version;
	lp->msg_enable = pcnet32_debug;
	if ((cards_found >= MAX_UNITS) ||
	    (options[cards_found] >= sizeof(options_mapping)))
		lp->options = PCNET32_PORT_ASEL;
	else
		lp->options = options_mapping[options[cards_found]];
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;

	/* napi.weight is used in both the napi and non-napi cases */
	lp->napi.weight = lp->rx_ring_size / 2;

	netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);

	if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
	    ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
		lp->options |= PCNET32_PORT_FD;

	lp->a = a;

	/* prior to register_netdev, dev->name is not yet correct */
	if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
		ret = -ENOMEM;
		goto err_free_ring;
	}
	/* detect special T1/E1 WAN card by checking for MAC address */
	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
	    dev->dev_addr[2] == 0x75)
		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;

	lp->init_block->mode = cpu_to_le16(0x0003);	/* Disable Rx and Tx. */
	lp->init_block->tlen_rlen =
	    cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->filter[0] = 0x00000000;
	lp->init_block->filter[1] = 0x00000000;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);

	/* switch pcnet32 to 32bit mode */
	a->write_bcr(ioaddr, 20, 2);

	/* tell the chip where the init block lives (CSR1/CSR2) */
	a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	if (pdev) {		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(" assigned IRQ %d\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
		 * boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_cont(", failed to detect IRQ line\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(", probed IRQ %d\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */

		lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_info("Found PHY %04x:%04x at address %d\n",
					id1, id2, i);
		}
		lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1)
			lp->options |= PCNET32_PORT_MII;
	}

	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.data = (unsigned long)dev;
	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

	/* The PCNET32-specific entries in the device structure. */
	dev->netdev_ops = &pcnet32_netdev_ops;
	dev->ethtool_ops = &pcnet32_ethtool_ops;
	dev->watchdog_timeo = (5 * HZ);

	/* Fill in the generic fields of the device structure. */
	if (register_netdev(dev))
		goto err_free_ring;

	if (pdev) {
		pci_set_drvdata(pdev, dev);
	} else {
		lp->next = pcnet32_dev;
		pcnet32_dev = dev;
	}

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s: registered as %s\n", dev->name, lp->name);
	cards_found++;

	/* enable LED writes */
	a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);

	return 0;

err_free_ring:
	pcnet32_free_ring(dev);
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
			    lp->init_block, lp->init_dma_addr);
err_free_netdev:
	free_netdev(dev);
err_release_region:
	release_region(ioaddr, PCNET32_TOTAL_SIZE);
	return ret;
}

/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_tx_head) *
					   lp->tx_ring_size,
					   &lp->tx_ring_dma_addr);
	if (lp->tx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   lp->rx_ring_size,
					   &lp->rx_ring_dma_addr);
	if (lp->rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->tx_dma_addr) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->rx_dma_addr) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->tx_skbuff) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->rx_skbuff) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}

static void pcnet32_free_ring(struct
net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); kfree(lp->tx_skbuff); lp->tx_skbuff = NULL; kfree(lp->rx_skbuff); lp->rx_skbuff = NULL; kfree(lp->tx_dma_addr); lp->tx_dma_addr = NULL; kfree(lp->rx_dma_addr); lp->rx_dma_addr = NULL; if (lp->tx_ring) { pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring = NULL; } if (lp->rx_ring) { pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring = NULL; } } static int pcnet32_open(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); struct pci_dev *pdev = lp->pci_dev; unsigned long ioaddr = dev->base_addr; u16 val; int i; int rc; unsigned long flags; if (request_irq(dev->irq, pcnet32_interrupt, lp->shared_irq ? IRQF_SHARED : 0, dev->name, (void *)dev)) { return -EAGAIN; } spin_lock_irqsave(&lp->lock, flags); /* Check for a valid station address */ if (!is_valid_ether_addr(dev->dev_addr)) { rc = -EINVAL; goto err_free_irq; } /* Reset the PCNET32 */ lp->a->reset(ioaddr); /* switch pcnet32 to 32bit mode */ lp->a->write_bcr(ioaddr, 20, 2); netif_printk(lp, ifup, KERN_DEBUG, dev, "%s() irq %d tx/rx rings %#x/%#x init %#x\n", __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), (u32) (lp->rx_ring_dma_addr), (u32) (lp->init_dma_addr)); /* set/reset autoselect bit */ val = lp->a->read_bcr(ioaddr, 2) & ~2; if (lp->options & PCNET32_PORT_ASEL) val |= 2; lp->a->write_bcr(ioaddr, 2, val); /* handle full duplex setting */ if (lp->mii_if.full_duplex) { val = lp->a->read_bcr(ioaddr, 9) & ~3; if (lp->options & PCNET32_PORT_FD) { val |= 1; if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) val |= 2; } else if (lp->options & PCNET32_PORT_ASEL) { /* workaround of xSeries250, turn on for 79C975 only */ if (lp->chip_version == 0x2627) val |= 3; } lp->a->write_bcr(ioaddr, 9, val); } /* set/reset GPSI bit in test register */ val = lp->a->read_csr(ioaddr, 
124) & ~0x10; if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) val |= 0x10; lp->a->write_csr(ioaddr, 124, val); /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { if (lp->options & PCNET32_PORT_ASEL) { lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; netif_printk(lp, link, KERN_DEBUG, dev, "Setting 100Mb-Full Duplex\n"); } } if (lp->phycount < 2) { /* * 24 Jun 2004 according AMD, in order to change the PHY, * DANAS (or DISPM for 79C976) must be set; then select the speed, * duplex, and/or enable auto negotiation, and clear DANAS */ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { lp->a->write_bcr(ioaddr, 32, lp->a->read_bcr(ioaddr, 32) | 0x0080); /* disable Auto Negotiation, set 10Mpbs, HD */ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; if (lp->options & PCNET32_PORT_FD) val |= 0x10; if (lp->options & PCNET32_PORT_100) val |= 0x08; lp->a->write_bcr(ioaddr, 32, val); } else { if (lp->options & PCNET32_PORT_ASEL) { lp->a->write_bcr(ioaddr, 32, lp->a->read_bcr(ioaddr, 32) | 0x0080); /* enable auto negotiate, setup, disable fd */ val = lp->a->read_bcr(ioaddr, 32) & ~0x98; val |= 0x20; lp->a->write_bcr(ioaddr, 32, val); } } } else { int first_phy = -1; u16 bmcr; u32 bcr9; struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; /* * There is really no good other way to handle multiple PHYs * other than turning off all automatics */ val = lp->a->read_bcr(ioaddr, 2); lp->a->write_bcr(ioaddr, 2, val & ~2); val = lp->a->read_bcr(ioaddr, 32); lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ if (!(lp->options & PCNET32_PORT_ASEL)) { /* setup ecmd */ ecmd.port = PORT_MII; ecmd.transceiver = XCVR_INTERNAL; ecmd.autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(&ecmd, (lp->options & PCNET32_PORT_100) ? 
SPEED_100 : SPEED_10); bcr9 = lp->a->read_bcr(ioaddr, 9); if (lp->options & PCNET32_PORT_FD) { ecmd.duplex = DUPLEX_FULL; bcr9 |= (1 << 0); } else { ecmd.duplex = DUPLEX_HALF; bcr9 |= ~(1 << 0); } lp->a->write_bcr(ioaddr, 9, bcr9); } for (i = 0; i < PCNET32_MAX_PHYS; i++) { if (lp->phymask & (1 << i)) { /* isolate all but the first PHY */ bmcr = mdio_read(dev, i, MII_BMCR); if (first_phy == -1) { first_phy = i; mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE); } else { mdio_write(dev, i, MII_BMCR, bmcr | BMCR_ISOLATE); } /* use mii_ethtool_sset to setup PHY */ lp->mii_if.phy_id = i; ecmd.phy_address = i; if (lp->options & PCNET32_PORT_ASEL) { mii_ethtool_gset(&lp->mii_if, &ecmd); ecmd.autoneg = AUTONEG_ENABLE; } mii_ethtool_sset(&lp->mii_if, &ecmd); } } lp->mii_if.phy_id = first_phy; netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); } #ifdef DO_DXSUFLO if (lp->dxsuflo) { /* Disable transmit stop on underflow */ val = lp->a->read_csr(ioaddr, CSR3); val |= 0x40; lp->a->write_csr(ioaddr, CSR3, val); } #endif lp->init_block->mode = cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); pcnet32_load_multicast(dev); if (pcnet32_init_ring(dev)) { rc = -ENOMEM; goto err_free_ring; } napi_enable(&lp->napi); /* Re-initialize the PCNET32, and start it when done. */ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); netif_start_queue(dev); if (lp->chip_version >= PCNET32_79C970A) { /* Print the link status and start the watchdog */ pcnet32_check_media(dev, 1); mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); } i = 0; while (i++ < 100) if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) break; /* * We used to clear the InitDone bit, 0x0100, here but Mark Stockton * reports that doing so triggers a bug in the '974. 
*/ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); netif_printk(lp, ifup, KERN_DEBUG, dev, "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n", i, (u32) (lp->init_dma_addr), lp->a->read_csr(ioaddr, CSR0)); spin_unlock_irqrestore(&lp->lock, flags); return 0; /* Always succeed */ err_free_ring: /* free any allocated skbuffs */ pcnet32_purge_rx_ring(dev); /* * Switch back to 16bit mode to avoid problems with dumb * DOS packet driver after a warm reboot */ lp->a->write_bcr(ioaddr, 20, 4); err_free_irq: spin_unlock_irqrestore(&lp->lock, flags); free_irq(dev->irq, dev); return rc; } /* * The LANCE has been halted for one reason or another (busmaster memory * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, * etc.). Modern LANCE variants always reload their ring-buffer * configuration when restarted, so we must reinitialize our ring * context before restarting. As part of this reinitialization, * find all packets still on the Tx ring and pretend that they had been * sent (in effect, drop the packets on the floor) - the higher-level * protocols will time out and retransmit. It'd be better to shuffle * these skbs to a temp list and then actually re-Tx them after * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com */ static void pcnet32_purge_tx_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb_any(lp->tx_skbuff[i]); } lp->tx_skbuff[i] = NULL; lp->tx_dma_addr[i] = 0; } } /* Initialize the PCNET32 Rx and Tx rings. 
*/ static int pcnet32_init_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; lp->tx_full = 0; lp->cur_rx = lp->cur_tx = 0; lp->dirty_rx = lp->dirty_tx = 0; for (i = 0; i < lp->rx_ring_size; i++) { struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; if (rx_skbuff == NULL) { lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB); rx_skbuff = lp->rx_skbuff[i]; if (!rx_skbuff) { /* there is not much we can do at this point */ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", __func__); return -1; } skb_reserve(rx_skbuff, NET_IP_ALIGN); } rmb(); if (lp->rx_dma_addr[i] == 0) lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); wmb(); /* Make sure owner changes after all others are visible */ lp->rx_ring[i].status = cpu_to_le16(0x8000); } /* The Tx buffer address is filled in as needed, but we do need to clear * the upper ownership bit. */ for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ lp->tx_ring[i].base = 0; lp->tx_dma_addr[i] = 0; } lp->init_block->tlen_rlen = cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } /* the pcnet32 has been issued a stop or reset. Wait for the stop bit * then flush the pending transmit operations, re-initialize the ring, * and tell the chip to initialize. 
*/
/*
 * pcnet32_restart - re-initialize a stopped chip and bring it back up.
 * @dev:       net device being restarted
 * @csr0_bits: CSR0 value to write once init completes (e.g. CSR0_NORMAL)
 *
 * Waits for the STOP bit, drops every pending Tx packet, rebuilds the
 * rings, kicks CSR0_INIT and polls (up to 1000 reads) for IDON before
 * writing the caller's CSR0 bits.  Caller must hold lp->lock.
 */
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	int i;

	/* wait for stop */
	for (i = 0; i < 100; i++)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
			break;

	if (i >= 100)
		netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
			  __func__);

	/* Drop in-flight Tx packets; higher layers will retransmit. */
	pcnet32_purge_tx_ring(dev);
	if (pcnet32_init_ring(dev))
		return;

	/* ReInit Ring */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
	i = 0;
	/* Busy-poll for init-done (IDON); bounded so we never hang here. */
	while (i++ < 1000)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;

	lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}

/*
 * pcnet32_tx_timeout - netdev watchdog callback for a stuck transmitter.
 *
 * Stops the chip, bumps tx_errors, optionally dumps both descriptor
 * rings at KERN_DEBUG, then restarts via pcnet32_restart() and wakes
 * the queue.  Runs with lp->lock held for the whole sequence.
 */
static void pcnet32_tx_timeout(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr, flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Transmitter timeout, serious problems. */
	if (pcnet32_debug & NETIF_MSG_DRV)
		pr_err("%s: transmit timed out, status %4.4x, resetting\n",
		       dev->name, lp->a->read_csr(ioaddr, CSR0));
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
	dev->stats.tx_errors++;
	if (netif_msg_tx_err(lp)) {
		int i;
		printk(KERN_DEBUG
		       " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		       lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
		       lp->cur_rx);
		/* buf_length/length are stored negated; negate back for display */
		for (i = 0; i < lp->rx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->rx_ring[i].base),
			       (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
			       le32_to_cpu(lp->rx_ring[i].msg_length),
			       le16_to_cpu(lp->rx_ring[i].status));
		for (i = 0; i < lp->tx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->tx_ring[i].base),
			       (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
			       le32_to_cpu(lp->tx_ring[i].misc),
			       le16_to_cpu(lp->tx_ring[i].status));
		printk("\n");
	}
	pcnet32_restart(dev, CSR0_NORMAL);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * pcnet32_start_xmit - queue one skb on the Tx descriptor ring.
 *
 * Fills the next ring slot (length is stored negated, per the chip's
 * descriptor format), DMA-maps the skb, hands ownership to the adapter
 * with the 0x8300 status write, and triggers an immediate Tx poll.
 * Stops the queue when the following slot is still owned/occupied.
 * Always returns NETDEV_TX_OK.
 */
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	u16 status;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	netif_printk(lp, tx_queued, KERN_DEBUG, dev,
		     "%s() called, csr0 %4.4x\n",
		     __func__, lp->a->read_csr(ioaddr, CSR0));

	/* Default status -- will not enable Successful-TxDone
	 * interrupt when that option is available to us.
	 */
	status = 0x8300;

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & lp->tx_mod_mask;

	/* Caution: the write order is important here, set the status
	 * with the "ownership" bits last.
	 */

	lp->tx_ring[entry].length = cpu_to_le16(-skb->len);

	lp->tx_ring[entry].misc = 0x00000000;

	lp->tx_skbuff[entry] = skb;
	lp->tx_dma_addr[entry] =
	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
	wmb();	/* Make sure owner changes after all others are visible */
	lp->tx_ring[entry].status = cpu_to_le16(status);

	lp->cur_tx++;
	dev->stats.tx_bytes += skb->len;

	/* Trigger an immediate send poll. */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);

	/* Next slot's base still set => ring is full; pause the stack. */
	if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
		lp->tx_full = 1;
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}

/* The PCNET32 interrupt handler.
*/ static irqreturn_t pcnet32_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct pcnet32_private *lp; unsigned long ioaddr; u16 csr0; int boguscnt = max_interrupt_work; ioaddr = dev->base_addr; lp = netdev_priv(dev); spin_lock(&lp->lock); csr0 = lp->a->read_csr(ioaddr, CSR0); while ((csr0 & 0x8f00) && --boguscnt >= 0) { if (csr0 == 0xffff) break; /* PCMCIA remove happened */ /* Acknowledge all of the current interrupt sources ASAP. */ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); netif_printk(lp, intr, KERN_DEBUG, dev, "interrupt csr0=%#2.2x new csr=%#2.2x\n", csr0, lp->a->read_csr(ioaddr, CSR0)); /* Log misc errors. */ if (csr0 & 0x4000) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & 0x1000) { /* * This happens when our receive ring is full. This * shouldn't be a problem as we will see normal rx * interrupts for the frames in the receive ring. But * there are some PCI chipsets (I can reproduce this * on SP3G with Intel saturn chipset) which have * sometimes problems and will fill up the receive * ring with error descriptors. In this situation we * don't get a rx interrupt, but a missed frame * interrupt sooner or later. */ dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ } if (csr0 & 0x0800) { netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", csr0); /* unlike for the lance, there is no restart needed */ } if (napi_schedule_prep(&lp->napi)) { u16 val; /* set interrupt masks */ val = lp->a->read_csr(ioaddr, CSR3); val |= 0x5f00; lp->a->write_csr(ioaddr, CSR3, val); __napi_schedule(&lp->napi); break; } csr0 = lp->a->read_csr(ioaddr, CSR0); } netif_printk(lp, intr, KERN_DEBUG, dev, "exiting interrupt, csr0=%#4.4x\n", lp->a->read_csr(ioaddr, CSR0)); spin_unlock(&lp->lock); return IRQ_HANDLED; } static int pcnet32_close(struct net_device *dev) { unsigned long ioaddr = dev->base_addr; struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); napi_disable(&lp->napi); spin_lock_irqsave(&lp->lock, flags); dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); netif_printk(lp, ifdown, KERN_DEBUG, dev, "Shutting down ethercard, status was %2.2x\n", lp->a->read_csr(ioaddr, CSR0)); /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* * Switch back to 16bit mode to avoid problems with dumb * DOS packet driver after a warm reboot */ lp->a->write_bcr(ioaddr, 20, 4); spin_unlock_irqrestore(&lp->lock, flags); free_irq(dev->irq, dev); spin_lock_irqsave(&lp->lock, flags); pcnet32_purge_rx_ring(dev); pcnet32_purge_tx_ring(dev); spin_unlock_irqrestore(&lp->lock, flags); return 0; } static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); spin_unlock_irqrestore(&lp->lock, flags); return &dev->stats; } /* taken from the sunlance driver, which it took from the depca driver */ static void pcnet32_load_multicast(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; volatile __le16 *mcast_table = (__le16 *)ib->filter; struct netdev_hw_addr *ha; unsigned long ioaddr = dev->base_addr; int i; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI) { ib->filter[0] = cpu_to_le32(~0U); ib->filter[1] = cpu_to_le32(~0U); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); return; } /* clear the multicast filter */ ib->filter[0] = 0; ib->filter[1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); } for (i = 0; i < 4; i++) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, le16_to_cpu(mcast_table[i])); } /* * Set or clear the multicast filter for this adaptor. 
*/ static void pcnet32_set_multicast_list(struct net_device *dev) { unsigned long ioaddr = dev->base_addr, flags; struct pcnet32_private *lp = netdev_priv(dev); int csr15, suspended; spin_lock_irqsave(&lp->lock, flags); suspended = pcnet32_suspend(dev, &flags, 0); csr15 = lp->a->read_csr(ioaddr, CSR15); if (dev->flags & IFF_PROMISC) { /* Log any net taps. */ netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); lp->init_block->mode = cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); } else { lp->init_block->mode = cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); pcnet32_load_multicast(dev); } if (suspended) { int csr5; /* clear SUSPEND (SPND) - CSR5 bit 0 */ csr5 = lp->a->read_csr(ioaddr, CSR5); lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); } else { lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); pcnet32_restart(dev, CSR0_NORMAL); netif_wake_queue(dev); } spin_unlock_irqrestore(&lp->lock, flags); } /* This routine assumes that the lp->lock is held */ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; u16 val_out; if (!lp->mii) return 0; lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); val_out = lp->a->read_bcr(ioaddr, 34); return val_out; } /* This routine assumes that the lp->lock is held */ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; if (!lp->mii) return; lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); lp->a->write_bcr(ioaddr, 34, val); } static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct pcnet32_private *lp = netdev_priv(dev); int rc; unsigned long flags; /* SIOC[GS]MIIxxx ioctls */ if (lp->mii) { spin_lock_irqsave(&lp->lock, 
flags); rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); spin_unlock_irqrestore(&lp->lock, flags); } else { rc = -EOPNOTSUPP; } return rc; } static int pcnet32_check_otherphy(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); struct mii_if_info mii = lp->mii_if; u16 bmcr; int i; for (i = 0; i < PCNET32_MAX_PHYS; i++) { if (i == lp->mii_if.phy_id) continue; /* skip active phy */ if (lp->phymask & (1 << i)) { mii.phy_id = i; if (mii_link_ok(&mii)) { /* found PHY with active link */ netif_info(lp, link, dev, "Using PHY number %d\n", i); /* isolate inactive phy */ bmcr = mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, bmcr | BMCR_ISOLATE); /* de-isolate new phy */ bmcr = mdio_read(dev, i, MII_BMCR); mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE); /* set new phy address */ lp->mii_if.phy_id = i; return 1; } } } return 0; } /* * Show the status of the media. Similar to mii_check_media however it * correctly shows the link speed for all (tested) pcnet32 variants. * Devices with no mii just report link state without speed. * * Caller is assumed to hold and release the lp->lock. */ static void pcnet32_check_media(struct net_device *dev, int verbose) { struct pcnet32_private *lp = netdev_priv(dev); int curr_link; int prev_link = netif_carrier_ok(dev) ? 
1 : 0; u32 bcr9; if (lp->mii) { curr_link = mii_link_ok(&lp->mii_if); } else { ulong ioaddr = dev->base_addr; /* card base I/O address */ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); } if (!curr_link) { if (prev_link || verbose) { netif_carrier_off(dev); netif_info(lp, link, dev, "link down\n"); } if (lp->phycount > 1) { curr_link = pcnet32_check_otherphy(dev); prev_link = 0; } } else if (verbose || !prev_link) { netif_carrier_on(dev); if (lp->mii) { if (netif_msg_link(lp)) { struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; mii_ethtool_gset(&lp->mii_if, &ecmd); netdev_info(dev, "link up, %uMbps, %s-duplex\n", ethtool_cmd_speed(&ecmd), (ecmd.duplex == DUPLEX_FULL) ? "full" : "half"); } bcr9 = lp->a->read_bcr(dev->base_addr, 9); if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { if (lp->mii_if.full_duplex) bcr9 |= (1 << 0); else bcr9 &= ~(1 << 0); lp->a->write_bcr(dev->base_addr, 9, bcr9); } } else { netif_info(lp, link, dev, "link up\n"); } } } /* * Check for loss of link and link establishment. * Can not use mii_check_media because it does nothing if mode is forced. 
*/ static void pcnet32_watchdog(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; /* Print the link status if it has changed */ spin_lock_irqsave(&lp->lock, flags); pcnet32_check_media(dev, 0); spin_unlock_irqrestore(&lp->lock, flags); mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); } static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); if (netif_running(dev)) { netif_device_detach(dev); pcnet32_close(dev); } pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int pcnet32_pm_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (netif_running(dev)) { pcnet32_open(dev); netif_device_attach(dev); } return 0; } static void __devexit pcnet32_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct pcnet32_private *lp = netdev_priv(dev); unregister_netdev(dev); pcnet32_free_ring(dev); release_region(dev->base_addr, PCNET32_TOTAL_SIZE); pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), lp->init_block, lp->init_dma_addr); free_netdev(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } static struct pci_driver pcnet32_driver = { .name = DRV_NAME, .probe = pcnet32_probe_pci, .remove = __devexit_p(pcnet32_remove_one), .id_table = pcnet32_pci_tbl, .suspend = pcnet32_pm_suspend, .resume = pcnet32_pm_resume, }; /* An additional parameter that may be passed in... 
*/ static int debug = -1; static int tx_start_pt = -1; static int pcnet32_have_pci; module_param(debug, int, 0); MODULE_PARM_DESC(debug, DRV_NAME " debug level"); module_param(max_interrupt_work, int, 0); MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); module_param(rx_copybreak, int, 0); MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); module_param(tx_start_pt, int, 0); MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); module_param(pcnet32vlb, int, 0); MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); module_param_array(options, int, NULL, 0); MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ module_param_array(homepna, int, NULL, 0); MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); MODULE_AUTHOR("Thomas Bogendoerfer"); MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); MODULE_LICENSE("GPL"); #define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) static int __init pcnet32_init_module(void) { pr_info("%s", version); pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) tx_start = tx_start_pt; /* find the PCI devices */ if (!pci_register_driver(&pcnet32_driver)) pcnet32_have_pci = 1; /* should we find any remaining VLbus devices ? */ if (pcnet32vlb) pcnet32_probe_vlbus(pcnet32_portlist); if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) pr_info("%d cards_found\n", cards_found); return (pcnet32_have_pci + cards_found) ? 
0 : -ENODEV; } static void __exit pcnet32_cleanup_module(void) { struct net_device *next_dev; while (pcnet32_dev) { struct pcnet32_private *lp = netdev_priv(pcnet32_dev); next_dev = lp->next; unregister_netdev(pcnet32_dev); pcnet32_free_ring(pcnet32_dev); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), lp->init_block, lp->init_dma_addr); free_netdev(pcnet32_dev); pcnet32_dev = next_dev; } if (pcnet32_have_pci) pci_unregister_driver(&pcnet32_driver); } module_init(pcnet32_init_module); module_exit(pcnet32_cleanup_module); /* * Local variables: * c-indent-level: 4 * tab-width: 8 * End: */
gpl-2.0
f1vefour/mako
drivers/clk/clk-mux.c
4792
3076
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching.  No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */

#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)

/*
 * clk_mux_get_parent - decode the currently selected parent index from
 * the mux register field.
 *
 * CLK_MUX_INDEX_BIT: the field holds one bit per parent; bit n selects
 * parent n, recovered with ffs(val) - 1.
 * CLK_MUX_INDEX_ONE: the field is one-based; subtract 1.
 *
 * Returns the zero-based parent index.  NOTE(review): the return type
 * is u8, so the -EINVAL on an out-of-range value is truncated to 0xea
 * by the clk_ops contract — callers see a bogus index, not an error.
 */
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	/*
	 * FIXME need a mux-specific flag to determine if val is bitwise or
	 * numeric.  e.g. sys_clkin_ck's clksel field is 3 bits wide, but
	 * ranges from 0x1 to 0x7 (index starts at one)
	 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
	 * val = 0x4 really means "bit 2, index starts at bit 0"
	 */
	val = readl(mux->reg) >> mux->shift;
	val &= (1 << mux->width) - 1;

	if (val && (mux->flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;	/* bit position -> parent index */

	if (val && (mux->flags & CLK_MUX_INDEX_ONE))
		val--;			/* one-based field -> zero-based */

	if (val >= __clk_get_num_parents(hw->clk))
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_get_parent);

/*
 * clk_mux_set_parent - program the mux register field to select parent
 * @index.  Inverse of clk_mux_get_parent()'s decode.  The register
 * read-modify-write is protected by mux->lock when one was supplied.
 */
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;
	unsigned long flags = 0;

	/*
	 * BUGFIX: parent index n must be encoded as bit n (the inverse of
	 * get_parent's "ffs(val) - 1").  The previous "1 << ffs(index)"
	 * encoded the position *after* index's lowest set bit, so e.g.
	 * index 3 selected bit 1 instead of bit 3.
	 */
	if (mux->flags & CLK_MUX_INDEX_BIT)
		index = 1 << index;

	if (mux->flags & CLK_MUX_INDEX_ONE)
		index++;	/* zero-based index -> one-based field */

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	val = readl(mux->reg);
	val &= ~(((1 << mux->width) - 1) << mux->shift);
	val |= index << mux->shift;
	writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_set_parent);

struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

/*
 * clk_register_mux - allocate a clk_mux and register it with the
 * common clock framework.
 *
 * The struct is zero-allocated so fields not set here (e.g. mux->hw
 * internals consumed by clk_register) start out well-defined instead
 * of holding heap garbage, as with the previous kmalloc().
 *
 * Returns the new struct clk, or ERR_PTR(-ENOMEM) on allocation
 * failure.  Ownership of the allocation passes to the clk framework.
 */
struct clk *clk_register_mux(struct device *dev, const char *name,
		char **parent_names, u8 num_parents, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	struct clk_mux *mux;

	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);

	if (!mux) {
		pr_err("%s: could not allocate mux clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = clk_mux_flags;
	mux->lock = lock;

	return clk_register(dev, name, &clk_mux_ops, &mux->hw,
			    parent_names, num_parents, flags);
}
gpl-2.0
sudosurootdev/kernel_lge_lgl24
net/caif/cfveil.c
5560
2638
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

/* Prefix every pr_* message with the module and calling function name. */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>

/* First byte of a VEI frame: 0x00 = payload, high bit set = control. */
#define VEI_PAYLOAD 0x00
#define VEI_CMD_BIT 0x80
#define VEI_FLOW_OFF 0x81
#define VEI_FLOW_ON 0x80
#define VEI_SET_PIN 0x82

/* Recover the enclosing cfsrvl from its embedded cflayer member. */
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)

static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);

/*
 * cfvei_create - allocate and initialise a VEI service layer for
 * @channel_id on @dev_info.
 *
 * Returns a pointer to the embedded cflayer (valid because layer is
 * asserted to sit at offset 0 of struct cfsrvl), or NULL if the
 * GFP_ATOMIC allocation fails.  The caller owns the returned layer.
 */
struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
{
	struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
	if (!vei)
		return NULL;
	caif_assert(offsetof(struct cfsrvl, layer) == 0);
	cfsrvl_init(vei, channel_id, dev_info, true);
	vei->layer.receive = cfvei_receive;
	vei->layer.transmit = cfvei_transmit;
	snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
	return &vei->layer;
}

/*
 * cfvei_receive - handle one inbound VEI frame.
 *
 * Strips the one-byte command header.  Payload frames are passed up
 * the stack (ownership of @pkt transfers to the upper layer); every
 * other path destroys @pkt here.  Flow-control commands are turned
 * into ctrlcmd indications.  Returns 0 or a negative errno.
 */
static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd;
	int ret;
	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	caif_assert(layr->ctrlcmd != NULL);

	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		pr_err("Packet is erroneous!\n");
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	switch (cmd) {
	case VEI_PAYLOAD:
		/* pkt ownership passes upward; do not destroy here */
		ret = layr->up->receive(layr->up, pkt);
		return ret;
	case VEI_FLOW_OFF:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case VEI_FLOW_ON:
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case VEI_SET_PIN:	/* SET RS232 PIN */
		/* acknowledged but intentionally ignored */
		cfpkt_destroy(pkt);
		return 0;
	default:		/* SET RS232 PIN */
		pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
}

/*
 * cfvei_transmit - send one frame down the stack.
 *
 * Prepends the VEI_PAYLOAD (0x00) header byte and fills in the routing
 * info consumed by the MUX layer.  On any local failure @pkt is
 * destroyed here and a negative errno is returned; on success @pkt is
 * handed to the lower layer's transmit.
 */
static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 tmp = 0;
	struct caif_payload_info *info;
	int ret;
	struct cfsrvl *service = container_obj(layr);
	if (!cfsrvl_ready(service, &ret))
		goto err;
	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);

	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
		pr_err("Packet is erroneous!\n");
		ret = -EPROTO;
		goto err;
	}

	/* Add info-> for MUX-layer to route the packet out. */
	info = cfpkt_info(pkt);
	info->channel_id = service->layer.id;
	info->hdr_len = 1;
	info->dev_info = &service->dev_info;
	return layr->dn->transmit(layr->dn, pkt);
err:
	cfpkt_destroy(pkt);
	return ret;
}
gpl-2.0
cottsay/linux
drivers/pcmcia/sa1100_nanoengine.c
9656
3095
/*
 * drivers/pcmcia/sa1100_nanoengine.c
 *
 * PCMCIA implementation routines for BSI nanoEngine.
 *
 * In order to have a fully functional pcmcia subsystem in a BSE nanoEngine
 * board you should carefully read this:
 * http://cambuca.ldhs.cetuc.puc-rio.br/nanoengine/
 *
 * Copyright (C) 2010 Marcelo Roberto Jimenez <mroberto@cpti.cetuc.puc-rio.br>
 *
 * Based on original work for kernel 2.4 by
 * Miguel Freitas <miguel@cpti.cetuc.puc-rio.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/signal.h>

#include <asm/mach-types.h>
#include <asm/irq.h>

#include <mach/hardware.h>
#include <mach/nanoengine.h>

#include "sa1100_generic.h"

/* Per-socket GPIO assignments: reset output, card-detect and ready inputs. */
struct nanoengine_pins {
	unsigned output_pins;
	unsigned clear_outputs;
	int gpio_rst;
	int gpio_cd;
	int gpio_rdy;
};

/* One entry per physical PCMCIA socket on the board (two sockets). */
static struct nanoengine_pins nano_skts[] = {
	{
		.gpio_rst		= GPIO_PC_RESET0,
		.gpio_cd		= GPIO_PC_CD0,
		.gpio_rdy		= GPIO_PC_READY0,
	}, {
		.gpio_rst		= GPIO_PC_RESET1,
		.gpio_cd		= GPIO_PC_CD1,
		.gpio_rdy		= GPIO_PC_READY1,
	}
};

unsigned num_nano_pcmcia_sockets = ARRAY_SIZE(nano_skts);

/*
 * nanoengine_pcmcia_hw_init - claim the reset GPIO (driven low) for
 * socket skt->nr and hand the card-detect/ready GPIOs to the soc_common
 * layer via skt->stat.  Returns 0, -ENXIO for an out-of-range socket,
 * or the gpio_request_one() error.
 */
static int nanoengine_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	unsigned i = skt->nr;
	int ret;

	if (i >= num_nano_pcmcia_sockets)
		return -ENXIO;

	ret = gpio_request_one(nano_skts[i].gpio_rst, GPIOF_OUT_INIT_LOW,
		i ? "PC RST1" : "PC RST0");
	if (ret)
		return ret;

	skt->stat[SOC_STAT_CD].gpio = nano_skts[i].gpio_cd;
	skt->stat[SOC_STAT_CD].name = i ? "PC CD1" : "PC CD0";
	skt->stat[SOC_STAT_RDY].gpio = nano_skts[i].gpio_rdy;
	skt->stat[SOC_STAT_RDY].name = i ? "PC RDY1" : "PC RDY0";

	return 0;
}

/* Release the reset GPIO claimed in hw_init (CD/RDY are freed by core). */
static void nanoengine_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
	gpio_free(nano_skts[skt->nr].gpio_rst);
}

/*
 * nanoengine_pcmcia_configure_socket - apply socket state; only the
 * SS_RESET flag is acted on (drives the reset GPIO).  Voltage selection
 * is fixed in hardware, so state->Vcc is ignored.
 */
static int nanoengine_pcmcia_configure_socket(
	struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
	unsigned i = skt->nr;

	if (i >= num_nano_pcmcia_sockets)
		return -ENXIO;

	gpio_set_value(nano_skts[skt->nr].gpio_rst, !!(state->flags & SS_RESET));

	return 0;
}

/*
 * nanoengine_pcmcia_socket_state - report static socket status.  The
 * board has no battery-voltage-detect or voltage-sense lines, so BVD
 * reads "good" and only 3.3V is reported; CD/RDY come from the GPIOs
 * registered in hw_init.
 */
static void nanoengine_pcmcia_socket_state(
	struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
{
	unsigned i = skt->nr;

	if (i >= num_nano_pcmcia_sockets)
		return;

	state->bvd1 = 1;
	state->bvd2 = 1;
	state->vs_3v = 1; /* Can only apply 3.3V */
	state->vs_Xv = 0;
}

static struct pcmcia_low_level nanoengine_pcmcia_ops = {
	.owner			= THIS_MODULE,
	.hw_init		= nanoengine_pcmcia_hw_init,
	.hw_shutdown		= nanoengine_pcmcia_hw_shutdown,
	.configure_socket	= nanoengine_pcmcia_configure_socket,
	.socket_state		= nanoengine_pcmcia_socket_state,
};

/*
 * pcmcia_nanoengine_init - entry point called by the SA11xx PCMCIA
 * core; registers both sockets when running on nanoEngine hardware,
 * otherwise returns -ENODEV.
 */
int pcmcia_nanoengine_init(struct device *dev)
{
	int ret = -ENODEV;

	if (machine_is_nanoengine())
		ret = sa11xx_drv_pcmcia_probe(
			dev, &nanoengine_pcmcia_ops, 0, 2);

	return ret;
}
gpl-2.0
nerdyblonde/N80XX_Kernel
Documentation/spi/spidev_fdx.c
11960
2758
#include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <fcntl.h> #include <string.h> #include <sys/ioctl.h> #include <sys/types.h> #include <sys/stat.h> #include <linux/types.h> #include <linux/spi/spidev.h> static int verbose; static void do_read(int fd, int len) { unsigned char buf[32], *bp; int status; /* read at least 2 bytes, no more than 32 */ if (len < 2) len = 2; else if (len > sizeof(buf)) len = sizeof(buf); memset(buf, 0, sizeof buf); status = read(fd, buf, len); if (status < 0) { perror("read"); return; } if (status != len) { fprintf(stderr, "short read\n"); return; } printf("read(%2d, %2d): %02x %02x,", len, status, buf[0], buf[1]); status -= 2; bp = buf + 2; while (status-- > 0) printf(" %02x", *bp++); printf("\n"); } static void do_msg(int fd, int len) { struct spi_ioc_transfer xfer[2]; unsigned char buf[32], *bp; int status; memset(xfer, 0, sizeof xfer); memset(buf, 0, sizeof buf); if (len > sizeof buf) len = sizeof buf; buf[0] = 0xaa; xfer[0].tx_buf = (unsigned long)buf; xfer[0].len = 1; xfer[1].rx_buf = (unsigned long) buf; xfer[1].len = len; status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer); if (status < 0) { perror("SPI_IOC_MESSAGE"); return; } printf("response(%2d, %2d): ", len, status); for (bp = buf; len; len--) printf(" %02x", *bp++); printf("\n"); } static void dumpstat(const char *name, int fd) { __u8 mode, lsb, bits; __u32 speed; if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) { perror("SPI rd_mode"); return; } if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) { perror("SPI rd_lsb_fist"); return; } if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) { perror("SPI bits_per_word"); return; } if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) { perror("SPI max_speed_hz"); return; } printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n", name, mode, bits, lsb ? 
"(lsb first) " : "", speed); } int main(int argc, char **argv) { int c; int readcount = 0; int msglen = 0; int fd; const char *name; while ((c = getopt(argc, argv, "hm:r:v")) != EOF) { switch (c) { case 'm': msglen = atoi(optarg); if (msglen < 0) goto usage; continue; case 'r': readcount = atoi(optarg); if (readcount < 0) goto usage; continue; case 'v': verbose++; continue; case 'h': case '?': usage: fprintf(stderr, "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n", argv[0]); return 1; } } if ((optind + 1) != argc) goto usage; name = argv[optind]; fd = open(name, O_RDWR); if (fd < 0) { perror("open"); return 1; } dumpstat(name, fd); if (msglen) do_msg(fd, msglen); if (readcount) do_read(fd, readcount); close(fd); return 0; }
gpl-2.0
gsstudios/android_kernel_samsung_smdk4412
arch/sh/mm/extable_64.c
13752
2184
/*
 * arch/sh/mm/extable_64.c
 *
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Cloned from the 2.5 SH version..
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/rwsem.h>
#include <linux/module.h>
#include <asm/uaccess.h>

extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
extern void __copy_user_fixup(void);

/* Single shared fixup entry for any fault inside the fast memcpy. */
static const struct exception_table_entry __copy_user_fixup_ex = {
	.fixup = (unsigned long)&__copy_user_fixup,
};

/*
 * Some functions that may trap due to a bad user-mode address have too
 * many loads and stores in them to make it at all practical to label
 * each one and put them all in the main exception table.
 *
 * In particular, the fast memcpy routine is like this.  Its fix-up is
 * just to fall back to a slow byte-at-a-time copy, which is handled the
 * conventional way.  So it's functionally OK to just handle any trap
 * occurring in the fast memcpy with that fixup.
 */
static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
{
	const unsigned long fast_start = (unsigned long)&copy_user_memcpy;
	const unsigned long fast_end = (unsigned long)&copy_user_memcpy_end;

	if (addr >= fast_start && addr <= fast_end)
		return &__copy_user_fixup_ex;

	return NULL;
}

/*
 * Look up a faulting address: first against the special ranges above,
 * then by a simple binary search of the sorted exception table.
 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *mid;

	mid = check_exception_ranges(value);
	if (mid)
		return mid;

	while (first <= last) {
		long diff;

		mid = first + (last - first) / 2;
		diff = mid->insn - value;
		if (!diff)
			return mid;
		if (diff < 0)
			first = mid + 1;
		else
			last = mid - 1;
	}

	return NULL;
}

/*
 * Redirect the PC to the registered fixup handler, if one covers the
 * faulting instruction.  Returns 1 when a fixup was applied.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	regs->pc = fixup->fixup;
	return 1;
}
gpl-2.0
omega-roms/I9505_Omega_Kernel_LL
arch/arm/mm/init.c
185
23416
/* * linux/arch/arm/mm/init.c * * Copyright (C) 1995-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/swap.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/initrd.h> #include <linux/of_fdt.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/memblock.h> #include <linux/sort.h> #include <linux/dma-contiguous.h> #include <asm/mach-types.h> #include <asm/memblock.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/tlb.h> #include <asm/fixmap.h> #include <asm/cputype.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "mm.h" static unsigned long phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; int msm_krait_need_wfe_fixup; EXPORT_SYMBOL(msm_krait_need_wfe_fixup); static int __init early_initrd(char *p) { unsigned long start, size; char *endp; start = memparse(p, &endp); if (*endp == ',') { size = memparse(endp + 1, NULL); phys_initrd_start = start; phys_initrd_size = size; } return 0; } early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { printk(KERN_WARNING "ATAG_INITRD is deprecated; " "please update your bootloader.\n"); phys_initrd_start = __virt_to_phys(tag->u.initrd.start); phys_initrd_size = tag->u.initrd.size; return 0; } __tagtable(ATAG_INITRD, parse_tag_initrd); static int __init parse_tag_initrd2(const struct tag *tag) { phys_initrd_start = tag->u.initrd.start; phys_initrd_size = tag->u.initrd.size; return 0; } __tagtable(ATAG_INITRD2, parse_tag_initrd2); #ifdef CONFIG_OF_FLATTREE void __init early_init_dt_setup_initrd_arch(unsigned long start, 
unsigned long end) { phys_initrd_start = start; phys_initrd_size = end - start; } #endif /* CONFIG_OF_FLATTREE */ /* * This keeps memory configuration data used by a couple memory * initialization functions, as well as show_mem() for the skipping * of holes in the memory map. It is populated by arm_add_memory(). */ struct meminfo meminfo; void show_mem(unsigned int filter) { int free = 0, total = 0, reserved = 0; int shared = 0, cached = 0, slab = 0, i; struct meminfo * mi = &meminfo; printk("Mem-info:\n"); show_free_areas(filter); for_each_bank (i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; struct page *page, *end; pfn1 = bank_pfn_start(bank); pfn2 = bank_pfn_end(bank); page = pfn_to_page(pfn1); end = pfn_to_page(pfn2 - 1) + 1; do { total++; if (PageReserved(page)) reserved++; else if (PageSwapCache(page)) cached++; else if (PageSlab(page)) slab++; else if (!page_count(page)) free++; else shared += page_count(page) - 1; page++; #ifdef CONFIG_SPARSEMEM pfn1++; if (!(pfn1 % PAGES_PER_SECTION)) page = pfn_to_page(pfn1); } while (pfn1 < pfn2); #else } while (page < end); #endif } printk("%d pages of RAM\n", total); printk("%d free pages\n", free); printk("%d reserved pages\n", reserved); printk("%d slab pages\n", slab); printk("%d pages shared\n", shared); printk("%d pages swap cached\n", cached); } static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) { struct meminfo *mi = &meminfo; int i; /* This assumes the meminfo array is properly sorted */ *min = bank_pfn_start(&mi->bank[0]); for_each_bank (i, mi) if (mi->bank[i].highmem) break; *max_low = bank_pfn_end(&mi->bank[i - 1]); *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); } static void __init arm_bootmem_init(unsigned long start_pfn, unsigned long end_pfn) { struct memblock_region *reg; unsigned int boot_pages; phys_addr_t bitmap; pg_data_t *pgdat; /* * Allocate the bootmem bitmap page. 
This must be in a region * of memory which has already been mapped. */ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, __pfn_to_phys(end_pfn)); /* * Initialise the bootmem allocator, handing the * memory banks over to bootmem. */ node_set_online(0); pgdat = NODE_DATA(0); init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); /* Free the lowmem regions from memblock into bootmem. */ for_each_memblock(memory, reg) { unsigned long start = memblock_region_memory_base_pfn(reg); unsigned long end = memblock_region_memory_end_pfn(reg); if (end >= end_pfn) end = end_pfn; if (start >= end) break; free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); } /* Reserve the lowmem memblock reserved regions in bootmem. */ for_each_memblock(reserved, reg) { unsigned long start = memblock_region_reserved_base_pfn(reg); unsigned long end = memblock_region_reserved_end_pfn(reg); if (end >= end_pfn) end = end_pfn; if (start >= end) break; reserve_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); } } #ifdef CONFIG_ZONE_DMA unsigned long arm_dma_zone_size __read_mostly; EXPORT_SYMBOL(arm_dma_zone_size); /* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA * allocations. This must be the smallest DMA mask in the system, * so a successful GFP_DMA allocation will always satisfy this. 
*/ u32 arm_dma_limit; static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) { if (size[0] <= dma_size) return; size[ZONE_NORMAL] = size[0] - dma_size; size[ZONE_DMA] = dma_size; hole[ZONE_NORMAL] = hole[0]; hole[ZONE_DMA] = 0; } #endif void __init setup_dma_zone(struct machine_desc *mdesc) { #ifdef CONFIG_ZONE_DMA if (mdesc->dma_zone_size) { arm_dma_zone_size = mdesc->dma_zone_size; arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; #endif } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP static void __init arm_bootmem_free_hmnm(unsigned long max_low, unsigned long max_high) { unsigned long max_zone_pfns[MAX_NR_ZONES]; struct memblock_region *reg; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[0] = max_low; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_high; #endif for_each_memblock(memory, reg) { unsigned long start = memblock_region_memory_base_pfn(reg); unsigned long end = memblock_region_memory_end_pfn(reg); memblock_set_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); } free_area_init_nodes(max_zone_pfns); } #else static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, unsigned long max_high) { unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; struct memblock_region *reg; /* * initialise the zones. */ memset(zone_size, 0, sizeof(zone_size)); /* * The memory size has already been determined. If we need * to do anything fancy with the allocation of this memory * to the zones, now is the time to do it. */ zone_size[0] = max_low - min; #ifdef CONFIG_HIGHMEM zone_size[ZONE_HIGHMEM] = max_high - max_low; #endif /* * Calculate the size of the holes. 
* holes = node_size - sum(bank_sizes) */ memcpy(zhole_size, zone_size, sizeof(zhole_size)); for_each_memblock(memory, reg) { unsigned long start = memblock_region_memory_base_pfn(reg); unsigned long end = memblock_region_memory_end_pfn(reg); if (start < max_low) { unsigned long low_end = min(end, max_low); zhole_size[0] -= low_end - start; } #ifdef CONFIG_HIGHMEM if (end > max_low) { unsigned long high_start = max(start, max_low); zhole_size[ZONE_HIGHMEM] -= end - high_start; } #endif } #ifdef CONFIG_ZONE_DMA /* * Adjust the sizes according to any special requirements for * this machine type. */ if (arm_dma_zone_size) arm_adjust_dma_zone(zone_size, zhole_size, arm_dma_zone_size >> PAGE_SHIFT); #endif free_area_init_node(0, zone_size, min, zhole_size); } #endif #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { return memblock_is_memory(__pfn_to_phys(pfn)); } EXPORT_SYMBOL(pfn_valid); #endif #ifndef CONFIG_SPARSEMEM static void __init arm_memory_present(void) { } #else static void __init arm_memory_present(void) { struct meminfo *mi = &meminfo; int i; for_each_bank(i, mi) { memory_present(0, bank_pfn_start(&mi->bank[i]), bank_pfn_end(&mi->bank[i])); } } #endif static bool arm_memblock_steal_permitted = true; phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) { phys_addr_t phys; BUG_ON(!arm_memblock_steal_permitted); phys = memblock_alloc(size, align); memblock_free(phys, size); memblock_remove(phys, size); return phys; } static int __init meminfo_cmp(const void *_a, const void *_b) { const struct membank *a = _a, *b = _b; long cmp = bank_pfn_start(a) - bank_pfn_start(b); return cmp < 0 ? -1 : cmp > 0 ? 
1 : 0; } #ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0 unsigned long membank0_size; EXPORT_SYMBOL(membank0_size); unsigned long membank1_start; EXPORT_SYMBOL(membank1_start); void __init find_membank0_hole(void) { sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); membank0_size = meminfo.bank[0].size; membank1_start = meminfo.bank[1].start; } #endif void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) { int i; #ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); #endif for (i = 0; i < mi->nr_banks; i++) memblock_add(mi->bank[i].start, mi->bank[i].size); /* Register the kernel text, kernel data and initrd with memblock. */ #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); #else memblock_reserve(__pa(_stext), _end - _stext); #endif #ifdef CONFIG_BLK_DEV_INITRD if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size && memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size) { memblock_reserve(phys_initrd_start, phys_initrd_size); /* Now convert initrd to virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); initrd_end = initrd_start + phys_initrd_size; } #endif arm_mm_memblock_reserve(); arm_dt_memblock_reserve(); /* reserve any platform specific memblock areas */ if (mdesc->reserve) mdesc->reserve(); /* * reserve memory for DMA contigouos allocations, * must come from DMA area inside low memory */ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit)); arm_memblock_steal_permitted = 
false; memblock_allow_resize(); memblock_dump_all(); } #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE int _early_pfn_valid(unsigned long pfn) { struct meminfo *mi = &meminfo; unsigned int left = 0, right = mi->nr_banks; do { unsigned int mid = (right + left) / 2; struct membank *bank = &mi->bank[mid]; if (pfn < bank_pfn_start(bank)) right = mid; else if (pfn >= bank_pfn_end(bank)) left = mid + 1; else return 1; } while (left < right); return 0; } EXPORT_SYMBOL(_early_pfn_valid); #endif void __init bootmem_init(void) { unsigned long min, max_low, max_high; max_low = max_high = 0; find_limits(&min, &max_low, &max_high); arm_bootmem_init(min, max_low); /* * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations */ arm_memory_present(); /* * sparse_init() needs the bootmem allocator up and running. */ sparse_init(); #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP arm_bootmem_free_hmnm(max_low, max_high); #else /* * Now free the memory - free_area_init_node needs * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid. */ arm_bootmem_free(min, max_low, max_high); #endif /* * This doesn't seem to be used by the Linux memory manager any * more, but is used by ll_rw_block. If we can get rid of it, we * also get rid of some of the stuff above as well. * * Note: max_low_pfn and max_pfn reflect the number of _pages_ in * the system, not the maximum PFN. 
*/ max_low_pfn = max_low - PHYS_PFN_OFFSET; max_pfn = max_high - PHYS_PFN_OFFSET; } static inline int free_area(unsigned long pfn, unsigned long end, char *s) { unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); for (; pfn < end; pfn++) { struct page *page = pfn_to_page(pfn); ClearPageReserved(page); init_page_count(page); __free_page(page); pages++; } if (size && s) printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); return pages; } /* * Poison init memory with an undefined instruction (ARM) or a branch to an * undefined instruction (Thumb). */ static inline void poison_init_mem(void *s, size_t count) { u32 *p = (u32 *)s; for (; count != 0; count -= 4) *p++ = 0xe7fddef0; } static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; unsigned long pg, pgend; /* * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and * round start upwards and end downwards. */ pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; /* * If there are free pages between these, * free the section of the memmap array. */ if (pg < pgend) free_bootmem(pg, pgend - pg); } /* * The mem_map array can get very big. Free as much of the unused portion of * the mem_map that we are allowed to. The page migration code moves pages * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we * can't free mem_map entries that may be dereferenced in this manner. */ static void __init free_unused_memmap(struct meminfo *mi) { unsigned long bank_start, prev_bank_end = 0; unsigned int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). 
*/ for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; bank_start = round_down(bank_pfn_start(bank), MAX_ORDER_NR_PAGES); #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist * due to SPARSEMEM sections which aren't present. */ bank_start = min(bank_start, ALIGN(prev_bank_end, PAGES_PER_SECTION)); #else /* * Align down here since the VM subsystem insists that the * memmap entries are valid from the bank start aligned to * MAX_ORDER_NR_PAGES. */ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); #endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. */ if (prev_bank_end && prev_bank_end < bank_start) free_memmap(prev_bank_end, bank_start); prev_bank_end = round_up(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) free_memmap(prev_bank_end, ALIGN(prev_bank_end, PAGES_PER_SECTION)); #endif } static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; struct memblock_region *mem, *res; /* set highmem page free */ for_each_memblock(memory, mem) { unsigned long start = memblock_region_memory_base_pfn(mem); unsigned long end = memblock_region_memory_end_pfn(mem); /* Ignore complete lowmem entries */ if (end <= max_low) continue; /* Truncate partial highmem entries */ if (start < max_low) start = max_low; /* Find and exclude any reserved regions */ for_each_memblock(reserved, res) { unsigned long res_start, res_end; res_start = memblock_region_reserved_base_pfn(res); res_end = memblock_region_reserved_end_pfn(res); if (res_end < start) continue; if (res_start < start) res_start = start; if (res_start > end) res_start = end; if (res_end > end) res_end = end; if (res_start != start) totalhigh_pages += free_area(start, res_start, NULL); start = res_end; if (start == end) break; } /* And now free anything which remains */ if (start < end) 
totalhigh_pages += free_area(start, end, NULL); } totalram_pages += totalhigh_pages; #endif } /* * mem_init() marks the free areas in the mem_map and tells us how much * memory is free. This is done after various parts of the system have * claimed their memory after the kernel image. */ void __init mem_init(void) { unsigned long reserved_pages, free_pages; struct memblock_region *reg; int i; #ifdef CONFIG_HAVE_TCM /* These pointers are filled in on TCM detection */ extern u32 dtcm_end; extern u32 itcm_end; #endif max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; /* this will put all unused low memory onto the freelists */ free_unused_memmap(&meminfo); totalram_pages += free_all_bootmem(); #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ totalram_pages += free_area(PHYS_PFN_OFFSET, __phys_to_pfn(__pa(swapper_pg_dir)), NULL); #endif free_highpages(); reserved_pages = free_pages = 0; for_each_bank(i, &meminfo) { struct membank *bank = &meminfo.bank[i]; unsigned int pfn1, pfn2; struct page *page, *end; pfn1 = bank_pfn_start(bank); pfn2 = bank_pfn_end(bank); page = pfn_to_page(pfn1); end = pfn_to_page(pfn2 - 1) + 1; do { if (PageReserved(page)) reserved_pages++; else if (!page_count(page)) free_pages++; page++; #ifdef CONFIG_SPARSEMEM pfn1++; if (!(pfn1 % PAGES_PER_SECTION)) page = pfn_to_page(pfn1); } while (pfn1 < pfn2); #else } while (page < end); #endif } /* * Since our memory may not be contiguous, calculate the * real number of pages we have in this system */ printk(KERN_INFO "Memory:"); num_physpages = 0; for_each_memblock(memory, reg) { unsigned long pages = memblock_region_memory_end_pfn(reg) - memblock_region_memory_base_pfn(reg); num_physpages += pages; printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); } printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", nr_free_pages() << (PAGE_SHIFT-10), free_pages << 
(PAGE_SHIFT-10), reserved_pages << (PAGE_SHIFT-10), totalhigh_pages << (PAGE_SHIFT-10)); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS " timers : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif #ifdef CONFIG_HAVE_TCM " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif " .text : 0x%p" " - 0x%p" " (%4d kB)\n" " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .data : 0x%p" " - 0x%p" " (%4d kB)\n" " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS MLK(UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE), UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE) + (PAGE_SIZE)), #endif #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_TOP), MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * (PAGE_SIZE)), #endif #ifdef CONFIG_MODULES MLM(MODULES_VADDR, MODULES_END), #endif MLK_ROUNDUP(_text, _etext), MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_sdata, _edata), MLK_ROUNDUP(__bss_start, __bss_stop)); #undef MLK #undef MLM #undef MLK_ROUNDUP /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
*/ #ifdef CONFIG_MMU BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); BUG_ON(TASK_SIZE > MODULES_VADDR); #endif #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); #endif if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get * anywhere without overcommit, so turn * it on by default. */ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; } } void free_initmem(void) { unsigned long reclaimed_initmem; #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), __phys_to_pfn(__pa(&__tcm_end)), "TCM link"); #endif #ifdef CONFIG_STRICT_MEMORY_RWX poison_init_mem((char *)__arch_info_begin, __init_end - (char *)__arch_info_begin); reclaimed_initmem = free_area(__phys_to_pfn(__pa(__arch_info_begin)), __phys_to_pfn(__pa(__init_end)), "init"); totalram_pages += reclaimed_initmem; #else poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) { reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), "init"); totalram_pages += reclaimed_initmem; } #endif } #ifdef CONFIG_BLK_DEV_INITRD static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { unsigned long reclaimed_initrd_mem; if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)), __phys_to_pfn(__pa(end)), "initrd"); totalram_pages += reclaimed_initrd_mem; } } static int __init keepinitrd_setup(char *__unused) { keep_initrd = 1; return 1; } __setup("keepinitrd", keepinitrd_setup); #endif #ifdef CONFIG_MSM_KRAIT_WFE_FIXUP static int __init msm_krait_wfe_init(void) { unsigned int val, midr; midr = read_cpuid_id() & 0xffffff00; if ((midr == 
0x511f0400) || (midr == 0x510f0600)) { asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val)); msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0; } return 0; } pure_initcall(msm_krait_wfe_init); #endif
gpl-2.0
tzlaine/gcc
gcc/testsuite/gfortran.dg/complex_intrinsic_8.f90
185
2536
! { dg-do link } ! ! PR fortran/33197 ! ! Fortran complex trigonometric functions: acos, asin, atan, acosh, asinh, atanh ! ! Compile-time simplifications ! implicit none real(4), parameter :: pi = 2*acos(0.0_4) real(8), parameter :: pi8 = 2*acos(0.0_8) real(4), parameter :: eps = 10*epsilon(0.0_4) real(8), parameter :: eps8 = 10*epsilon(0.0_8) complex(4), parameter :: z0_0 = cmplx(0.0_4, 0.0_4, kind=4) complex(4), parameter :: z1_1 = cmplx(1.0_4, 1.0_4, kind=4) complex(8), parameter :: z80_0 = cmplx(0.0_8, 0.0_8, kind=8) complex(8), parameter :: z81_1 = cmplx(1.0_8, 1.0_8, kind=8) if (abs(acos(z0_0) - cmplx(pi/2,-0.0,4)) > eps) call link_error() if (abs(acos(z1_1) - cmplx(0.904556894, -1.06127506,4)) > eps) call link_error() if (abs(acos(z80_0) - cmplx(pi8/2,-0.0_8,8)) > eps8) call link_error() if (abs(acos(z81_1) - cmplx(0.90455689430238140_8, -1.0612750619050357_8,8)) > eps8) call link_error() if (abs(asin(z0_0) - cmplx(0.0,0.0,4)) > eps) call link_error() if (abs(asin(z1_1) - cmplx(0.66623943, 1.06127506,4)) > eps) call link_error() if (abs(asin(z80_0) - cmplx(0.0_8,0.0_8,8)) > eps8) call link_error() if (abs(asin(z81_1) - cmplx(0.66623943249251527_8, 1.0612750619050357_8,8)) > eps8) call link_error() if (abs(atan(z0_0) - cmplx(0.0,0.0,4)) > eps) call link_error() if (abs(atan(z1_1) - cmplx(1.01722196, 0.40235947,4)) > eps) call link_error() if (abs(atan(z80_0) - cmplx(0.0_8,0.0_8,8)) > eps8) call link_error() if (abs(atan(z81_1) - cmplx(1.0172219678978514_8, 0.40235947810852507_8,8)) > eps8) call link_error() if (abs(acosh(z0_0) - cmplx(0.0,pi/2,4)) > eps) call link_error() if (abs(acosh(z1_1) - cmplx(1.06127506, 0.90455689,4)) > eps) call link_error() if (abs(acosh(z80_0) - cmplx(0.0_8,pi8/2,8)) > eps8) call link_error() if (abs(acosh(z81_1) - cmplx(1.0612750619050357_8, 0.90455689430238140_8,8)) > eps8) call link_error() if (abs(asinh(z0_0) - cmplx(0.0,0.0,4)) > eps) call link_error() if (abs(asinh(z1_1) - cmplx(1.06127506, 0.66623943,4)) > eps) call 
link_error() if (abs(asinh(z80_0) - cmplx(0.0_8,0.0_8,8)) > eps8) call link_error() if (abs(asinh(z81_1) - cmplx(1.0612750619050357_8, 0.66623943249251527_8,8)) > eps8) call link_error() if (abs(atanh(z0_0) - cmplx(0.0,0.0,4)) > eps) call link_error() if (abs(atanh(z1_1) - cmplx(0.40235947, 1.01722196,4)) > eps) call link_error() if (abs(atanh(z80_0) - cmplx(0.0_8,0.0_8,8)) > eps8) call link_error() if (abs(atanh(z81_1) - cmplx(0.40235947810852507_8, 1.0172219678978514_8,8)) > eps8) call link_error() end
gpl-2.0
getitnowmarketing/Incredible-2.6.35-gb-mr
drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
953
2048
/* dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/*
 * Scancode -> keycode table for the DigitalNow DNTV Live DVB-T remote.
 * Scancodes 0x01-0x0a are the numeric pad; the remainder are tuner and
 * transport controls.
 */
static struct ir_scancode dntv_live_dvb_t[] = {
	{ 0x00, KEY_ESC },		/* 'go up a level?' */

	/* Keys 0 to 9 */
	{ 0x0a, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x0b, KEY_TUNER },		/* tv/fm */
	{ 0x0c, KEY_SEARCH },		/* scan */
	{ 0x0d, KEY_STOP },
	{ 0x0e, KEY_PAUSE },
	{ 0x0f, KEY_LIST },		/* source */

	{ 0x10, KEY_MUTE },
	{ 0x11, KEY_REWIND },		/* backward << */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_CAMERA },		/* snap */
	{ 0x14, KEY_AUDIO },		/* stereo */
	{ 0x15, KEY_CLEAR },		/* reset */
	{ 0x16, KEY_PLAY },
	{ 0x17, KEY_ENTER },
	{ 0x18, KEY_ZOOM },		/* full screen */
	{ 0x19, KEY_FASTFORWARD },	/* forward >> */
	{ 0x1a, KEY_CHANNELUP },
	{ 0x1b, KEY_VOLUMEUP },
	{ 0x1c, KEY_INFO },		/* preview */
	{ 0x1d, KEY_RECORD },		/* record */
	{ 0x1e, KEY_CHANNELDOWN },
	{ 0x1f, KEY_VOLUMEDOWN },
};

static struct rc_keymap dntv_live_dvb_t_map = {
	.map = {
		.scan    = dntv_live_dvb_t,
		.size    = ARRAY_SIZE(dntv_live_dvb_t),
		.ir_type = IR_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_DNTV_LIVE_DVB_T,
	}
};

/* Register/unregister the keymap with the IR core at module load/unload. */
static int __init init_rc_map_dntv_live_dvb_t(void)
{
	return ir_register_map(&dntv_live_dvb_t_map);
}

static void __exit exit_rc_map_dntv_live_dvb_t(void)
{
	ir_unregister_map(&dntv_live_dvb_t_map);
}

module_init(init_rc_map_dntv_live_dvb_t)
module_exit(exit_rc_map_dntv_live_dvb_t)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Victor-android/kernel_huawei
drivers/media/IR/keymaps/rc-tbs-nec.c
953
1751
/* tbs-nec.h - Keytable for tbs_nec Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* Scancode -> keycode table for the TBS NEC-protocol remote. */
static struct ir_scancode tbs_nec[] = {
	{ 0x04, KEY_POWER2 },		/* power */
	{ 0x14, KEY_MUTE },		/* mute */

	/* Numeric pad */
	{ 0x07, KEY_1 },
	{ 0x06, KEY_2 },
	{ 0x05, KEY_3 },
	{ 0x0b, KEY_4 },
	{ 0x0a, KEY_5 },
	{ 0x09, KEY_6 },
	{ 0x0f, KEY_7 },
	{ 0x0e, KEY_8 },
	{ 0x0d, KEY_9 },
	{ 0x12, KEY_0 },

	{ 0x16, KEY_CHANNELUP },	/* ch+ */
	{ 0x11, KEY_CHANNELDOWN },	/* ch- */
	{ 0x13, KEY_VOLUMEUP },		/* vol+ */
	{ 0x0c, KEY_VOLUMEDOWN },	/* vol- */
	{ 0x03, KEY_RECORD },		/* rec */
	{ 0x18, KEY_PAUSE },		/* pause */
	{ 0x19, KEY_OK },		/* ok */
	{ 0x1a, KEY_CAMERA },		/* snapshot */

	/* Navigation cluster */
	{ 0x01, KEY_UP },
	{ 0x10, KEY_LEFT },
	{ 0x02, KEY_RIGHT },
	{ 0x08, KEY_DOWN },

	{ 0x15, KEY_FAVORITES },
	{ 0x17, KEY_SUBTITLE },
	{ 0x1d, KEY_ZOOM },
	{ 0x1f, KEY_EXIT },
	{ 0x1e, KEY_MENU },
	{ 0x1c, KEY_EPG },
	{ 0x00, KEY_PREVIOUS },
	{ 0x1b, KEY_MODE },
};

static struct rc_keymap tbs_nec_map = {
	.map = {
		.scan    = tbs_nec,
		.size    = ARRAY_SIZE(tbs_nec),
		.ir_type = IR_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_TBS_NEC,
	}
};

/* Register/unregister the keymap with the IR core at module load/unload. */
static int __init init_rc_map_tbs_nec(void)
{
	return ir_register_map(&tbs_nec_map);
}

static void __exit exit_rc_map_tbs_nec(void)
{
	ir_unregister_map(&tbs_nec_map);
}

module_init(init_rc_map_tbs_nec)
module_exit(exit_rc_map_tbs_nec)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
SM-G920P/SM-G920P-Kernel
arch/mips/pci/ops-lantiq.c
2233
2954
/*
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/addrspace.h>
#include <linux/vmalloc.h>

#include <lantiq_soc.h>

#include "pci-lantiq.h"

/* Bit positions of the bus/device/function fields inside a config-space
 * address on the Lantiq PCI host controller. */
#define LTQ_PCI_CFG_BUSNUM_SHF 16
#define LTQ_PCI_CFG_DEVNUM_SHF 11
#define LTQ_PCI_CFG_FUNNUM_SHF 8

#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1

/*
 * Perform a single 32-bit PCI configuration-space access (read or write)
 * through the memory-mapped config window (ltq_pci_mapped_cfg).
 *
 * access_type: PCI_ACCESS_READ or PCI_ACCESS_WRITE
 * data:        in/out value; on reads the result is stored here
 *
 * Returns 0 on success, 1 when the device is out of range or did not
 * respond (all-ones read).  The whole sequence runs under ebu_lock with
 * IRQs off because the EBU config window is shared SoC-wide.
 */
static int ltq_pci_config_access(unsigned char access_type,
	struct pci_bus *bus, unsigned int devfn, unsigned int where,
	u32 *data)
{
	unsigned long cfg_base;
	unsigned long flags;
	u32 temp;

	/* we support slot from 0 to 15 dev_fn & 0x68 (AD29) is the SoC
	   itself */
	if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
		|| ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
		return 1;

	spin_lock_irqsave(&ebu_lock, flags);

	/* Build the config-space address: bus/devfn/dword-aligned offset.
	 * devfn already holds dev<<3|fn, so shifting by FUNNUM_SHF places
	 * the device number at DEVNUM_SHF as the controller expects. */
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn <<
			LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);

	/* Perform access; the controller is byte-swapped relative to the
	 * CPU, hence the swab32() on both directions. */
	if (access_type == PCI_ACCESS_WRITE) {
		ltq_w32(swab32(*data), ((u32 *)cfg_base));
	} else {
		*data = ltq_r32(((u32 *)(cfg_base)));
		*data = swab32(*data);
	}
	wmb();

	/* clean possible Master abort: read the host's own status dword and
	 * write it back to the SoC device (0x68) to clear sticky error bits.
	 * NOTE(review): exact register semantics come from the Lantiq SoC
	 * manual -- confirm before changing this sequence. */
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	temp = ltq_r32(((u32 *)(cfg_base)));
	temp = swab32(temp);
	cfg_base = (unsigned long) ltq_pci_mapped_cfg;
	cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
	ltq_w32(temp, ((u32 *)cfg_base));
	spin_unlock_irqrestore(&ebu_lock, flags);

	/* An all-ones read means no device answered (master abort). */
	if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
		return 1;

	return 0;
}

/*
 * pci_ops read hook: read size bytes (1, 2 or 4) at config offset 'where'.
 * The hardware only does 32-bit accesses, so narrower reads extract the
 * relevant bytes from the containing dword.
 */
int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 *val)
{
	u32 data = 0;

	if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}

/*
 * pci_ops write hook: write size bytes (1, 2 or 4) at config offset 'where'.
 * Sub-dword writes are implemented as read-modify-write of the containing
 * 32-bit word.
 */
int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 val)
{
	u32 data = 0;

	if (size == 4) {
		data = val;
	} else {
		/* Fetch the current dword so untouched bytes are preserved. */
		if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn,
				where, &data))
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (size == 1)
			data = (data & ~(0xff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
		else if (size == 2)
			data = (data & ~(0xffff << ((where & 3) << 3))) |
				(val << ((where & 3) << 3));
	}

	if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}
gpl-2.0
Twisted-Kernel/Sick-Twisted-Tmo
drivers/gpu/drm/gma500/oaktrail_lvds.c
2233
14329
/* * Copyright © 2006-2009 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/i2c.h> #include <drm/drmP.h> #include <asm/mrst.h> #include "intel_bios.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "power.h" #include <linux/pm_runtime.h> /* The max/min PWM frequency in BPCR[31:17] - */ /* The smallest number is 1 (not 0) that can fit in the * 15-bit field of the and then*/ /* shifts to the left by one bit to get the actual 16-bit * value that the 15-bits correspond to.*/ #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF #define BRIGHTNESS_MAX_LEVEL 100 /** * Sets the power state for the panel. 
*/ static void oaktrail_lvds_set_power(struct drm_device *dev, struct psb_intel_encoder *psb_intel_encoder, bool on) { u32 pp_status; struct drm_psb_private *dev_priv = dev->dev_private; if (!gma_power_begin(dev, true)) return; if (on) { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while ((pp_status & (PP_ON | PP_READY)) == PP_READY); dev_priv->is_lvds_on = true; if (dev_priv->ops->lvds_bl_power) dev_priv->ops->lvds_bl_power(dev, true); } else { if (dev_priv->ops->lvds_bl_power) dev_priv->ops->lvds_bl_power(dev, false); REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); dev_priv->is_lvds_on = false; pm_request_idle(&dev->pdev->dev); } gma_power_end(dev); } static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); if (mode == DRM_MODE_DPMS_ON) oaktrail_lvds_set_power(dev, psb_intel_encoder, true); else oaktrail_lvds_set_power(dev, psb_intel_encoder, false); /* XXX: We never power down the LVDS pairs. */ } static void oaktrail_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector = NULL; struct drm_crtc *crtc = encoder->crtc; u32 lvds_port; uint64_t v = DRM_MODE_SCALE_FULLSCREEN; if (!gma_power_begin(dev, true)) return; /* * The LVDS pin pair will already have been turned on in the * psb_intel_crtc_mode_set since it has a large impact on the DPLL * settings. 
*/ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN | LVDS_BORDER_EN; /* If the firmware says dither on Moorestown, or the BIOS does on Oaktrail then enable dithering */ if (mode_dev->panel_wants_dither || dev_priv->lvds_dither) lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE; REG_WRITE(LVDS, lvds_port); /* Find the connector we're trying to set up */ list_for_each_entry(connector, &mode_config->connector_list, head) { if (!connector->encoder || connector->encoder->crtc != crtc) continue; } if (!connector) { DRM_ERROR("Couldn't find connector when setting mode"); return; } drm_object_property_get_value( &connector->base, dev->mode_config.scaling_mode_property, &v); if (v == DRM_MODE_SCALE_NO_SCALE) REG_WRITE(PFIT_CONTROL, 0); else if (v == DRM_MODE_SCALE_ASPECT) { if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) || (mode->hdisplay != adjusted_mode->crtc_hdisplay)) { if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) == (mode->hdisplay * adjusted_mode->crtc_vdisplay)) REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); else if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) > (mode->hdisplay * adjusted_mode->crtc_vdisplay)) REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_PILLARBOX); else REG_WRITE(PFIT_CONTROL, PFIT_ENABLE | PFIT_SCALING_MODE_LETTERBOX); } else REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE); gma_power_end(dev); } static void oaktrail_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (!gma_power_begin(dev, true)) return; mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); oaktrail_lvds_set_power(dev, psb_intel_encoder, false); gma_power_end(dev); } static 
u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 ret; if (gma_power_begin(dev, false)) { ret = ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; gma_power_end(dev); } else ret = ((dev_priv->regs.saveBLC_PWM_CTL & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; return ret; } static void oaktrail_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (mode_dev->backlight_duty_cycle == 0) mode_dev->backlight_duty_cycle = oaktrail_lvds_get_max_backlight(dev); oaktrail_lvds_set_power(dev, psb_intel_encoder, true); } static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = { .dpms = oaktrail_lvds_dpms, .mode_fixup = psb_intel_lvds_mode_fixup, .prepare = oaktrail_lvds_prepare, .mode_set = oaktrail_lvds_mode_set, .commit = oaktrail_lvds_commit, }; static struct drm_display_mode lvds_configuration_modes[] = { /* hard coded fixed mode for TPO LTPS LPJ040K001A */ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836, 846, 1056, 0, 480, 489, 491, 525, 0, 0) }, /* hard coded fixed mode for LVDS 800x480 */ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801, 802, 1024, 0, 480, 481, 482, 525, 0, 0) }, /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072, 1104, 1184, 0, 600, 603, 604, 608, 0, 0) }, /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104, 1136, 1184, 0, 600, 603, 604, 608, 0, 0) }, /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124, 1204, 1312, 0, 600, 607, 
610, 621, 0, 0) }, /* hard coded fixed mode for LVDS 1024x768 */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, 0) }, /* hard coded fixed mode for LVDS 1366x768 */ { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430, 1558, 1664, 0, 768, 769, 770, 776, 0, 0) }, }; /* Returns the panel fixed mode from configuration. */ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct drm_display_mode *mode = NULL; struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; mode_dev->panel_fixed_mode = NULL; /* Use the firmware provided data on Moorestown */ if (dev_priv->has_gct) { mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return; mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; mode->hsync_start = mode->hdisplay + \ ((ti->hsync_offset_hi << 8) | \ ti->hsync_offset_lo); mode->hsync_end = mode->hsync_start + \ ((ti->hsync_pulse_width_hi << 8) | \ ti->hsync_pulse_width_lo); mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ ti->hblank_lo); mode->vsync_start = \ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \ ti->vsync_offset_lo); mode->vsync_end = \ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \ ti->vsync_pulse_width_lo); mode->vtotal = mode->vdisplay + \ ((ti->vblank_hi << 8) | ti->vblank_lo); mode->clock = ti->pixel_clock * 10; #if 0 printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay); printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay); printk(KERN_INFO "HSS is %d\n", mode->hsync_start); printk(KERN_INFO "HSE is %d\n", mode->hsync_end); printk(KERN_INFO "htotal is %d\n", mode->htotal); printk(KERN_INFO "VSS is %d\n", mode->vsync_start); printk(KERN_INFO "VSE is %d\n", mode->vsync_end); printk(KERN_INFO "vtotal is %d\n", mode->vtotal); printk(KERN_INFO "clock is %d\n", mode->clock); #endif 
mode_dev->panel_fixed_mode = mode; } /* Use the BIOS VBT mode if available */ if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode) mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, mode_dev->vbt_mode); /* Then try the LVDS VBT mode */ if (mode_dev->panel_fixed_mode == NULL) if (dev_priv->lfp_lvds_vbt_mode) mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); /* Then guess */ if (mode_dev->panel_fixed_mode == NULL) mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, &lvds_configuration_modes[2]); drm_mode_set_name(mode_dev->panel_fixed_mode); drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0); } /** * oaktrail_lvds_init - setup LVDS connectors on this device * @dev: drm device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ void oaktrail_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; struct psb_intel_connector *psb_intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_psb_private *dev_priv = dev->dev_private; struct edid *edid; struct i2c_adapter *i2c_adap; struct drm_display_mode *scan; /* *modes, *bios_mode; */ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); if (!psb_intel_encoder) return; psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); if (!psb_intel_connector) goto failed_connector; connector = &psb_intel_connector->base; encoder = &psb_intel_encoder->base; dev_priv->is_lvds_on = true; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs); 
drm_connector_helper_add(connector, &psb_intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; drm_object_attach_property(&connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); drm_object_attach_property(&connector->base, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); mode_dev->panel_wants_dither = false; if (dev_priv->has_gct) mode_dev->panel_wants_dither = (dev_priv->gct_data. Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE); if (dev_priv->lvds_dither) mode_dev->panel_wants_dither = 1; /* * LVDS discovery: * 1) check for EDID on DDC * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel * 4) make sure lid is open * if closed, act like it's not there for now */ i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus); if (i2c_adap == NULL) dev_err(dev->dev, "No ddc adapter available!\n"); /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ if (i2c_adap) { edid = drm_get_edid(connector, i2c_adap); if (edid) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); kfree(edid); } list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, scan); goto out; /* FIXME: check for quirks */ } } } /* * If we didn't get EDID, try geting panel timing * from configuration data */ oaktrail_lvds_get_configuration_mode(dev, mode_dev); if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; /* FIXME: check for quirks */ } /* If we still don't have a mode after all that, give up. 
*/ if (!mode_dev->panel_fixed_mode) { dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n"); goto failed_find; } out: drm_sysfs_connector_add(connector); return; failed_find: dev_dbg(dev->dev, "No LVDS modes found, disabling.\n"); if (psb_intel_encoder->ddc_bus) psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus); /* failed_ddc: */ drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); kfree(psb_intel_connector); failed_connector: kfree(psb_intel_encoder); }
gpl-2.0
Multirom-mi4i/android_kernel_xiaomi_ferrari
arch/mips/fw/arc/file.c
2233
1596
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * ARC firmware interface.
 *
 * Copyright (C) 1994, 1995, 1996, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#include <linux/init.h>

#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>

/*
 * Thin wrappers around the ARC PROM file-I/O entry points.  Each wrapper
 * just forwards its arguments through the ARC_CALLn trampoline into the
 * firmware vector table; return values and error codes are whatever the
 * firmware reports (LONG status, per the ARC specification).
 */

/* Read directory entry N of an open directory into Buffer. */
LONG
ArcGetDirectoryEntry(ULONG FileID, struct linux_vdirent *Buffer,
		     ULONG N, ULONG *Count)
{
	return ARC_CALL4(get_vdirent, FileID, Buffer, N, Count);
}

/* Open Path with the given mode; firmware stores the handle in *FileID. */
LONG
ArcOpen(CHAR *Path, enum linux_omode OpenMode, ULONG *FileID)
{
	return ARC_CALL3(open, Path, OpenMode, FileID);
}

/* Close a firmware file handle. */
LONG
ArcClose(ULONG FileID)
{
	return ARC_CALL1(close, FileID);
}

/* Read up to N bytes; *Count receives the number actually transferred. */
LONG
ArcRead(ULONG FileID, VOID *Buffer, ULONG N, ULONG *Count)
{
	return ARC_CALL4(read, FileID, Buffer, N, Count);
}

/* Query whether a read on FileID would block/succeed. */
LONG
ArcGetReadStatus(ULONG FileID)
{
	return ARC_CALL1(get_rstatus, FileID);
}

/* Write up to N bytes; *Count receives the number actually transferred. */
LONG
ArcWrite(ULONG FileID, PVOID Buffer, ULONG N, PULONG Count)
{
	return ARC_CALL4(write, FileID, Buffer, N, Count);
}

/* Seek to a 64-bit position (linux_bigint) with the given seek mode. */
LONG
ArcSeek(ULONG FileID, struct linux_bigint *Position,
	enum linux_seekmode SeekMode)
{
	return ARC_CALL3(seek, FileID, Position, SeekMode);
}

/* Mount or unmount a firmware volume by name. */
LONG
ArcMount(char *name, enum linux_mountops op)
{
	return ARC_CALL2(mount, name, op);
}

/* Fetch file metadata into *Information. */
LONG
ArcGetFileInformation(ULONG FileID, struct linux_finfo *Information)
{
	return ARC_CALL2(get_finfo, FileID, Information);
}

/* Update file attribute flags under the given mask. */
LONG ArcSetFileInformation(ULONG FileID, ULONG AttributeFlags,
			   ULONG AttributeMask)
{
	return ARC_CALL3(set_finfo, FileID, AttributeFlags, AttributeMask);
}
gpl-2.0
prpplague/DSI85-Development
drivers/media/dvb/frontends/atbm8830.c
3257
12070
/*
 *    Support for AltoBeam GB20600 (a.k.a DMB-TH) demodulator
 *    ATBM8830, ATBM8831
 *
 *    Copyright (C) 2009 David T.L. Wong <davidtlwong@gmail.com>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/div64.h>
#include "dvb_frontend.h"

#include "atbm8830.h"
#include "atbm8830_priv.h"

#define dprintk(args...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG "atbm8830: " args); \
	} while (0)

static int debug;

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

/*
 * Write one byte to a 16-bit demod register.  The chip uses a two-phase
 * protocol: first an address message (big-endian 16-bit register), then a
 * separate one-byte data message.
 * Returns 0 on success, -EIO on I2C failure.
 */
static int atbm8830_write_reg(struct atbm_state *priv, u16 reg, u8 data)
{
	int ret = 0;
	u8 dev_addr;
	u8 buf1[] = { reg >> 8, reg & 0xFF };
	u8 buf2[] = { data };
	struct i2c_msg msg1 = { .flags = 0, .buf = buf1, .len = 2 };
	struct i2c_msg msg2 = { .flags = 0, .buf = buf2, .len = 1 };

	dev_addr = priv->config->demod_address;
	msg1.addr = dev_addr;
	msg2.addr = dev_addr;

	if (debug >= 2)
		dprintk("%s: reg=0x%04X, data=0x%02X\n", __func__, reg, data);

	ret = i2c_transfer(priv->i2c, &msg1, 1);
	if (ret != 1)
		return -EIO;

	ret = i2c_transfer(priv->i2c, &msg2, 1);
	return (ret != 1) ? -EIO : 0;
}

/*
 * Read one byte from a 16-bit demod register (address write then a
 * one-byte read).  Returns 0 on success, -EIO on I2C failure.
 */
static int atbm8830_read_reg(struct atbm_state *priv, u16 reg, u8 *p_data)
{
	int ret;
	u8 dev_addr;

	u8 buf1[] = { reg >> 8, reg & 0xFF };
	u8 buf2[] = { 0 };
	struct i2c_msg msg1 = { .flags = 0, .buf = buf1, .len = 2 };
	struct i2c_msg msg2 = { .flags = I2C_M_RD, .buf = buf2, .len = 1 };

	dev_addr = priv->config->demod_address;
	msg1.addr = dev_addr;
	msg2.addr = dev_addr;

	ret = i2c_transfer(priv->i2c, &msg1, 1);
	if (ret != 1) {
		dprintk("%s: error reg=0x%04x, ret=%i\n", __func__, reg, ret);
		return -EIO;
	}

	ret = i2c_transfer(priv->i2c, &msg2, 1);
	if (ret != 1)
		return -EIO;

	*p_data = buf2[0];
	if (debug >= 2)
		dprintk("%s: reg=0x%04X, data=0x%02X\n",
			__func__, reg, buf2[0]);

	return 0;
}

/* Lock register latch so that multi-register read is atomic */
static inline int atbm8830_reglatch_lock(struct atbm_state *priv, int lock)
{
	return atbm8830_write_reg(priv, REG_READ_LATCH, lock ? 1 : 0);
}

/*
 * Program the oscillator clock register from the crystal frequency.
 * The 24-bit register value is 0x100000 * freq / 30.4MHz (fixed-point).
 */
static int set_osc_freq(struct atbm_state *priv, u32 freq /*in kHz*/)
{
	u32 val;
	u64 t;

	/* 0x100000 * freq / 30.4MHz */
	t = (u64)0x100000 * freq;
	do_div(t, 30400);
	val = t;

	atbm8830_write_reg(priv, REG_OSC_CLK, val);
	atbm8830_write_reg(priv, REG_OSC_CLK + 1, val >> 8);
	atbm8830_write_reg(priv, REG_OSC_CLK + 2, val >> 16);

	return 0;
}

/*
 * Configure the IF frequency.  freq == 0 selects zero-IF mode (baseband
 * input, optionally with I/Q swap); otherwise the 24-bit IF register is
 * set to 2*PI*(freq - fs)/fs in 2^22 fixed point (31416 ~= 10000*PI).
 */
static int set_if_freq(struct atbm_state *priv, u32 freq /*in kHz*/)
{

	u32 fs = priv->config->osc_clk_freq;
	u64 t;
	u32 val;
	u8 dat;

	if (freq != 0) {
		/* 2 * PI * (freq - fs) / fs * (2 ^ 22) */
		t = (u64) 2 * 31416 * (freq - fs);
		t <<= 22;
		do_div(t, fs);
		do_div(t, 1000);
		val = t;

		atbm8830_write_reg(priv, REG_TUNER_BASEBAND, 1);
		atbm8830_write_reg(priv, REG_IF_FREQ, val);
		atbm8830_write_reg(priv, REG_IF_FREQ+1, val >> 8);
		atbm8830_write_reg(priv, REG_IF_FREQ+2, val >> 16);

		/* Clear the low two ADC config bits.  NOTE(review): bit
		 * meanings are undocumented here -- from vendor code. */
		atbm8830_read_reg(priv, REG_ADC_CONFIG, &dat);
		dat &= 0xFC;
		atbm8830_write_reg(priv, REG_ADC_CONFIG, dat);
	} else {
		/* Zero IF */
		atbm8830_write_reg(priv, REG_TUNER_BASEBAND, 0);

		atbm8830_read_reg(priv, REG_ADC_CONFIG, &dat);
		dat &= 0xFC;
		dat |= 0x02;
		atbm8830_write_reg(priv, REG_ADC_CONFIG, dat);

		if (priv->config->zif_swap_iq)
			atbm8830_write_reg(priv, REG_SWAP_I_Q, 0x03);
		else
			atbm8830_write_reg(priv, REG_SWAP_I_Q, 0x01);
	}

	return 0;
}

/* Query demod lock; *locked is set to 1 iff the status register reads 1. */
static int is_locked(struct atbm_state *priv, u8 *locked)
{
	u8 status;

	atbm8830_read_reg(priv, REG_LOCK_STATUS, &status);

	if (locked != NULL)
		*locked = (status == 1);
	return 0;
}

/* Program AGC min/max/hold-loop; skipped entirely when min and max are 0. */
static int set_agc_config(struct atbm_state *priv,
	u8 min, u8 max, u8 hold_loop)
{
	/* no effect if both min and max are zero */
	if (!min && !max)
		return 0;

	atbm8830_write_reg(priv, REG_AGC_MIN, min);
	atbm8830_write_reg(priv, REG_AGC_MAX, max);
	atbm8830_write_reg(priv, REG_AGC_HOLD_LOOP, hold_loop);

	return 0;
}

/*
 * Static (non-mobile) channel tuning.  The register addresses and values
 * are magic numbers taken from the vendor reference code; only the two
 * frame-counter registers are named.  NOTE(review): no public datasheet
 * to cross-check these against.
 */
static int set_static_channel_mode(struct atbm_state *priv)
{
	int i;

	for (i = 0; i < 5; i++)
		atbm8830_write_reg(priv, 0x099B + i, 0x08);

	atbm8830_write_reg(priv, 0x095B, 0x7F);
	atbm8830_write_reg(priv, 0x09CB, 0x01);
	atbm8830_write_reg(priv, 0x09CC, 0x7F);
	atbm8830_write_reg(priv, 0x09CD, 0x7F);
	atbm8830_write_reg(priv, 0x0E01, 0x20);

	/* For single carrier */
	atbm8830_write_reg(priv, 0x0B03, 0x0A);
	atbm8830_write_reg(priv, 0x0935, 0x10);
	atbm8830_write_reg(priv, 0x0936, 0x08);
	atbm8830_write_reg(priv, 0x093E, 0x08);
	atbm8830_write_reg(priv, 0x096E, 0x06);

	/* frame_count_max0 */
	atbm8830_write_reg(priv, 0x0B09, 0x00);
	/* frame_count_max1 */
	atbm8830_write_reg(priv, 0x0B0A, 0x08);

	return 0;
}

/* Apply the board's transport-stream interface settings from config. */
static int set_ts_config(struct atbm_state *priv)
{
	const struct atbm8830_config *cfg = priv->config;

	/*Set parallel/serial ts mode*/
	atbm8830_write_reg(priv, REG_TS_SERIAL, cfg->serial_ts ? 1 : 0);
	atbm8830_write_reg(priv, REG_TS_CLK_MODE, cfg->serial_ts ? 1 : 0);
	/*Set ts sampling edge*/
	atbm8830_write_reg(priv, REG_TS_SAMPLE_EDGE,
		cfg->ts_sampling_edge ? 1 : 0);
	/*Set ts clock freerun*/
	atbm8830_write_reg(priv, REG_TS_CLK_FREERUN,
		cfg->ts_clk_gated ? 0 : 1);

	return 0;
}

/*
 * Frontend .init: bring the demod from reset to running using the
 * board-specific configuration.
 */
static int atbm8830_init(struct dvb_frontend *fe)
{
	struct atbm_state *priv = fe->demodulator_priv;
	const struct atbm8830_config *cfg = priv->config;

	/*Set oscillator frequency*/
	set_osc_freq(priv, cfg->osc_clk_freq);

	/*Set IF frequency*/
	set_if_freq(priv, cfg->if_freq);

	/*Set AGC Config*/
	set_agc_config(priv, cfg->agc_min, cfg->agc_max,
		cfg->agc_hold_loop);

	/*Set static channel mode*/
	set_static_channel_mode(priv);

	set_ts_config(priv);

	/*Turn off DSP reset*/
	atbm8830_write_reg(priv, 0x000A, 0);

	/*SW version test*/
	atbm8830_write_reg(priv, 0x020C, 11);

	/* Run */
	atbm8830_write_reg(priv, REG_DEMOD_RUN, 1);

	return 0;
}

/* Frontend .release: free the private state allocated in attach(). */
static void atbm8830_release(struct dvb_frontend *fe)
{
	struct atbm_state *state = fe->demodulator_priv;
	dprintk("%s\n", __func__);

	kfree(state);
}

/*
 * Frontend .set_frontend: program the tuner (via i2c gate) and poll for
 * demod lock, 10 tries at 100 ms apart.  Always returns 0; callers use
 * read_status() to learn whether lock was achieved.
 */
static int atbm8830_set_fe(struct dvb_frontend *fe,
	struct dvb_frontend_parameters *fe_params)
{
	struct atbm_state *priv = fe->demodulator_priv;
	int i;
	u8 locked = 0;
	dprintk("%s\n", __func__);

	/* set frequency */
	if (fe->ops.tuner_ops.set_params) {
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);
		fe->ops.tuner_ops.set_params(fe, fe_params);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	/* start auto lock */
	for (i = 0; i < 10; i++) {
		mdelay(100);
		dprintk("Try %d\n", i);
		is_locked(priv, &locked);
		if (locked != 0) {
			dprintk("ATBM8830 locked!\n");
			break;
		}
	}

	return 0;
}

/*
 * Frontend .get_frontend: the hardware readback is not implemented, so
 * report fixed "auto/8MHz" parameters.
 */
static int atbm8830_get_fe(struct dvb_frontend *fe,
			  struct dvb_frontend_parameters *fe_params)
{
	dprintk("%s\n", __func__);

	/* TODO: get real readings from device */
	/* inversion status */
	fe_params->inversion = INVERSION_OFF;

	/* bandwidth */
	fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;

	fe_params->u.ofdm.code_rate_HP = FEC_AUTO;
	fe_params->u.ofdm.code_rate_LP = FEC_AUTO;

	fe_params->u.ofdm.constellation = QAM_AUTO;

	/* transmission mode */
	fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;

	/* guard interval */
	fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;

	/* hierarchy */
	fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE;

	return 0;
}

/* No special tuning constraints: let the DVB core use its defaults. */
static int atbm8830_get_tune_settings(struct dvb_frontend *fe,
	struct dvb_frontend_tune_settings *fesettings)
{
	fesettings->min_delay_ms = 0;
	fesettings->step_size = 0;
	fesettings->max_drift = 0;
	return 0;
}

/*
 * Frontend .read_status: map the single demod lock bit onto the full set
 * of DVB status flags (all-or-nothing).  AGC lock is logged only.
 */
static int atbm8830_read_status(struct dvb_frontend *fe, fe_status_t *fe_status)
{
	struct atbm_state *priv = fe->demodulator_priv;
	u8 locked = 0;
	u8 agc_locked = 0;

	dprintk("%s\n", __func__);
	*fe_status = 0;

	is_locked(priv, &locked);
	if (locked) {
		*fe_status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
			FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
	}
	dprintk("%s: fe_status=0x%x\n", __func__, *fe_status);

	atbm8830_read_reg(priv, REG_AGC_LOCK, &agc_locked);
	dprintk("AGC Lock: %d\n", agc_locked);

	return 0;
}

/*
 * Frontend .read_ber: read the 15-bit frame error counter (latched so the
 * two byte reads are coherent) and scale it to a percentage of 32767.
 */
static int atbm8830_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct atbm_state *priv = fe->demodulator_priv;
	u32 frame_err;
	u8 t;

	dprintk("%s\n", __func__);

	atbm8830_reglatch_lock(priv, 1);

	atbm8830_read_reg(priv, REG_FRAME_ERR_CNT + 1, &t);
	frame_err = t & 0x7F;
	frame_err <<= 8;
	atbm8830_read_reg(priv, REG_FRAME_ERR_CNT, &t);
	frame_err |= t;

	atbm8830_reglatch_lock(priv, 0);

	*ber = frame_err * 100 / 32767;

	dprintk("%s: ber=0x%x\n", __func__, *ber);
	return 0;
}

/*
 * Frontend .read_signal_strength: derive strength from the 10-bit AGC PWM
 * value (latched read); lower PWM means stronger signal, hence the
 * inversion before scaling to the 0-0xFFFF DVB range.
 */
static int atbm8830_read_signal_strength(struct dvb_frontend *fe, u16 *signal)
{
	struct atbm_state *priv = fe->demodulator_priv;
	u32 pwm;
	u8 t;

	dprintk("%s\n", __func__);
	atbm8830_reglatch_lock(priv, 1);

	atbm8830_read_reg(priv, REG_AGC_PWM_VAL + 1, &t);
	pwm = t & 0x03;
	pwm <<= 8;
	atbm8830_read_reg(priv, REG_AGC_PWM_VAL, &t);
	pwm |= t;

	atbm8830_reglatch_lock(priv, 0);

	dprintk("AGC PWM = 0x%02X\n", pwm);
	pwm = 0x400 - pwm;

	*signal = pwm * 0x10000 / 0x400;

	return 0;
}

/* SNR readback is not implemented; always reports 0. */
static int atbm8830_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	dprintk("%s\n", __func__);
	*snr = 0;
	return 0;
}

/* Uncorrected-blocks readback is not implemented; always reports 0. */
static int atbm8830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	dprintk("%s\n", __func__);
	*ucblocks = 0;
	return 0;
}

/* Open/close the demod's I2C gate so the tuner behind it can be reached. */
static int atbm8830_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct atbm_state *priv = fe->demodulator_priv;

	return atbm8830_write_reg(priv, REG_I2C_GATE, enable ? 1 : 0);
}

static struct dvb_frontend_ops atbm8830_ops = {
	.info = {
		.name = "AltoBeam ATBM8830/8831 DMB-TH",
		.type = FE_OFDM,
		.frequency_min = 474000000,
		.frequency_max = 858000000,
		.frequency_stepsize = 10000,
		.caps =
			FE_CAN_FEC_AUTO |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO
	},

	.release = atbm8830_release,

	.init = atbm8830_init,
	.sleep = NULL,
	.write = NULL,
	.i2c_gate_ctrl = atbm8830_i2c_gate_ctrl,

	.set_frontend = atbm8830_set_fe,
	.get_frontend = atbm8830_get_fe,
	.get_tune_settings = atbm8830_get_tune_settings,

	.read_status = atbm8830_read_status,
	.read_ber = atbm8830_read_ber,
	.read_signal_strength = atbm8830_read_signal_strength,
	.read_snr = atbm8830_read_snr,
	.read_ucblocks = atbm8830_read_ucblocks,
};

/*
 * Probe and attach the demodulator.  Verifies chip presence by reading
 * the chip-ID register, then initializes the device.  Returns the
 * frontend on success or NULL on any failure (priv is freed).
 */
struct dvb_frontend *atbm8830_attach(const struct atbm8830_config *config,
	struct i2c_adapter *i2c)
{
	struct atbm_state *priv = NULL;
	u8 data = 0;

	dprintk("%s()\n", __func__);

	if (config == NULL || i2c == NULL)
		return NULL;

	priv = kzalloc(sizeof(struct atbm_state), GFP_KERNEL);
	if (priv == NULL)
		goto error_out;

	priv->config = config;
	priv->i2c = i2c;

	/* check if the demod is there */
	if (atbm8830_read_reg(priv, REG_CHIP_ID, &data) != 0) {
		dprintk("%s atbm8830/8831 not found at i2c addr 0x%02X\n",
			__func__, priv->config->demod_address);
		goto error_out;
	}
	dprintk("atbm8830 chip id: 0x%02X\n", data);

	memcpy(&priv->frontend.ops, &atbm8830_ops,
	       sizeof(struct dvb_frontend_ops));
	priv->frontend.demodulator_priv = priv;

	atbm8830_init(&priv->frontend);

	atbm8830_i2c_gate_ctrl(&priv->frontend, 1);

	return &priv->frontend;

error_out:
	dprintk("%s() error_out\n", __func__);
	kfree(priv);
	return NULL;

}
EXPORT_SYMBOL(atbm8830_attach);

MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
cocafe/C6902_kernel_CoCore-Z
arch/powerpc/platforms/85xx/common.c
4537
1769
/*
 * Routines common to most mpc85xx-based boards.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/of_platform.h>

#include <sysdev/cpm2_pic.h>

#include "mpc85xx.h"

/* Device-tree bus/node types whose children should be probed as
 * platform devices on every mpc85xx board. */
static struct of_device_id __initdata mpc85xx_common_ids[] = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .compatible = "simple-bus", },
	{ .name = "cpm", },
	{ .name = "localbus", },
	{ .compatible = "gianfar", },
	{ .compatible = "fsl,qe", },
	{ .compatible = "fsl,cpm2", },
	{ .compatible = "fsl,srio", },
	/* So that the DMA channel nodes can be probed individually: */
	{ .compatible = "fsl,eloplus-dma", },
	/* For the PMC driver */
	{ .compatible = "fsl,mpc8548-guts", },
	/* Probably unnecessary? */
	{ .compatible = "gpio-leds", },
	{},
};

/*
 * Register platform devices for all device-tree nodes matching the
 * common id table above.  Called from each board's setup code.
 */
int __init mpc85xx_common_publish_devices(void)
{
	return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
}
#ifdef CONFIG_CPM2
/*
 * Chained-IRQ handler for the CPM2 PIC: drain every pending CPM2
 * interrupt, then EOI the cascade interrupt on the parent controller.
 */
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int cascade_irq;

	while ((cascade_irq = cpm2_get_irq()) >= 0)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}


/*
 * Locate the CPM2 PIC in the device tree, initialize it, and hook its
 * cascade interrupt into the primary interrupt controller.  Logs an
 * error and bails out gracefully when the node or IRQ is missing.
 */
void __init mpc85xx_cpm2_pic_init(void)
{
	struct device_node *np;
	int irq;

	/* Setup CPM2 PIC */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
	if (np == NULL) {
		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
		return;
	}
	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		of_node_put(np);
		printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
		return;
	}

	cpm2_pic_init(np);
	of_node_put(np);
	irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
gpl-2.0
ML-Design/LG-P500-2.6.35-re-write
arch/sh/boards/mach-migor/lcd_qvga.c
4793
5030
/* * Support for SuperH MigoR Quarter VGA LCD Panel * * Copyright (C) 2008 Magnus Damm * * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio.h> #include <video/sh_mobile_lcdc.h> #include <cpu/sh7722.h> #include <mach/migor.h> /* LCD Module is a PH240320T according to board schematics. This module * is made up of a 240x320 LCD hooked up to a R61505U (or HX8347-A01?) * Driver IC. This IC is connected to the SH7722 built-in LCDC using a * SYS-80 interface configured in 16 bit mode. * * Index 0: "Device Code Read" returns 0x1505. */ static void reset_lcd_module(void) { gpio_set_value(GPIO_PTH2, 0); mdelay(2); gpio_set_value(GPIO_PTH2, 1); mdelay(1); } /* DB0-DB7 are connected to D1-D8, and DB8-DB15 to D10-D17 */ static unsigned long adjust_reg18(unsigned short data) { unsigned long tmp1, tmp2; tmp1 = (data<<1 | 0x00000001) & 0x000001FF; tmp2 = (data<<2 | 0x00000200) & 0x0003FE00; return tmp1 | tmp2; } static void write_reg(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg << 8 | data)); } static void write_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); sys_ops->write_data(sys_ops_handle, adjust_reg18(data)); } static unsigned long read_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg) { unsigned long data; sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); data = 
sys_ops->read_data(sys_ops_handle); return ((data >> 1) & 0xff) | ((data >> 2) & 0xff00); } static void migor_lcd_qvga_seq(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short const *data, int no_data) { int i; for (i = 0; i < no_data; i += 2) write_reg16(sys_ops_handle, sys_ops, data[i], data[i + 1]); } static const unsigned short sync_data[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned short magic0_data[] = { 0x0060, 0x2700, 0x0008, 0x0808, 0x0090, 0x001A, 0x0007, 0x0001, 0x0017, 0x0001, 0x0019, 0x0000, 0x0010, 0x17B0, 0x0011, 0x0116, 0x0012, 0x0198, 0x0013, 0x1400, 0x0029, 0x000C, 0x0012, 0x01B8, }; static const unsigned short magic1_data[] = { 0x0030, 0x0307, 0x0031, 0x0303, 0x0032, 0x0603, 0x0033, 0x0202, 0x0034, 0x0202, 0x0035, 0x0202, 0x0036, 0x1F1F, 0x0037, 0x0303, 0x0038, 0x0303, 0x0039, 0x0603, 0x003A, 0x0202, 0x003B, 0x0102, 0x003C, 0x0204, 0x003D, 0x0000, 0x0001, 0x0100, 0x0002, 0x0300, 0x0003, 0x5028, 0x0020, 0x00ef, 0x0021, 0x0000, 0x0004, 0x0000, 0x0009, 0x0000, 0x000A, 0x0008, 0x000C, 0x0000, 0x000D, 0x0000, 0x0015, 0x8000, }; static const unsigned short magic2_data[] = { 0x0061, 0x0001, 0x0092, 0x0100, 0x0093, 0x0001, 0x0007, 0x0021, }; static const unsigned short magic3_data[] = { 0x0010, 0x16B0, 0x0011, 0x0111, 0x0007, 0x0061, }; int migor_lcd_qvga_setup(void *board_data, void *sohandle, struct sh_mobile_lcdc_sys_bus_ops *so) { unsigned long xres = 320; unsigned long yres = 240; int k; reset_lcd_module(); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); if (read_reg16(sohandle, so, 0) != 0x1505) return -ENODEV; pr_info("Migo-R QVGA LCD Module detected.\n"); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); write_reg16(sohandle, so, 0x00A4, 0x0001); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic0_data, ARRAY_SIZE(magic0_data)); mdelay(100); migor_lcd_qvga_seq(sohandle, so, magic1_data, ARRAY_SIZE(magic1_data)); write_reg16(sohandle, 
so, 0x0050, 0xef - (yres - 1)); write_reg16(sohandle, so, 0x0051, 0x00ef); write_reg16(sohandle, so, 0x0052, 0x0000); write_reg16(sohandle, so, 0x0053, xres - 1); migor_lcd_qvga_seq(sohandle, so, magic2_data, ARRAY_SIZE(magic2_data)); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic3_data, ARRAY_SIZE(magic3_data)); mdelay(40); /* clear GRAM to avoid displaying garbage */ write_reg16(sohandle, so, 0x0020, 0x0000); /* horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* vert addr */ for (k = 0; k < (xres * 256); k++) /* yes, 256 words per line */ write_reg16(sohandle, so, 0x0022, 0x0000); write_reg16(sohandle, so, 0x0020, 0x0000); /* reset horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* reset vert addr */ write_reg16(sohandle, so, 0x0007, 0x0173); mdelay(40); /* enable display */ write_reg(sohandle, so, 0x00, 0x22); mdelay(100); return 0; }
gpl-2.0
Mantoui/kernel_motorola_msm8960-common
arch/sh/boards/mach-migor/lcd_qvga.c
4793
5030
/* * Support for SuperH MigoR Quarter VGA LCD Panel * * Copyright (C) 2008 Magnus Damm * * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio.h> #include <video/sh_mobile_lcdc.h> #include <cpu/sh7722.h> #include <mach/migor.h> /* LCD Module is a PH240320T according to board schematics. This module * is made up of a 240x320 LCD hooked up to a R61505U (or HX8347-A01?) * Driver IC. This IC is connected to the SH7722 built-in LCDC using a * SYS-80 interface configured in 16 bit mode. * * Index 0: "Device Code Read" returns 0x1505. */ static void reset_lcd_module(void) { gpio_set_value(GPIO_PTH2, 0); mdelay(2); gpio_set_value(GPIO_PTH2, 1); mdelay(1); } /* DB0-DB7 are connected to D1-D8, and DB8-DB15 to D10-D17 */ static unsigned long adjust_reg18(unsigned short data) { unsigned long tmp1, tmp2; tmp1 = (data<<1 | 0x00000001) & 0x000001FF; tmp2 = (data<<2 | 0x00000200) & 0x0003FE00; return tmp1 | tmp2; } static void write_reg(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg << 8 | data)); } static void write_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); sys_ops->write_data(sys_ops_handle, adjust_reg18(data)); } static unsigned long read_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg) { unsigned long data; sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); data = 
sys_ops->read_data(sys_ops_handle); return ((data >> 1) & 0xff) | ((data >> 2) & 0xff00); } static void migor_lcd_qvga_seq(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short const *data, int no_data) { int i; for (i = 0; i < no_data; i += 2) write_reg16(sys_ops_handle, sys_ops, data[i], data[i + 1]); } static const unsigned short sync_data[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned short magic0_data[] = { 0x0060, 0x2700, 0x0008, 0x0808, 0x0090, 0x001A, 0x0007, 0x0001, 0x0017, 0x0001, 0x0019, 0x0000, 0x0010, 0x17B0, 0x0011, 0x0116, 0x0012, 0x0198, 0x0013, 0x1400, 0x0029, 0x000C, 0x0012, 0x01B8, }; static const unsigned short magic1_data[] = { 0x0030, 0x0307, 0x0031, 0x0303, 0x0032, 0x0603, 0x0033, 0x0202, 0x0034, 0x0202, 0x0035, 0x0202, 0x0036, 0x1F1F, 0x0037, 0x0303, 0x0038, 0x0303, 0x0039, 0x0603, 0x003A, 0x0202, 0x003B, 0x0102, 0x003C, 0x0204, 0x003D, 0x0000, 0x0001, 0x0100, 0x0002, 0x0300, 0x0003, 0x5028, 0x0020, 0x00ef, 0x0021, 0x0000, 0x0004, 0x0000, 0x0009, 0x0000, 0x000A, 0x0008, 0x000C, 0x0000, 0x000D, 0x0000, 0x0015, 0x8000, }; static const unsigned short magic2_data[] = { 0x0061, 0x0001, 0x0092, 0x0100, 0x0093, 0x0001, 0x0007, 0x0021, }; static const unsigned short magic3_data[] = { 0x0010, 0x16B0, 0x0011, 0x0111, 0x0007, 0x0061, }; int migor_lcd_qvga_setup(void *board_data, void *sohandle, struct sh_mobile_lcdc_sys_bus_ops *so) { unsigned long xres = 320; unsigned long yres = 240; int k; reset_lcd_module(); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); if (read_reg16(sohandle, so, 0) != 0x1505) return -ENODEV; pr_info("Migo-R QVGA LCD Module detected.\n"); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); write_reg16(sohandle, so, 0x00A4, 0x0001); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic0_data, ARRAY_SIZE(magic0_data)); mdelay(100); migor_lcd_qvga_seq(sohandle, so, magic1_data, ARRAY_SIZE(magic1_data)); write_reg16(sohandle, 
so, 0x0050, 0xef - (yres - 1)); write_reg16(sohandle, so, 0x0051, 0x00ef); write_reg16(sohandle, so, 0x0052, 0x0000); write_reg16(sohandle, so, 0x0053, xres - 1); migor_lcd_qvga_seq(sohandle, so, magic2_data, ARRAY_SIZE(magic2_data)); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic3_data, ARRAY_SIZE(magic3_data)); mdelay(40); /* clear GRAM to avoid displaying garbage */ write_reg16(sohandle, so, 0x0020, 0x0000); /* horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* vert addr */ for (k = 0; k < (xres * 256); k++) /* yes, 256 words per line */ write_reg16(sohandle, so, 0x0022, 0x0000); write_reg16(sohandle, so, 0x0020, 0x0000); /* reset horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* reset vert addr */ write_reg16(sohandle, so, 0x0007, 0x0173); mdelay(40); /* enable display */ write_reg(sohandle, so, 0x00, 0x22); mdelay(100); return 0; }
gpl-2.0
Snuzzo/B14CKB1RD_kernel_m8
drivers/clk/clk-divider.c
4793
4836
/* * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org> * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Adjustable divider clock implementation */ #include <linux/clk-provider.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> #include <linux/string.h> /* * DOC: basic adjustable divider clock that cannot gate * * Traits of this clock: * prepare - clk_prepare only ensures that parents are prepared * enable - clk_enable only ensures that parents are enabled * rate - rate is adjustable. clk->rate = parent->rate / divisor * parent - fixed parent. No clk_set_parent support */ #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) #define div_mask(d) ((1 << (d->width)) - 1) static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_divider *divider = to_clk_divider(hw); unsigned int div; div = readl(divider->reg) >> divider->shift; div &= div_mask(divider); if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) div++; return parent_rate / div; } EXPORT_SYMBOL_GPL(clk_divider_recalc_rate); /* * The reverse of DIV_ROUND_UP: The maximum number which * divided by m is r */ #define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { struct clk_divider *divider = to_clk_divider(hw); int i, bestdiv = 0; unsigned long parent_rate, best = 0, now, maxdiv; if (!rate) rate = 1; maxdiv = (1 << divider->width); if (divider->flags & CLK_DIVIDER_ONE_BASED) maxdiv--; if (!best_parent_rate) { parent_rate = __clk_get_rate(__clk_get_parent(hw->clk)); bestdiv = DIV_ROUND_UP(parent_rate, rate); bestdiv 
= bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; return bestdiv; } /* * The maximum divider we can use without overflowing * unsigned long in rate * i below */ maxdiv = min(ULONG_MAX / rate, maxdiv); for (i = 1; i <= maxdiv; i++) { parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), MULT_ROUND_UP(rate, i)); now = parent_rate / i; if (now <= rate && now > best) { bestdiv = i; best = now; *best_parent_rate = parent_rate; } } if (!bestdiv) { bestdiv = (1 << divider->width); if (divider->flags & CLK_DIVIDER_ONE_BASED) bestdiv--; *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1); } return bestdiv; } static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { int div; div = clk_divider_bestdiv(hw, rate, prate); if (prate) return *prate / div; else { unsigned long r; r = __clk_get_rate(__clk_get_parent(hw->clk)); return r / div; } } EXPORT_SYMBOL_GPL(clk_divider_round_rate); static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate) { struct clk_divider *divider = to_clk_divider(hw); unsigned int div; unsigned long flags = 0; u32 val; div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate; if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) div--; if (div > div_mask(divider)) div = div_mask(divider); if (divider->lock) spin_lock_irqsave(divider->lock, flags); val = readl(divider->reg); val &= ~(div_mask(divider) << divider->shift); val |= div << divider->shift; writel(val, divider->reg); if (divider->lock) spin_unlock_irqrestore(divider->lock, flags); return 0; } EXPORT_SYMBOL_GPL(clk_divider_set_rate); struct clk_ops clk_divider_ops = { .recalc_rate = clk_divider_recalc_rate, .round_rate = clk_divider_round_rate, .set_rate = clk_divider_set_rate, }; EXPORT_SYMBOL_GPL(clk_divider_ops); struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, spinlock_t *lock) 
{ struct clk_divider *div; struct clk *clk; div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); if (!div) { pr_err("%s: could not allocate divider clk\n", __func__); return NULL; } /* struct clk_divider assignments */ div->reg = reg; div->shift = shift; div->width = width; div->flags = clk_divider_flags; div->lock = lock; if (parent_name) { div->parent[0] = kstrdup(parent_name, GFP_KERNEL); if (!div->parent[0]) goto out; } clk = clk_register(dev, name, &clk_divider_ops, &div->hw, div->parent, (parent_name ? 1 : 0), flags); if (clk) return clk; out: kfree(div->parent[0]); kfree(div); return NULL; }
gpl-2.0
MoKee/android_kernel_oppo_n3
drivers/usb/otg/twl6030-usb.c
4793
13578
/* * twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver. * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Author: Hema HK <hemahk@ti.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/usb/otg.h> #include <linux/i2c/twl.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/delay.h> /* usb register definitions */ #define USB_VENDOR_ID_LSB 0x00 #define USB_VENDOR_ID_MSB 0x01 #define USB_PRODUCT_ID_LSB 0x02 #define USB_PRODUCT_ID_MSB 0x03 #define USB_VBUS_CTRL_SET 0x04 #define USB_VBUS_CTRL_CLR 0x05 #define USB_ID_CTRL_SET 0x06 #define USB_ID_CTRL_CLR 0x07 #define USB_VBUS_INT_SRC 0x08 #define USB_VBUS_INT_LATCH_SET 0x09 #define USB_VBUS_INT_LATCH_CLR 0x0A #define USB_VBUS_INT_EN_LO_SET 0x0B #define USB_VBUS_INT_EN_LO_CLR 0x0C #define USB_VBUS_INT_EN_HI_SET 0x0D #define USB_VBUS_INT_EN_HI_CLR 0x0E #define USB_ID_INT_SRC 0x0F #define USB_ID_INT_LATCH_SET 0x10 #define USB_ID_INT_LATCH_CLR 0x11 #define USB_ID_INT_EN_LO_SET 0x12 #define USB_ID_INT_EN_LO_CLR 0x13 #define USB_ID_INT_EN_HI_SET 0x14 #define USB_ID_INT_EN_HI_CLR 0x15 #define USB_OTG_ADP_CTRL 0x16 #define 
USB_OTG_ADP_HIGH 0x17 #define USB_OTG_ADP_LOW 0x18 #define USB_OTG_ADP_RISE 0x19 #define USB_OTG_REVISION 0x1A /* to be moved to LDO */ #define TWL6030_MISC2 0xE5 #define TWL6030_CFG_LDO_PD2 0xF5 #define TWL6030_BACKUP_REG 0xFA #define STS_HW_CONDITIONS 0x21 /* In module TWL6030_MODULE_PM_MASTER */ #define STS_HW_CONDITIONS 0x21 #define STS_USB_ID BIT(2) /* In module TWL6030_MODULE_PM_RECEIVER */ #define VUSB_CFG_TRANS 0x71 #define VUSB_CFG_STATE 0x72 #define VUSB_CFG_VOLTAGE 0x73 /* in module TWL6030_MODULE_MAIN_CHARGE */ #define CHARGERUSB_CTRL1 0x8 #define CONTROLLER_STAT1 0x03 #define VBUS_DET BIT(2) struct twl6030_usb { struct usb_phy phy; struct device *dev; /* for vbus reporting with irqs disabled */ spinlock_t lock; struct regulator *usb3v3; /* used to set vbus, in atomic path */ struct work_struct set_vbus_work; int irq1; int irq2; u8 linkstat; u8 asleep; bool irq_enabled; bool vbus_enable; unsigned long features; }; #define phy_to_twl(x) container_of((x), struct twl6030_usb, phy) /*-------------------------------------------------------------------------*/ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module, u8 data, u8 address) { int ret = 0; ret = twl_i2c_write_u8(module, data, address); if (ret < 0) dev_err(twl->dev, "Write[0x%x] Error %d\n", address, ret); return ret; } static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) { u8 data, ret = 0; ret = twl_i2c_read_u8(module, &data, address); if (ret >= 0) ret = data; else dev_err(twl->dev, "readb[0x%x,0x%x] Error %d\n", module, address, ret); return ret; } static int twl6030_phy_init(struct usb_phy *x) { struct twl6030_usb *twl; struct device *dev; struct twl4030_usb_data *pdata; twl = phy_to_twl(x); dev = twl->dev; pdata = dev->platform_data; if (twl->linkstat == USB_EVENT_ID) pdata->phy_power(twl->dev, 1, 1); else pdata->phy_power(twl->dev, 0, 1); return 0; } static void twl6030_phy_shutdown(struct usb_phy *x) { struct twl6030_usb *twl; struct device *dev; 
struct twl4030_usb_data *pdata; twl = phy_to_twl(x); dev = twl->dev; pdata = dev->platform_data; pdata->phy_power(twl->dev, 0, 0); } static int twl6030_phy_suspend(struct usb_phy *x, int suspend) { struct twl6030_usb *twl = phy_to_twl(x); struct device *dev = twl->dev; struct twl4030_usb_data *pdata = dev->platform_data; pdata->phy_suspend(dev, suspend); return 0; } static int twl6030_start_srp(struct usb_otg *otg) { struct twl6030_usb *twl = phy_to_twl(otg->phy); twl6030_writeb(twl, TWL_MODULE_USB, 0x24, USB_VBUS_CTRL_SET); twl6030_writeb(twl, TWL_MODULE_USB, 0x84, USB_VBUS_CTRL_SET); mdelay(100); twl6030_writeb(twl, TWL_MODULE_USB, 0xa0, USB_VBUS_CTRL_CLR); return 0; } static int twl6030_usb_ldo_init(struct twl6030_usb *twl) { char *regulator_name; if (twl->features & TWL6025_SUBCLASS) regulator_name = "ldousb"; else regulator_name = "vusb"; /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */ twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG); /* Program CFG_LDO_PD2 register and set VUSB bit */ twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_CFG_LDO_PD2); /* Program MISC2 register and set bit VUSB_IN_VBAT */ twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2); twl->usb3v3 = regulator_get(twl->dev, regulator_name); if (IS_ERR(twl->usb3v3)) return -ENODEV; /* Program the USB_VBUS_CTRL_SET and set VBUS_ACT_COMP bit */ twl6030_writeb(twl, TWL_MODULE_USB, 0x4, USB_VBUS_CTRL_SET); /* * Program the USB_ID_CTRL_SET register to enable GND drive * and the ID comparators */ twl6030_writeb(twl, TWL_MODULE_USB, 0x14, USB_ID_CTRL_SET); return 0; } static ssize_t twl6030_usb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) { struct twl6030_usb *twl = dev_get_drvdata(dev); unsigned long flags; int ret = -EINVAL; spin_lock_irqsave(&twl->lock, flags); switch (twl->linkstat) { case USB_EVENT_VBUS: ret = snprintf(buf, PAGE_SIZE, "vbus\n"); break; case USB_EVENT_ID: ret = snprintf(buf, PAGE_SIZE, "id\n"); break; case 
USB_EVENT_NONE: ret = snprintf(buf, PAGE_SIZE, "none\n"); break; default: ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n"); } spin_unlock_irqrestore(&twl->lock, flags); return ret; } static DEVICE_ATTR(vbus, 0444, twl6030_usb_vbus_show, NULL); static irqreturn_t twl6030_usb_irq(int irq, void *_twl) { struct twl6030_usb *twl = _twl; struct usb_otg *otg = twl->phy.otg; int status; u8 vbus_state, hw_state; hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS); vbus_state = twl6030_readb(twl, TWL_MODULE_MAIN_CHARGE, CONTROLLER_STAT1); if (!(hw_state & STS_USB_ID)) { if (vbus_state & VBUS_DET) { regulator_enable(twl->usb3v3); twl->asleep = 1; status = USB_EVENT_VBUS; otg->default_a = false; twl->phy.state = OTG_STATE_B_IDLE; twl->linkstat = status; twl->phy.last_event = status; atomic_notifier_call_chain(&twl->phy.notifier, status, otg->gadget); } else { status = USB_EVENT_NONE; twl->linkstat = status; twl->phy.last_event = status; atomic_notifier_call_chain(&twl->phy.notifier, status, otg->gadget); if (twl->asleep) { regulator_disable(twl->usb3v3); twl->asleep = 0; } } } sysfs_notify(&twl->dev->kobj, NULL, "vbus"); return IRQ_HANDLED; } static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) { struct twl6030_usb *twl = _twl; struct usb_otg *otg = twl->phy.otg; int status = USB_EVENT_NONE; u8 hw_state; hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS); if (hw_state & STS_USB_ID) { regulator_enable(twl->usb3v3); twl->asleep = 1; twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1); twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x10); status = USB_EVENT_ID; otg->default_a = true; twl->phy.state = OTG_STATE_A_IDLE; twl->linkstat = status; twl->phy.last_event = status; atomic_notifier_call_chain(&twl->phy.notifier, status, otg->gadget); } else { twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x10); twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1); } twl6030_writeb(twl, TWL_MODULE_USB, 
USB_ID_INT_LATCH_CLR, status); return IRQ_HANDLED; } static int twl6030_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { if (!otg) return -ENODEV; otg->gadget = gadget; if (!gadget) otg->phy->state = OTG_STATE_UNDEFINED; return 0; } static int twl6030_enable_irq(struct usb_phy *x) { struct twl6030_usb *twl = phy_to_twl(x); twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1); twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C); twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C); twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, REG_INT_MSK_LINE_C); twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, REG_INT_MSK_STS_C); twl6030_usb_irq(twl->irq2, twl); twl6030_usbotg_irq(twl->irq1, twl); return 0; } static void otg_set_vbus_work(struct work_struct *data) { struct twl6030_usb *twl = container_of(data, struct twl6030_usb, set_vbus_work); /* * Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1 * register. This enables boost mode. */ if (twl->vbus_enable) twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x40, CHARGERUSB_CTRL1); else twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x00, CHARGERUSB_CTRL1); } static int twl6030_set_vbus(struct usb_otg *otg, bool enabled) { struct twl6030_usb *twl = phy_to_twl(otg->phy); twl->vbus_enable = enabled; schedule_work(&twl->set_vbus_work); return 0; } static int twl6030_set_host(struct usb_otg *otg, struct usb_bus *host) { if (!otg) return -ENODEV; otg->host = host; if (!host) otg->phy->state = OTG_STATE_UNDEFINED; return 0; } static int __devinit twl6030_usb_probe(struct platform_device *pdev) { struct twl6030_usb *twl; int status, err; struct twl4030_usb_data *pdata; struct usb_otg *otg; struct device *dev = &pdev->dev; pdata = dev->platform_data; twl = kzalloc(sizeof *twl, GFP_KERNEL); if (!twl) return -ENOMEM; otg = kzalloc(sizeof *otg, GFP_KERNEL); if (!otg) { kfree(twl); return -ENOMEM; } twl->dev = &pdev->dev; twl->irq1 = platform_get_irq(pdev, 0); twl->irq2 = platform_get_irq(pdev, 1); 
twl->features = pdata->features; twl->phy.dev = twl->dev; twl->phy.label = "twl6030"; twl->phy.otg = otg; twl->phy.init = twl6030_phy_init; twl->phy.shutdown = twl6030_phy_shutdown; twl->phy.set_suspend = twl6030_phy_suspend; otg->phy = &twl->phy; otg->set_host = twl6030_set_host; otg->set_peripheral = twl6030_set_peripheral; otg->set_vbus = twl6030_set_vbus; otg->start_srp = twl6030_start_srp; /* init spinlock for workqueue */ spin_lock_init(&twl->lock); err = twl6030_usb_ldo_init(twl); if (err) { dev_err(&pdev->dev, "ldo init failed\n"); kfree(otg); kfree(twl); return err; } usb_set_transceiver(&twl->phy); platform_set_drvdata(pdev, twl); if (device_create_file(&pdev->dev, &dev_attr_vbus)) dev_warn(&pdev->dev, "could not create sysfs file\n"); ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier); INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); twl->irq_enabled = true; status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "twl6030_usb", twl); if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", twl->irq1, status); device_remove_file(twl->dev, &dev_attr_vbus); kfree(otg); kfree(twl); return status; } status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "twl6030_usb", twl); if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", twl->irq2, status); free_irq(twl->irq1, twl); device_remove_file(twl->dev, &dev_attr_vbus); kfree(otg); kfree(twl); return status; } twl->asleep = 0; pdata->phy_init(dev); twl6030_phy_suspend(&twl->phy, 0); twl6030_enable_irq(&twl->phy); dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); return 0; } static int __exit twl6030_usb_remove(struct platform_device *pdev) { struct twl6030_usb *twl = platform_get_drvdata(pdev); struct twl4030_usb_data *pdata; struct device *dev = &pdev->dev; pdata = dev->platform_data; twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, REG_INT_MSK_LINE_C); 
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, REG_INT_MSK_STS_C); free_irq(twl->irq1, twl); free_irq(twl->irq2, twl); regulator_put(twl->usb3v3); pdata->phy_exit(twl->dev); device_remove_file(twl->dev, &dev_attr_vbus); cancel_work_sync(&twl->set_vbus_work); kfree(twl->phy.otg); kfree(twl); return 0; } static struct platform_driver twl6030_usb_driver = { .probe = twl6030_usb_probe, .remove = __exit_p(twl6030_usb_remove), .driver = { .name = "twl6030_usb", .owner = THIS_MODULE, }, }; static int __init twl6030_usb_init(void) { return platform_driver_register(&twl6030_usb_driver); } subsys_initcall(twl6030_usb_init); static void __exit twl6030_usb_exit(void) { platform_driver_unregister(&twl6030_usb_driver); } module_exit(twl6030_usb_exit); MODULE_ALIAS("platform:twl6030_usb"); MODULE_AUTHOR("Hema HK <hemahk@ti.com>"); MODULE_DESCRIPTION("TWL6030 USB transceiver driver"); MODULE_LICENSE("GPL");
gpl-2.0
zaclimon/Quanta-Flo
drivers/media/dvb/b2c2/flexcop-usb.c
5049
16027
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-usb.c - covers the USB part * see flexcop.c for copyright information */ #define FC_LOG_PREFIX "flexcop_usb" #include "flexcop-usb.h" #include "flexcop-common.h" /* Version information */ #define DRIVER_VERSION "0.1" #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV USB Driver" #define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>" /* debug */ #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG #define dprintk(level,args...) \ do { if ((debug & level)) printk(args); } while (0) #define debug_dump(b, l, method) do {\ int i; \ for (i = 0; i < l; i++) \ method("%02x ", b[i]); \ method("\n"); \ } while (0) #define DEBSTATUS "" #else #define dprintk(level, args...) #define debug_dump(b, l, method) #define DEBSTATUS " (debugging is not enabled)" #endif static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2," "ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS); #undef DEBSTATUS #define deb_info(args...) dprintk(0x01, args) #define deb_ts(args...) dprintk(0x02, args) #define deb_ctrl(args...) dprintk(0x04, args) #define deb_i2c(args...) dprintk(0x08, args) #define deb_v8(args...) dprintk(0x10, args) /* JLP 111700: we will include the 1 bit gap between the upper and lower 3 bits * in the IBI address, to make the V8 code simpler. * PCI ADDRESS FORMAT: 0x71C -> 0000 0111 0001 1100 (the six bits used) * in general: 0000 0HHH 000L LL00 * IBI ADDRESS FORMAT: RHHH BLLL * * where R is the read(1)/write(0) bit, B is the busy bit * and HHH and LLL are the two sets of three bits from the PCI address. 
*/ #define B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(usPCI) (u8) \ (((usPCI >> 2) & 0x07) + ((usPCI >> 4) & 0x70)) #define B2C2_FLEX_INTERNALADDR_TO_PCIOFFSET(ucAddr) (u16) \ (((ucAddr & 0x07) << 2) + ((ucAddr & 0x70) << 4)) /* * DKT 020228 * - forget about this VENDOR_BUFFER_SIZE, read and write register * deal with DWORD or 4 bytes, that should be should from now on * - from now on, we don't support anything older than firm 1.00 * I eliminated the write register as a 2 trip of writing hi word and lo word * and force this to write only 4 bytes at a time. * NOTE: this should work with all the firmware from 1.00 and newer */ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI, u32 *val, u8 read) { struct flexcop_usb *fc_usb = fc->bus_specific; u8 request = read ? B2C2_USB_READ_REG : B2C2_USB_WRITE_REG; u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR; u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) | (read ? 0x80 : 0); int len = usb_control_msg(fc_usb->udev, read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT, request, request_type, /* 0xc0 read or 0x40 write */ wAddress, 0, val, sizeof(u32), B2C2_WAIT_FOR_OPERATION_RDW * HZ); if (len != sizeof(u32)) { err("error while %s dword from %d (%d).", read ? 
"reading" : "writing", wAddress, wRegOffsPCI); return -EIO; } return 0; } /* * DKT 010817 - add support for V8 memory read/write and flash update */ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb, flexcop_usb_request_t req, u8 page, u16 wAddress, u8 *pbBuffer, u32 buflen) { u8 request_type = USB_TYPE_VENDOR; u16 wIndex; int nWaitTime, pipe, len; wIndex = page << 8; switch (req) { case B2C2_USB_READ_V8_MEM: nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ; request_type |= USB_DIR_IN; pipe = B2C2_USB_CTRL_PIPE_IN; break; case B2C2_USB_WRITE_V8_MEM: wIndex |= pbBuffer[0]; request_type |= USB_DIR_OUT; nWaitTime = B2C2_WAIT_FOR_OPERATION_V8WRITE; pipe = B2C2_USB_CTRL_PIPE_OUT; break; case B2C2_USB_FLASH_BLOCK: request_type |= USB_DIR_OUT; nWaitTime = B2C2_WAIT_FOR_OPERATION_V8FLASH; pipe = B2C2_USB_CTRL_PIPE_OUT; break; default: deb_info("unsupported request for v8_mem_req %x.\n", req); return -EINVAL; } deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req, wAddress, wIndex, buflen); len = usb_control_msg(fc_usb->udev, pipe, req, request_type, wAddress, wIndex, pbBuffer, buflen, nWaitTime * HZ); debug_dump(pbBuffer, len, deb_v8); return len == buflen ? 0 : -EIO; } #define bytes_left_to_read_on_page(paddr,buflen) \ ((V8_MEMORY_PAGE_SIZE - (paddr & V8_MEMORY_PAGE_MASK)) > buflen \ ? buflen : (V8_MEMORY_PAGE_SIZE - (paddr & V8_MEMORY_PAGE_MASK))) static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb, flexcop_usb_request_t req, flexcop_usb_mem_page_t page_start, u32 addr, int extended, u8 *buf, u32 len) { int i,ret = 0; u16 wMax; u32 pagechunk = 0; switch(req) { case B2C2_USB_READ_V8_MEM: wMax = USB_MEM_READ_MAX; break; case B2C2_USB_WRITE_V8_MEM: wMax = USB_MEM_WRITE_MAX; break; case B2C2_USB_FLASH_BLOCK: wMax = USB_FLASH_MAX; break; default: return -EINVAL; break; } for (i = 0; i < len;) { pagechunk = wMax < bytes_left_to_read_on_page(addr, len) ? 
wMax : bytes_left_to_read_on_page(addr, len); deb_info("%x\n", (addr & V8_MEMORY_PAGE_MASK) | (V8_MEMORY_EXTENDED*extended)); ret = flexcop_usb_v8_memory_req(fc_usb, req, page_start + (addr / V8_MEMORY_PAGE_SIZE), (addr & V8_MEMORY_PAGE_MASK) | (V8_MEMORY_EXTENDED*extended), &buf[i], pagechunk); if (ret < 0) return ret; addr += pagechunk; len -= pagechunk; } return 0; } static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended) { return flexcop_usb_memory_req(fc->bus_specific, B2C2_USB_READ_V8_MEM, V8_MEMORY_PAGE_FLASH, 0x1f010, 1, fc->dvb_adapter.proposed_mac, 6); } #if 0 static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set, flexcop_usb_utility_function_t func, u8 extra, u16 wIndex, u16 buflen, u8 *pvBuffer) { u16 wValue; u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR; int nWaitTime = 2, pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len; wValue = (func << 8) | extra; len = usb_control_msg(fc_usb->udev,pipe, B2C2_USB_UTILITY, request_type, wValue, wIndex, pvBuffer, buflen, nWaitTime * HZ); return len == buflen ? 
0 : -EIO; } #endif /* usb i2c stuff */ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, flexcop_usb_request_t req, flexcop_usb_i2c_function_t func, u8 chipaddr, u8 addr, u8 *buf, u8 buflen) { struct flexcop_usb *fc_usb = i2c->fc->bus_specific; u16 wValue, wIndex; int nWaitTime,pipe,len; u8 request_type = USB_TYPE_VENDOR; switch (func) { case USB_FUNC_I2C_WRITE: case USB_FUNC_I2C_MULTIWRITE: case USB_FUNC_I2C_REPEATWRITE: /* DKT 020208 - add this to support special case of DiSEqC */ case USB_FUNC_I2C_CHECKWRITE: pipe = B2C2_USB_CTRL_PIPE_OUT; nWaitTime = 2; request_type |= USB_DIR_OUT; break; case USB_FUNC_I2C_READ: case USB_FUNC_I2C_REPEATREAD: pipe = B2C2_USB_CTRL_PIPE_IN; nWaitTime = 2; request_type |= USB_DIR_IN; break; default: deb_info("unsupported function for i2c_req %x\n", func); return -EINVAL; } wValue = (func << 8) | (i2c->port << 4); wIndex = (chipaddr << 8 ) | addr; deb_i2c("i2c %2d: %02x %02x %02x %02x %02x %02x\n", func, request_type, req, wValue & 0xff, wValue >> 8, wIndex & 0xff, wIndex >> 8); len = usb_control_msg(fc_usb->udev,pipe, req, request_type, wValue, wIndex, buf, buflen, nWaitTime * HZ); return len == buflen ? 
0 : -EREMOTEIO; } /* actual bus specific access functions, make sure prototype are/will be equal to pci */ static flexcop_ibi_value flexcop_usb_read_ibi_reg(struct flexcop_device *fc, flexcop_ibi_register reg) { flexcop_ibi_value val; val.raw = 0; flexcop_usb_readwrite_dw(fc, reg, &val.raw, 1); return val; } static int flexcop_usb_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_register reg, flexcop_ibi_value val) { return flexcop_usb_readwrite_dw(fc, reg, &val.raw, 0); } static int flexcop_usb_i2c_request(struct flexcop_i2c_adapter *i2c, flexcop_access_op_t op, u8 chipaddr, u8 addr, u8 *buf, u16 len) { if (op == FC_READ) return flexcop_usb_i2c_req(i2c, B2C2_USB_I2C_REQUEST, USB_FUNC_I2C_READ, chipaddr, addr, buf, len); else return flexcop_usb_i2c_req(i2c, B2C2_USB_I2C_REQUEST, USB_FUNC_I2C_WRITE, chipaddr, addr, buf, len); } static void flexcop_usb_process_frame(struct flexcop_usb *fc_usb, u8 *buffer, int buffer_length) { u8 *b; int l; deb_ts("tmp_buffer_length=%d, buffer_length=%d\n", fc_usb->tmp_buffer_length, buffer_length); if (fc_usb->tmp_buffer_length > 0) { memcpy(fc_usb->tmp_buffer+fc_usb->tmp_buffer_length, buffer, buffer_length); fc_usb->tmp_buffer_length += buffer_length; b = fc_usb->tmp_buffer; l = fc_usb->tmp_buffer_length; } else { b=buffer; l=buffer_length; } while (l >= 190) { if (*b == 0xff) { switch (*(b+1) & 0x03) { case 0x01: /* media packet */ if (*(b+2) == 0x47) flexcop_pass_dmx_packets( fc_usb->fc_dev, b+2, 1); else deb_ts( "not ts packet %02x %02x %02x %02x \n", *(b+2), *(b+3), *(b+4), *(b+5)); b += 190; l -= 190; break; default: deb_ts("wrong packet type\n"); l = 0; break; } } else { deb_ts("wrong header\n"); l = 0; } } if (l>0) memcpy(fc_usb->tmp_buffer, b, l); fc_usb->tmp_buffer_length = l; } static void flexcop_usb_urb_complete(struct urb *urb) { struct flexcop_usb *fc_usb = urb->context; int i; if (urb->actual_length > 0) deb_ts("urb completed, bufsize: %d actlen; %d\n", urb->transfer_buffer_length, urb->actual_length); for (i = 
0; i < urb->number_of_packets; i++) { if (urb->iso_frame_desc[i].status < 0) { err("iso frame descriptor %d has an error: %d\n", i, urb->iso_frame_desc[i].status); } else if (urb->iso_frame_desc[i].actual_length > 0) { deb_ts("passed %d bytes to the demux\n", urb->iso_frame_desc[i].actual_length); flexcop_usb_process_frame(fc_usb, urb->transfer_buffer + urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].actual_length); } urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } usb_submit_urb(urb,GFP_ATOMIC); } static int flexcop_usb_stream_control(struct flexcop_device *fc, int onoff) { /* submit/kill iso packets */ return 0; } static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb) { int i; for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) if (fc_usb->iso_urb[i] != NULL) { deb_ts("unlinking/killing urb no. %d\n",i); usb_kill_urb(fc_usb->iso_urb[i]); usb_free_urb(fc_usb->iso_urb[i]); } if (fc_usb->iso_buffer != NULL) pci_free_consistent(NULL, fc_usb->buffer_size, fc_usb->iso_buffer, fc_usb->dma_addr); } static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb) { u16 frame_size = le16_to_cpu( fc_usb->uintf->cur_altsetting->endpoint[0].desc.wMaxPacketSize); int bufsize = B2C2_USB_NUM_ISO_URB * B2C2_USB_FRAMES_PER_ISO * frame_size, i, j, ret; int buffer_offset = 0; deb_ts("creating %d iso-urbs with %d frames " "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB, B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize); fc_usb->iso_buffer = pci_alloc_consistent(NULL, bufsize, &fc_usb->dma_addr); if (fc_usb->iso_buffer == NULL) return -ENOMEM; memset(fc_usb->iso_buffer, 0, bufsize); fc_usb->buffer_size = bufsize; /* creating iso urbs */ for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) { fc_usb->iso_urb[i] = usb_alloc_urb(B2C2_USB_FRAMES_PER_ISO, GFP_ATOMIC); if (fc_usb->iso_urb[i] == NULL) { ret = -ENOMEM; goto urb_error; } } /* initialising and submitting iso urbs */ for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) { int frame_offset = 0; struct urb *urb 
= fc_usb->iso_urb[i]; deb_ts("initializing and submitting urb no. %d " "(buf_offset: %d).\n", i, buffer_offset); urb->dev = fc_usb->udev; urb->context = fc_usb; urb->complete = flexcop_usb_urb_complete; urb->pipe = B2C2_USB_DATA_PIPE; urb->transfer_flags = URB_ISO_ASAP; urb->interval = 1; urb->number_of_packets = B2C2_USB_FRAMES_PER_ISO; urb->transfer_buffer_length = frame_size * B2C2_USB_FRAMES_PER_ISO; urb->transfer_buffer = fc_usb->iso_buffer + buffer_offset; buffer_offset += frame_size * B2C2_USB_FRAMES_PER_ISO; for (j = 0; j < B2C2_USB_FRAMES_PER_ISO; j++) { deb_ts("urb no: %d, frame: %d, frame_offset: %d\n", i, j, frame_offset); urb->iso_frame_desc[j].offset = frame_offset; urb->iso_frame_desc[j].length = frame_size; frame_offset += frame_size; } if ((ret = usb_submit_urb(fc_usb->iso_urb[i],GFP_ATOMIC))) { err("submitting urb %d failed with %d.", i, ret); goto urb_error; } deb_ts("submitted urb no. %d.\n",i); } /* SRAM */ flexcop_sram_set_dest(fc_usb->fc_dev, FC_SRAM_DEST_MEDIA | FC_SRAM_DEST_NET | FC_SRAM_DEST_CAO | FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_WAN_USB); flexcop_wan_set_speed(fc_usb->fc_dev, FC_WAN_SPEED_8MBITS); flexcop_sram_ctrl(fc_usb->fc_dev, 1, 1, 1); return 0; urb_error: flexcop_usb_transfer_exit(fc_usb); return ret; } static int flexcop_usb_init(struct flexcop_usb *fc_usb) { /* use the alternate setting with the larges buffer */ usb_set_interface(fc_usb->udev,0,1); switch (fc_usb->udev->speed) { case USB_SPEED_LOW: err("cannot handle USB speed because it is too slow."); return -ENODEV; break; case USB_SPEED_FULL: info("running at FULL speed."); break; case USB_SPEED_HIGH: info("running at HIGH speed."); break; case USB_SPEED_UNKNOWN: /* fall through */ default: err("cannot handle USB speed because it is unknown."); return -ENODEV; } usb_set_intfdata(fc_usb->uintf, fc_usb); return 0; } static void flexcop_usb_exit(struct flexcop_usb *fc_usb) { usb_set_intfdata(fc_usb->uintf, NULL); } static int flexcop_usb_probe(struct usb_interface *intf, 
const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct flexcop_usb *fc_usb = NULL; struct flexcop_device *fc = NULL; int ret; if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { err("out of memory\n"); return -ENOMEM; } /* general flexcop init */ fc_usb = fc->bus_specific; fc_usb->fc_dev = fc; fc->read_ibi_reg = flexcop_usb_read_ibi_reg; fc->write_ibi_reg = flexcop_usb_write_ibi_reg; fc->i2c_request = flexcop_usb_i2c_request; fc->get_mac_addr = flexcop_usb_get_mac_addr; fc->stream_control = flexcop_usb_stream_control; fc->pid_filtering = 1; fc->bus_type = FC_USB; fc->dev = &udev->dev; fc->owner = THIS_MODULE; /* bus specific part */ fc_usb->udev = udev; fc_usb->uintf = intf; if ((ret = flexcop_usb_init(fc_usb)) != 0) goto err_kfree; /* init flexcop */ if ((ret = flexcop_device_initialize(fc)) != 0) goto err_usb_exit; /* xfer init */ if ((ret = flexcop_usb_transfer_init(fc_usb)) != 0) goto err_fc_exit; info("%s successfully initialized and connected.", DRIVER_NAME); return 0; err_fc_exit: flexcop_device_exit(fc); err_usb_exit: flexcop_usb_exit(fc_usb); err_kfree: flexcop_device_kfree(fc); return ret; } static void flexcop_usb_disconnect(struct usb_interface *intf) { struct flexcop_usb *fc_usb = usb_get_intfdata(intf); flexcop_usb_transfer_exit(fc_usb); flexcop_device_exit(fc_usb->fc_dev); flexcop_usb_exit(fc_usb); flexcop_device_kfree(fc_usb->fc_dev); info("%s successfully deinitialized and disconnected.", DRIVER_NAME); } static struct usb_device_id flexcop_usb_table [] = { { USB_DEVICE(0x0af7, 0x0101) }, { } }; MODULE_DEVICE_TABLE (usb, flexcop_usb_table); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver flexcop_usb_driver = { .name = "b2c2_flexcop_usb", .probe = flexcop_usb_probe, .disconnect = flexcop_usb_disconnect, .id_table = flexcop_usb_table, }; module_usb_driver(flexcop_usb_driver); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_DESCRIPTION(DRIVER_NAME); MODULE_LICENSE("GPL");
gpl-2.0
poondog/kangaroo-m7-mkv
fs/ocfs2/mmap.c
5049
5131
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * mmap.c * * Code to deal with the mess that is clustered mmap. * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/signal.h> #include <linux/rbtree.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "aops.h" #include "dlmglue.h" #include "file.h" #include "inode.h" #include "mmap.h" #include "super.h" #include "ocfs2_trace.h" static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) { sigset_t oldset; int ret; ocfs2_block_signals(&oldset); ret = filemap_fault(area, vmf); ocfs2_unblock_signals(&oldset); trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno, area, vmf->page, vmf->pgoff); return ret; } static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, struct page *page) { int ret = VM_FAULT_NOPAGE; struct inode *inode = file->f_path.dentry->d_inode; struct address_space *mapping = inode->i_mapping; loff_t pos = page_offset(page); unsigned int len = PAGE_CACHE_SIZE; pgoff_t last_index; struct page *locked_page = NULL; void *fsdata; loff_t size = i_size_read(inode); last_index = (size - 1) 
>> PAGE_CACHE_SHIFT; /* * There are cases that lead to the page no longer bebongs to the * mapping. * 1) pagecache truncates locally due to memory pressure. * 2) pagecache truncates when another is taking EX lock against * inode lock. see ocfs2_data_convert_worker. * * The i_size check doesn't catch the case where nodes truncated and * then re-extended the file. We'll re-check the page mapping after * taking the page lock inside of ocfs2_write_begin_nolock(). * * Let VM retry with these cases. */ if ((page->mapping != inode->i_mapping) || (!PageUptodate(page)) || (page_offset(page) >= size)) goto out; /* * Call ocfs2_write_begin() and ocfs2_write_end() to take * advantage of the allocation code there. We pass a write * length of the whole page (chopped to i_size) to make sure * the whole thing is allocated. * * Since we know the page is up to date, we don't have to * worry about ocfs2_write_begin() skipping some buffer reads * because the "write" would invalidate their data. */ if (page->index == last_index) len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, &fsdata, di_bh, page); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); if (ret == -ENOMEM) ret = VM_FAULT_OOM; else ret = VM_FAULT_SIGBUS; goto out; } if (!locked_page) { ret = VM_FAULT_NOPAGE; goto out; } ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, fsdata); BUG_ON(ret != len); ret = VM_FAULT_LOCKED; out: return ret; } static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_path.dentry->d_inode; struct buffer_head *di_bh = NULL; sigset_t oldset; int ret; ocfs2_block_signals(&oldset); /* * The cluster locks taken will block a truncate from another * node. Taking the data lock will also ensure that we don't * attempt page truncation as part of a downconvert. 
*/ ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); goto out; } /* * The alloc sem should be enough to serialize with * ocfs2_truncate_file() changing i_size as well as any thread * modifying the inode btree. */ down_write(&OCFS2_I(inode)->ip_alloc_sem); ret = __ocfs2_page_mkwrite(vma->vm_file, di_bh, page); up_write(&OCFS2_I(inode)->ip_alloc_sem); brelse(di_bh); ocfs2_inode_unlock(inode, 1); out: ocfs2_unblock_signals(&oldset); return ret; } static const struct vm_operations_struct ocfs2_file_vm_ops = { .fault = ocfs2_fault, .page_mkwrite = ocfs2_page_mkwrite, }; int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) { int ret = 0, lock_level = 0; ret = ocfs2_inode_lock_atime(file->f_dentry->d_inode, file->f_vfsmnt, &lock_level); if (ret < 0) { mlog_errno(ret); goto out; } ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level); out: vma->vm_ops = &ocfs2_file_vm_ops; vma->vm_flags |= VM_CAN_NONLINEAR; return 0; }
gpl-2.0
zaphodatreides/P8000-Kernel
drivers/video/atafb.c
10425
91065
/* * linux/drivers/video/atafb.c -- Atari builtin chipset frame buffer device * * Copyright (C) 1994 Martin Schaller & Roman Hodek * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * History: * - 03 Jan 95: Original version by Martin Schaller: The TT driver and * all the device independent stuff * - 09 Jan 95: Roman: I've added the hardware abstraction (hw_switch) * and wrote the Falcon, ST(E), and External drivers * based on the original TT driver. * - 07 May 95: Martin: Added colormap operations for the external driver * - 21 May 95: Martin: Added support for overscan * Andreas: some bug fixes for this * - Jul 95: Guenther Kelleter <guenther@pool.informatik.rwth-aachen.de>: * Programmable Falcon video modes * (thanks to Christian Cartus for documentation * of VIDEL registers). * - 27 Dec 95: Guenther: Implemented user definable video modes "user[0-7]" * on minor 24...31. "user0" may be set on commandline by * "R<x>;<y>;<depth>". (Makes sense only on Falcon) * Video mode switch on Falcon now done at next VBL interrupt * to avoid the annoying right shift of the screen. * - 23 Sep 97: Juergen: added xres_virtual for cards like ProMST * The external-part is legacy, therefore hardware-specific * functions like panning/hardwarescrolling/blanking isn't * supported. * - 29 Sep 97: Juergen: added Romans suggestion for pan_display * (var->xoffset was changed even if no set_screen_base avail.) * - 05 Oct 97: Juergen: extfb (PACKED_PIXEL) is FB_PSEUDOCOLOR 'cause * we know how to set the colors * ext_*palette: read from ext_colors (former MV300_colors) * write to ext_colors and RAMDAC * * To do: * - For the Falcon it is not possible to set random video modes on * SM124 and SC/TV, only the bootup resolution is supported. 
* */ #define ATAFB_TT #define ATAFB_STE #define ATAFB_EXT #define ATAFB_FALCON #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <linux/uaccess.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stram.h> #include <linux/fb.h> #include <asm/atarikb.h> #include "c2p.h" #include "atafb.h" #define SWITCH_ACIA 0x01 /* modes for switch on OverScan */ #define SWITCH_SND6 0x40 #define SWITCH_SND7 0x80 #define SWITCH_NONE 0x00 #define up(x, r) (((x) + (r) - 1) & ~((r)-1)) /* * Interface to the world */ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int atafb_set_par(struct fb_info *info); static int atafb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info); static int atafb_blank(int blank, struct fb_info *info); static int atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *region); static void atafb_imageblit(struct fb_info *info, const struct fb_image *image); static int atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg); static int default_par; /* default resolution (0=none) */ static unsigned long default_mem_req; static int hwscroll = -1; static int use_hwscroll = 1; static int sttt_xres = 640, st_yres = 400, tt_yres = 480; static int sttt_xres_virtual = 640, sttt_yres_virtual = 400; static int ovsc_offset, ovsc_addlen; /* * Hardware parameters for current mode */ static struct atafb_par { void *screen_base; int yres_virtual; u_long next_line; #if defined ATAFB_TT || defined ATAFB_STE 
union { struct { int mode; int sync; } tt, st; #endif #ifdef ATAFB_FALCON struct falcon_hw { /* Here are fields for storing a video mode, as direct * parameters for the hardware. */ short sync; short line_width; short line_offset; short st_shift; short f_shift; short vid_control; short vid_mode; short xoffset; short hht, hbb, hbe, hdb, hde, hss; short vft, vbb, vbe, vdb, vde, vss; /* auxiliary information */ short mono; short ste_mode; short bpp; u32 pseudo_palette[16]; } falcon; #endif /* Nothing needed for external mode */ } hw; } current_par; /* Don't calculate an own resolution, and thus don't change the one found when * booting (currently used for the Falcon to keep settings for internal video * hardware extensions (e.g. ScreenBlaster) */ static int DontCalcRes = 0; #ifdef ATAFB_FALCON #define HHT hw.falcon.hht #define HBB hw.falcon.hbb #define HBE hw.falcon.hbe #define HDB hw.falcon.hdb #define HDE hw.falcon.hde #define HSS hw.falcon.hss #define VFT hw.falcon.vft #define VBB hw.falcon.vbb #define VBE hw.falcon.vbe #define VDB hw.falcon.vdb #define VDE hw.falcon.vde #define VSS hw.falcon.vss #define VCO_CLOCK25 0x04 #define VCO_CSYPOS 0x10 #define VCO_VSYPOS 0x20 #define VCO_HSYPOS 0x40 #define VCO_SHORTOFFS 0x100 #define VMO_DOUBLE 0x01 #define VMO_INTER 0x02 #define VMO_PREMASK 0x0c #endif static struct fb_info fb_info = { .fix = { .id = "Atari ", .visual = FB_VISUAL_PSEUDOCOLOR, .accel = FB_ACCEL_NONE, } }; static void *screen_base; /* base address of screen */ static void *real_screen_base; /* (only for Overscan) */ static int screen_len; static int current_par_valid; static int mono_moni; #ifdef ATAFB_EXT /* external video handling */ static unsigned int external_xres; static unsigned int external_xres_virtual; static unsigned int external_yres; /* * not needed - atafb will never support panning/hardwarescroll with external * static unsigned int external_yres_virtual; */ static unsigned int external_depth; static int external_pmode; static void 
*external_addr; static unsigned long external_len; static unsigned long external_vgaiobase; static unsigned int external_bitspercol = 6; /* * JOE <joe@amber.dinoco.de>: * added card type for external driver, is only needed for * colormap handling. */ enum cardtype { IS_VGA, IS_MV300 }; static enum cardtype external_card_type = IS_VGA; /* * The MV300 mixes the color registers. So we need an array of munged * indices in order to access the correct reg. */ static int MV300_reg_1bit[2] = { 0, 1 }; static int MV300_reg_4bit[16] = { 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15 }; static int MV300_reg_8bit[256] = { 0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99, 227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251, 7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223, 63, 191, 127, 255 }; static int *MV300_reg = MV300_reg_8bit; #endif /* ATAFB_EXT */ static int inverse; extern int fontheight_8x8; extern int fontwidth_8x8; extern unsigned char fontdata_8x8[]; extern int fontheight_8x16; 
extern int fontwidth_8x16; extern unsigned char fontdata_8x16[]; /* * struct fb_ops { * * open/release and usage marking * struct module *owner; * int (*fb_open)(struct fb_info *info, int user); * int (*fb_release)(struct fb_info *info, int user); * * * For framebuffers with strange non linear layouts or that do not * * work with normal memory mapped access * ssize_t (*fb_read)(struct file *file, char __user *buf, size_t count, loff_t *ppos); * ssize_t (*fb_write)(struct file *file, const char __user *buf, size_t count, loff_t *ppos); * * * checks var and eventually tweaks it to something supported, * * DOES NOT MODIFY PAR * * int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info); * * * set the video mode according to info->var * * int (*fb_set_par)(struct fb_info *info); * * * set color register * * int (*fb_setcolreg)(unsigned int regno, unsigned int red, unsigned int green, * unsigned int blue, unsigned int transp, struct fb_info *info); * * * set color registers in batch * * int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info); * * * blank display * * int (*fb_blank)(int blank, struct fb_info *info); * * * pan display * * int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); * * *** The meat of the drawing engine *** * * Draws a rectangle * * void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); * * Copy data from area to another * * void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); * * Draws a image to the display * * void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); * * * Draws cursor * * int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); * * * Rotates the display * * void (*fb_rotate)(struct fb_info *info, int angle); * * * wait for blit idle, optional * * int (*fb_sync)(struct fb_info *info); * * * perform fb specific ioctl (optional) * * int (*fb_ioctl)(struct fb_info *info, unsigned int cmd, * unsigned long arg); * * 
* Handle 32bit compat ioctl (optional) * * int (*fb_compat_ioctl)(struct fb_info *info, unsigned int cmd, * unsigned long arg); * * * perform fb specific mmap * * int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); * } ; */ /* ++roman: This structure abstracts from the underlying hardware (ST(e), * TT, or Falcon. * * int (*detect)(void) * This function should detect the current video mode settings and * store them in atafb_predefined[0] for later reference by the * user. Return the index+1 of an equivalent predefined mode or 0 * if there is no such. * * int (*encode_fix)(struct fb_fix_screeninfo *fix, * struct atafb_par *par) * This function should fill in the 'fix' structure based on the * values in the 'par' structure. * !!! Obsolete, perhaps !!! * * int (*decode_var)(struct fb_var_screeninfo *var, * struct atafb_par *par) * Get the video params out of 'var'. If a value doesn't fit, round * it up, if it's too big, return EINVAL. * Round up in the following order: bits_per_pixel, xres, yres, * xres_virtual, yres_virtual, xoffset, yoffset, grayscale, bitfields, * horizontal timing, vertical timing. * * int (*encode_var)(struct fb_var_screeninfo *var, * struct atafb_par *par); * Fill the 'var' structure based on the values in 'par' and maybe * other values read out of the hardware. * * void (*get_par)(struct atafb_par *par) * Fill the hardware's 'par' structure. * !!! Used only by detect() !!! * * void (*set_par)(struct atafb_par *par) * Set the hardware according to 'par'. * * void (*set_screen_base)(void *s_base) * Set the base address of the displayed frame buffer. Only called * if yres_virtual > yres or xres_virtual > xres. * * int (*blank)(int blank_mode) * Blank the screen if blank_mode != 0, else unblank. If blank == NULL then * the caller blanks by setting the CLUT to all black. Return 0 if blanking * succeeded, !=0 if un-/blanking failed due to e.g. a video mode which * doesn't support it. 
Implements VESA suspend and powerdown modes on * hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync, 3:suspend hsync, 4: powerdown. */ static struct fb_hwswitch { int (*detect)(void); int (*encode_fix)(struct fb_fix_screeninfo *fix, struct atafb_par *par); int (*decode_var)(struct fb_var_screeninfo *var, struct atafb_par *par); int (*encode_var)(struct fb_var_screeninfo *var, struct atafb_par *par); void (*get_par)(struct atafb_par *par); void (*set_par)(struct atafb_par *par); void (*set_screen_base)(void *s_base); int (*blank)(int blank_mode); int (*pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); } *fbhw; static char *autodetect_names[] = { "autodetect", NULL }; static char *stlow_names[] = { "stlow", NULL }; static char *stmid_names[] = { "stmid", "default5", NULL }; static char *sthigh_names[] = { "sthigh", "default4", NULL }; static char *ttlow_names[] = { "ttlow", NULL }; static char *ttmid_names[] = { "ttmid", "default1", NULL }; static char *tthigh_names[] = { "tthigh", "default2", NULL }; static char *vga2_names[] = { "vga2", NULL }; static char *vga4_names[] = { "vga4", NULL }; static char *vga16_names[] = { "vga16", "default3", NULL }; static char *vga256_names[] = { "vga256", NULL }; static char *falh2_names[] = { "falh2", NULL }; static char *falh16_names[] = { "falh16", NULL }; static char **fb_var_names[] = { autodetect_names, stlow_names, stmid_names, sthigh_names, ttlow_names, ttmid_names, tthigh_names, vga2_names, vga4_names, vga16_names, vga256_names, falh2_names, falh16_names, NULL }; static struct fb_var_screeninfo atafb_predefined[] = { /* * yres_virtual == 0 means use hw-scrolling if possible, else yres */ { /* autodetect */ 0, 0, 0, 0, 0, 0, 0, 0, /* xres-grayscale */ {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, /* red green blue tran*/ 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* st low */ 320, 200, 320, 0, 0, 0, 4, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 
0, 0 }, { /* st mid */ 640, 200, 640, 0, 0, 0, 2, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* st high */ 640, 400, 640, 0, 0, 0, 1, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt low */ 320, 480, 320, 0, 0, 0, 8, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt mid */ 640, 480, 640, 0, 0, 0, 4, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt high */ 1280, 960, 1280, 0, 0, 0, 1, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga2 */ 640, 480, 640, 0, 0, 0, 1, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga4 */ 640, 480, 640, 0, 0, 0, 2, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga16 */ 640, 480, 640, 0, 0, 0, 4, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga256 */ 640, 480, 640, 0, 0, 0, 8, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* falh2 */ 896, 608, 896, 0, 0, 0, 1, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* falh16 */ 896, 608, 896, 0, 0, 0, 4, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, }; static int num_atafb_predefined = ARRAY_SIZE(atafb_predefined); static struct fb_videomode atafb_modedb[] __initdata = { /* * Atari Video Modes * * If you change these, make sure to update DEFMODE_* as well! 
*/ /* * ST/TT Video Modes */ { /* 320x200, 15 kHz, 60 Hz (ST low) */ "st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x200, 15 kHz, 60 Hz (ST medium) */ "st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x400, 30.25 kHz, 63.5 Hz (ST high) */ "st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 320x480, 15 kHz, 60 Hz (TT low) */ "tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x480, 29 kHz, 57 Hz (TT medium) */ "tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 1280x960, 29 kHz, 60 Hz (TT high) */ "tt-high", 57, 640, 960, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, /* * VGA Video Modes */ { /* 640x480, 31 kHz, 60 Hz (VGA) */ "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x400, 31 kHz, 70 Hz (VGA) */ "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3, FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, /* * Falcon HiRes Video Modes */ { /* 896x608, 31 kHz, 60 Hz (Falcon High) */ "falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, }; #define NUM_TOTAL_MODES ARRAY_SIZE(atafb_modedb) static char *mode_option __initdata = NULL; /* default modes */ #define DEFMODE_TT 5 /* "tt-high" for TT */ #define DEFMODE_F30 7 /* "vga70" for Falcon */ #define DEFMODE_STE 2 /* "st-high" for ST/E */ #define DEFMODE_EXT 6 /* "vga" for external */ static int get_video_mode(char *vname) { char ***name_list; char **name; int i; name_list = fb_var_names; for (i = 0; i < num_atafb_predefined; i++) { name = *name_list++; if (!name || !*name) break; while (*name) { if (!strcmp(vname, *name)) return i + 1; name++; } } return 0; } /* 
------------------- TT specific functions ---------------------- */ #ifdef ATAFB_TT static int tt_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { int mode; strcpy(fix->id, "Atari Builtin"); fix->smem_start = (unsigned long)real_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; fix->visual = FB_VISUAL_PSEUDOCOLOR; mode = par->hw.tt.mode & TT_SHIFTER_MODEMASK; if (mode == TT_SHIFTER_TTHIGH || mode == TT_SHIFTER_STHIGH) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; if (mode == TT_SHIFTER_TTHIGH) fix->visual = FB_VISUAL_MONO01; } fix->xpanstep = 0; fix->ypanstep = 1; fix->ywrapstep = 0; fix->line_length = par->next_line; fix->accel = FB_ACCEL_ATARIBLITT; return 0; } static int tt_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int xres = var->xres; int yres = var->yres; int bpp = var->bits_per_pixel; int linelen; int yres_virtual = var->yres_virtual; if (mono_moni) { if (bpp > 1 || xres > sttt_xres * 2 || yres > tt_yres * 2) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_TTHIGH; xres = sttt_xres * 2; yres = tt_yres * 2; bpp = 1; } else { if (bpp > 8 || xres > sttt_xres || yres > tt_yres) return -EINVAL; if (bpp > 4) { if (xres > sttt_xres / 2 || yres > tt_yres) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_TTLOW; xres = sttt_xres / 2; yres = tt_yres; bpp = 8; } else if (bpp > 2) { if (xres > sttt_xres || yres > tt_yres) return -EINVAL; if (xres > sttt_xres / 2 || yres > st_yres / 2) { par->hw.tt.mode = TT_SHIFTER_TTMID; xres = sttt_xres; yres = tt_yres; bpp = 4; } else { par->hw.tt.mode = TT_SHIFTER_STLOW; xres = sttt_xres / 2; yres = st_yres / 2; bpp = 4; } } else if (bpp > 1) { if (xres > sttt_xres || yres > st_yres / 2) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_STMID; xres = sttt_xres; yres = st_yres / 2; bpp = 2; } else if (var->xres > sttt_xres || var->yres > st_yres) { return -EINVAL; } else { par->hw.tt.mode = TT_SHIFTER_STHIGH; xres = sttt_xres; yres = st_yres; bpp 
= 1; } } if (yres_virtual <= 0) yres_virtual = 0; else if (yres_virtual < yres) yres_virtual = yres; if (var->sync & FB_SYNC_EXT) par->hw.tt.sync = 0; else par->hw.tt.sync = 1; linelen = xres * bpp / 8; if (yres_virtual * linelen > screen_len && screen_len) return -EINVAL; if (yres * linelen > screen_len && screen_len) return -EINVAL; if (var->yoffset + yres > yres_virtual && yres_virtual) return -EINVAL; par->yres_virtual = yres_virtual; par->screen_base = screen_base + var->yoffset * linelen; par->next_line = linelen; return 0; } static int tt_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int linelen; memset(var, 0, sizeof(struct fb_var_screeninfo)); var->red.offset = 0; var->red.length = 4; var->red.msb_right = 0; var->grayscale = 0; var->pixclock = 31041; var->left_margin = 120; /* these may be incorrect */ var->right_margin = 100; var->upper_margin = 8; var->lower_margin = 16; var->hsync_len = 140; var->vsync_len = 30; var->height = -1; var->width = -1; if (par->hw.tt.sync & 1) var->sync = 0; else var->sync = FB_SYNC_EXT; switch (par->hw.tt.mode & TT_SHIFTER_MODEMASK) { case TT_SHIFTER_STLOW: var->xres = sttt_xres / 2; var->xres_virtual = sttt_xres_virtual / 2; var->yres = st_yres / 2; var->bits_per_pixel = 4; break; case TT_SHIFTER_STMID: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = st_yres / 2; var->bits_per_pixel = 2; break; case TT_SHIFTER_STHIGH: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = st_yres; var->bits_per_pixel = 1; break; case TT_SHIFTER_TTLOW: var->xres = sttt_xres / 2; var->xres_virtual = sttt_xres_virtual / 2; var->yres = tt_yres; var->bits_per_pixel = 8; break; case TT_SHIFTER_TTMID: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = tt_yres; var->bits_per_pixel = 4; break; case TT_SHIFTER_TTHIGH: var->red.length = 0; var->xres = sttt_xres * 2; var->xres_virtual = sttt_xres_virtual * 2; var->yres = tt_yres * 2; var->bits_per_pixel = 1; break; 
	}
	var->blue = var->green = var->red;
	var->transp.offset = 0;
	var->transp.length = 0;
	var->transp.msb_right = 0;
	linelen = var->xres_virtual * var->bits_per_pixel / 8;
	if (!use_hwscroll)
		var->yres_virtual = var->yres;
	else if (screen_len) {
		if (par->yres_virtual)
			var->yres_virtual = par->yres_virtual;
		else
			/* yres_virtual == 0 means use maximum */
			var->yres_virtual = screen_len / linelen;
	} else {
		if (hwscroll < 0)
			var->yres_virtual = 2 * var->yres;
		else
			var->yres_virtual = var->yres + hwscroll * 16;
	}
	var->xoffset = 0;
	if (screen_base)
		var->yoffset = (par->screen_base - screen_base) / linelen;
	else
		var->yoffset = 0;
	var->nonstd = 0;
	var->activate = 0;
	var->vmode = FB_VMODE_NONINTERLACED;
	return 0;
}

/* Read the current TT shifter state (mode, sync, screen base) into *par. */
static void tt_get_par(struct atafb_par *par)
{
	unsigned long addr;

	par->hw.tt.mode = shifter_tt.tt_shiftmode;
	par->hw.tt.sync = shifter.syncmode;
	/* The video base address is split over three byte-wide registers. */
	addr = ((shifter.bas_hi & 0xff) << 16) |
	       ((shifter.bas_md & 0xff) << 8) |
	       ((shifter.bas_lo & 0xff));
	par->screen_base = phys_to_virt(addr);
}

/* Program the TT shifter from *par. */
static void tt_set_par(struct atafb_par *par)
{
	shifter_tt.tt_shiftmode = par->hw.tt.mode;
	shifter.syncmode = par->hw.tt.sync;
	/* only set screen_base if really necessary */
	if (current_par.screen_base != par->screen_base)
		fbhw->set_screen_base(par->screen_base);
}

/*
 * Set one TT palette entry.  Components arrive as 16-bit values and are
 * reduced to the TT's 4 bits per gun.  In ST-high mode the two logical
 * colors are mapped onto palette entries 254/255.
 */
static int tt_setcolreg(unsigned int regno, unsigned int red,
			unsigned int green, unsigned int blue,
			unsigned int transp, struct fb_info *info)
{
	if ((shifter_tt.tt_shiftmode & TT_SHIFTER_MODEMASK) == TT_SHIFTER_STHIGH)
		regno += 254;
	if (regno > 255)
		return 1;
	tt_palette[regno] = (((red >> 12) << 8) | ((green >> 12) << 4) |
			     (blue >> 12));
	/* Keep entry 0 black when the ST-high pair is in use. */
	if ((shifter_tt.tt_shiftmode & TT_SHIFTER_MODEMASK) ==
	    TT_SHIFTER_STHIGH && regno == 254)
		tt_palette[0] = 0;
	return 0;
}

static int tt_detect(void)
{
	struct atafb_par par;

	/* Determine the connected monitor: The DMA sound must be
	 * disabled before reading the MFP GPIP, because the Sound
	 * Done Signal and the Monochrome Detect are XORed together!
	 *
	 * Even on a TT, we should look if there is a DMA sound. It was
	 * announced that the Eagle is TT compatible, but only the PCM is
	 * missing...
	 */
	if (ATARIHW_PRESENT(PCM_8BIT)) {
		tt_dmasnd.ctrl = DMASND_CTRL_OFF;
		udelay(20);	/* wait a while for things to settle down */
	}
	mono_moni = (st_mfp.par_dt_reg & 0x80) == 0;

	tt_get_par(&par);
	tt_encode_var(&atafb_predefined[0], &par);

	return 1;
}
#endif /* ATAFB_TT */

/* ------------------- Falcon specific functions ---------------------- */

#ifdef ATAFB_FALCON
static int mon_type;		/* Falcon connected monitor */
static int f030_bus_width;	/* Falcon ram bus width (for vid_control) */
#define F_MON_SM	0
#define F_MON_SC	1
#define F_MON_VGA	2
#define F_MON_TV	3

/* Master pixel clock descriptors: 25.175 MHz, 32 MHz and external clock. */
static struct pixel_clock {
	unsigned long f;	/* f/[Hz] */
	unsigned long t;	/* t/[ps] (=1/f) */
	int right, hsync, left;	/* standard timing in clock cycles, not pixel */
	/* hsync initialized in falcon_detect() */
	int sync_mask;		/* or-mask for hw.falcon.sync to set this clock */
	int control_mask;	/* ditto, for hw.falcon.vid_control */
} f25 = {
	25175000, 39721, 18, 0, 42, 0x0, VCO_CLOCK25
}, f32 = {
	32000000, 31250, 18, 0, 42, 0x0, 0
}, fext = {
	0, 0, 18, 0, 42, 0x1, 0
};

/* VIDEL-prescale values [mon_type][pixel_length from VCO] */
static int vdl_prescale[4][3] = {
	{ 4, 2, 1 }, { 4, 2, 1 }, { 4, 2, 2 }, { 4, 2, 1 }
};

/* Default hsync timing [mon_type] in picoseconds */
static long h_syncs[4] = { 3000000, 4875000, 4000000, 4875000 };

/* Horizontal prescale factor used by the hxx timing registers. */
static inline int hxx_prescale(struct falcon_hw *hw)
{
	return hw->ste_mode ?
16 : vdl_prescale[mon_type][hw->vid_mode >> 2 & 0x3]; } static int falcon_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Atari Builtin"); fix->smem_start = (unsigned long)real_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; fix->visual = FB_VISUAL_PSEUDOCOLOR; fix->xpanstep = 1; fix->ypanstep = 1; fix->ywrapstep = 0; if (par->hw.falcon.mono) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; /* no smooth scrolling with longword aligned video mem */ fix->xpanstep = 32; } else if (par->hw.falcon.f_shift & 0x100) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; /* Is this ok or should it be DIRECTCOLOR? */ fix->visual = FB_VISUAL_TRUECOLOR; fix->xpanstep = 2; } fix->line_length = par->next_line; fix->accel = FB_ACCEL_ATARIBLITT; return 0; } static int falcon_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int bpp = var->bits_per_pixel; int xres = var->xres; int yres = var->yres; int xres_virtual = var->xres_virtual; int yres_virtual = var->yres_virtual; int left_margin, right_margin, hsync_len; int upper_margin, lower_margin, vsync_len; int linelen; int interlace = 0, doubleline = 0; struct pixel_clock *pclock; int plen; /* width of pixel in clock cycles */ int xstretch; int prescale; int longoffset = 0; int hfreq, vfreq; int hdb_off, hde_off, base_off; int gstart, gend1, gend2, align; /* Get the video params out of 'var'. If a value doesn't fit, round it up, if it's too big, return EINVAL. Round up in the following order: bits_per_pixel, xres, yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, bitfields, horizontal timing, vertical timing. There is a maximum of screen resolution determined by pixelclock and minimum frame rate -- (X+hmarg.)*(Y+vmarg.)*vfmin <= pixelclock. In interlace mode this is " * " *vfmin <= pixelclock. Additional constraints: hfreq. Frequency range for multisync monitors is given via command line. 
For TV and SM124 both frequencies are fixed. X % 16 == 0 to fit 8x?? font (except 1 bitplane modes must use X%32 == 0) Y % 16 == 0 to fit 8x16 font Y % 8 == 0 if Y<400 Currently interlace and doubleline mode in var are ignored. On SM124 and TV only the standard resolutions can be used. */ /* Reject uninitialized mode */ if (!xres || !yres || !bpp) return -EINVAL; if (mon_type == F_MON_SM && bpp != 1) return -EINVAL; if (bpp <= 1) { bpp = 1; par->hw.falcon.f_shift = 0x400; par->hw.falcon.st_shift = 0x200; } else if (bpp <= 2) { bpp = 2; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x100; } else if (bpp <= 4) { bpp = 4; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x000; } else if (bpp <= 8) { bpp = 8; par->hw.falcon.f_shift = 0x010; } else if (bpp <= 16) { bpp = 16; /* packed pixel mode */ par->hw.falcon.f_shift = 0x100; /* hicolor, no overlay */ } else return -EINVAL; par->hw.falcon.bpp = bpp; if (mon_type == F_MON_SM || DontCalcRes) { /* Skip all calculations. VGA/TV/SC1224 only supported. */ struct fb_var_screeninfo *myvar = &atafb_predefined[0]; if (bpp > myvar->bits_per_pixel || var->xres > myvar->xres || var->yres > myvar->yres) return -EINVAL; fbhw->get_par(par); /* Current par will be new par */ goto set_screen_base; /* Don't forget this */ } /* Only some fixed resolutions < 640x400 */ if (xres <= 320) xres = 320; else if (xres <= 640 && bpp != 16) xres = 640; if (yres <= 200) yres = 200; else if (yres <= 240) yres = 240; else if (yres <= 400) yres = 400; /* 2 planes must use STE compatibility mode */ par->hw.falcon.ste_mode = bpp == 2; par->hw.falcon.mono = bpp == 1; /* Total and visible scanline length must be a multiple of one longword, * this and the console fontwidth yields the alignment for xres and * xres_virtual. 
* TODO: this way "odd" fontheights are not supported * * Special case in STE mode: blank and graphic positions don't align, * avoid trash at right margin */ if (par->hw.falcon.ste_mode) xres = (xres + 63) & ~63; else if (bpp == 1) xres = (xres + 31) & ~31; else xres = (xres + 15) & ~15; if (yres >= 400) yres = (yres + 15) & ~15; else yres = (yres + 7) & ~7; if (xres_virtual < xres) xres_virtual = xres; else if (bpp == 1) xres_virtual = (xres_virtual + 31) & ~31; else xres_virtual = (xres_virtual + 15) & ~15; if (yres_virtual <= 0) yres_virtual = 0; else if (yres_virtual < yres) yres_virtual = yres; /* backward bug-compatibility */ if (var->pixclock > 1) var->pixclock -= 1; par->hw.falcon.line_width = bpp * xres / 16; par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16; /* single or double pixel width */ xstretch = (xres < 640) ? 2 : 1; #if 0 /* SM124 supports only 640x400, this is rejected above */ if (mon_type == F_MON_SM) { if (xres != 640 && yres != 400) return -EINVAL; plen = 1; pclock = &f32; /* SM124-mode is special */ par->hw.falcon.ste_mode = 1; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x200; left_margin = hsync_len = 128 / plen; right_margin = 0; /* TODO set all margins */ } else #endif if (mon_type == F_MON_SC || mon_type == F_MON_TV) { plen = 2 * xstretch; if (var->pixclock > f32.t * plen) return -EINVAL; pclock = &f32; if (yres > 240) interlace = 1; if (var->pixclock == 0) { /* set some minimal margins which center the screen */ left_margin = 32; right_margin = 18; hsync_len = pclock->hsync / plen; upper_margin = 31; lower_margin = 14; vsync_len = interlace ? 
3 : 4; } else { left_margin = var->left_margin; right_margin = var->right_margin; hsync_len = var->hsync_len; upper_margin = var->upper_margin; lower_margin = var->lower_margin; vsync_len = var->vsync_len; if (var->vmode & FB_VMODE_INTERLACED) { upper_margin = (upper_margin + 1) / 2; lower_margin = (lower_margin + 1) / 2; vsync_len = (vsync_len + 1) / 2; } else if (var->vmode & FB_VMODE_DOUBLE) { upper_margin *= 2; lower_margin *= 2; vsync_len *= 2; } } } else { /* F_MON_VGA */ if (bpp == 16) xstretch = 2; /* Double pixel width only for hicolor */ /* Default values are used for vert./hor. timing if no pixelclock given. */ if (var->pixclock == 0) { int linesize; /* Choose master pixelclock depending on hor. timing */ plen = 1 * xstretch; if ((plen * xres + f25.right + f25.hsync + f25.left) * fb_info.monspecs.hfmin < f25.f) pclock = &f25; else if ((plen * xres + f32.right + f32.hsync + f32.left) * fb_info.monspecs.hfmin < f32.f) pclock = &f32; else if ((plen * xres + fext.right + fext.hsync + fext.left) * fb_info.monspecs.hfmin < fext.f && fext.f) pclock = &fext; else return -EINVAL; left_margin = pclock->left / plen; right_margin = pclock->right / plen; hsync_len = pclock->hsync / plen; linesize = left_margin + xres + right_margin + hsync_len; upper_margin = 31; lower_margin = 11; vsync_len = 3; } else { /* Choose largest pixelclock <= wanted clock */ int i; unsigned long pcl = ULONG_MAX; pclock = 0; for (i = 1; i <= 4; i *= 2) { if (f25.t * i >= var->pixclock && f25.t * i < pcl) { pcl = f25.t * i; pclock = &f25; } if (f32.t * i >= var->pixclock && f32.t * i < pcl) { pcl = f32.t * i; pclock = &f32; } if (fext.t && fext.t * i >= var->pixclock && fext.t * i < pcl) { pcl = fext.t * i; pclock = &fext; } } if (!pclock) return -EINVAL; plen = pcl / pclock->t; left_margin = var->left_margin; right_margin = var->right_margin; hsync_len = var->hsync_len; upper_margin = var->upper_margin; lower_margin = var->lower_margin; vsync_len = var->vsync_len; /* Internal unit is 
[single lines per (half-)frame] */ if (var->vmode & FB_VMODE_INTERLACED) { /* # lines in half frame */ /* External unit is [lines per full frame] */ upper_margin = (upper_margin + 1) / 2; lower_margin = (lower_margin + 1) / 2; vsync_len = (vsync_len + 1) / 2; } else if (var->vmode & FB_VMODE_DOUBLE) { /* External unit is [double lines per frame] */ upper_margin *= 2; lower_margin *= 2; vsync_len *= 2; } } if (pclock == &fext) longoffset = 1; /* VIDEL doesn't synchronize on short offset */ } /* Is video bus bandwidth (32MB/s) too low for this resolution? */ /* this is definitely wrong if bus clock != 32MHz */ if (pclock->f / plen / 8 * bpp > 32000000L) return -EINVAL; if (vsync_len < 1) vsync_len = 1; /* include sync lengths in right/lower margin for all calculations */ right_margin += hsync_len; lower_margin += vsync_len; /* ! In all calculations of margins we use # of lines in half frame * (which is a full frame in non-interlace mode), so we can switch * between interlace and non-interlace without messing around * with these. */ again: /* Set base_offset 128 and video bus width */ par->hw.falcon.vid_control = mon_type | f030_bus_width; if (!longoffset) par->hw.falcon.vid_control |= VCO_SHORTOFFS; /* base_offset 64 */ if (var->sync & FB_SYNC_HOR_HIGH_ACT) par->hw.falcon.vid_control |= VCO_HSYPOS; if (var->sync & FB_SYNC_VERT_HIGH_ACT) par->hw.falcon.vid_control |= VCO_VSYPOS; /* Pixelclock */ par->hw.falcon.vid_control |= pclock->control_mask; /* External or internal clock */ par->hw.falcon.sync = pclock->sync_mask | 0x2; /* Pixellength and prescale */ par->hw.falcon.vid_mode = (2 / plen) << 2; if (doubleline) par->hw.falcon.vid_mode |= VMO_DOUBLE; if (interlace) par->hw.falcon.vid_mode |= VMO_INTER; /********************* * Horizontal timing: unit = [master clock cycles] * unit of hxx-registers: [master clock cycles * prescale] * Hxx-registers are 9 bit wide * * 1 line = ((hht + 2) * 2 * prescale) clock cycles * * graphic output = hdb & 0x200 ? 
* ((hht + 2) * 2 - hdb + hde) * prescale - hdboff + hdeoff: * (hht + 2 - hdb + hde) * prescale - hdboff + hdeoff * (this must be a multiple of plen*128/bpp, on VGA pixels * to the right may be cut off with a bigger right margin) * * start of graphics relative to start of 1st halfline = hdb & 0x200 ? * (hdb - hht - 2) * prescale + hdboff : * hdb * prescale + hdboff * * end of graphics relative to start of 1st halfline = * (hde + hht + 2) * prescale + hdeoff *********************/ /* Calculate VIDEL registers */ { prescale = hxx_prescale(&par->hw.falcon); base_off = par->hw.falcon.vid_control & VCO_SHORTOFFS ? 64 : 128; /* Offsets depend on video mode */ /* Offsets are in clock cycles, divide by prescale to * calculate hd[be]-registers */ if (par->hw.falcon.f_shift & 0x100) { align = 1; hde_off = 0; hdb_off = (base_off + 16 * plen) + prescale; } else { align = 128 / bpp; hde_off = ((128 / bpp + 2) * plen); if (par->hw.falcon.ste_mode) hdb_off = (64 + base_off + (128 / bpp + 2) * plen) + prescale; else hdb_off = (base_off + (128 / bpp + 18) * plen) + prescale; } gstart = (prescale / 2 + plen * left_margin) / prescale; /* gend1 is for hde (gend-gstart multiple of align), shifter's xres */ gend1 = gstart + roundup(xres, align) * plen / prescale; /* gend2 is for hbb, visible xres (rest to gend1 is cut off by hblank) */ gend2 = gstart + xres * plen / prescale; par->HHT = plen * (left_margin + xres + right_margin) / (2 * prescale) - 2; /* par->HHT = (gend2 + plen * right_margin / prescale) / 2 - 2;*/ par->HDB = gstart - hdb_off / prescale; par->HBE = gstart; if (par->HDB < 0) par->HDB += par->HHT + 2 + 0x200; par->HDE = gend1 - par->HHT - 2 - hde_off / prescale; par->HBB = gend2 - par->HHT - 2; #if 0 /* One more Videl constraint: data fetch of two lines must not overlap */ if ((par->HDB & 0x200) && (par->HDB & ~0x200) - par->HDE <= 5) { /* if this happens increase margins, decrease hfreq. 
*/ } #endif if (hde_off % prescale) par->HBB++; /* compensate for non matching hde and hbb */ par->HSS = par->HHT + 2 - plen * hsync_len / prescale; if (par->HSS < par->HBB) par->HSS = par->HBB; } /* check hor. frequency */ hfreq = pclock->f / ((par->HHT + 2) * prescale * 2); if (hfreq > fb_info.monspecs.hfmax && mon_type != F_MON_VGA) { /* ++guenther: ^^^^^^^^^^^^^^^^^^^ can't remember why I did this */ /* Too high -> enlarge margin */ left_margin += 1; right_margin += 1; goto again; } if (hfreq > fb_info.monspecs.hfmax || hfreq < fb_info.monspecs.hfmin) return -EINVAL; /* Vxx-registers */ /* All Vxx must be odd in non-interlace, since frame starts in the middle * of the first displayed line! * One frame consists of VFT+1 half lines. VFT+1 must be even in * non-interlace, odd in interlace mode for synchronisation. * Vxx-registers are 11 bit wide */ par->VBE = (upper_margin * 2 + 1); /* must begin on odd halfline */ par->VDB = par->VBE; par->VDE = yres; if (!interlace) par->VDE <<= 1; if (doubleline) par->VDE <<= 1; /* VDE now half lines per (half-)frame */ par->VDE += par->VDB; par->VBB = par->VDE; par->VFT = par->VBB + (lower_margin * 2 - 1) - 1; par->VSS = par->VFT + 1 - (vsync_len * 2 - 1); /* vbb,vss,vft must be even in interlace mode */ if (interlace) { par->VBB++; par->VSS++; par->VFT++; } /* V-frequency check, hope I didn't create any loop here. */ /* Interlace and doubleline are mutually exclusive. 
*/ vfreq = (hfreq * 2) / (par->VFT + 1); if (vfreq > fb_info.monspecs.vfmax && !doubleline && !interlace) { /* Too high -> try again with doubleline */ doubleline = 1; goto again; } else if (vfreq < fb_info.monspecs.vfmin && !interlace && !doubleline) { /* Too low -> try again with interlace */ interlace = 1; goto again; } else if (vfreq < fb_info.monspecs.vfmin && doubleline) { /* Doubleline too low -> clear doubleline and enlarge margins */ int lines; doubleline = 0; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines - 2 * yres) > fb_info.monspecs.vfmax; lines++) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq > fb_info.monspecs.vfmax && doubleline) { /* Doubleline too high -> enlarge margins */ int lines; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines) > fb_info.monspecs.vfmax; lines += 2) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq > fb_info.monspecs.vfmax && interlace) { /* Interlace, too high -> enlarge margins */ int lines; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines) > fb_info.monspecs.vfmax; lines++) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq < fb_info.monspecs.vfmin || vfreq > fb_info.monspecs.vfmax) return -EINVAL; set_screen_base: linelen = xres_virtual * bpp / 8; if (yres_virtual * linelen > screen_len && screen_len) return -EINVAL; if (yres * linelen > screen_len && screen_len) return -EINVAL; if (var->yoffset + yres > yres_virtual && yres_virtual) return -EINVAL; par->yres_virtual = yres_virtual; par->screen_base = screen_base + var->yoffset * linelen; par->hw.falcon.xoffset = 0; par->next_line = linelen; return 0; } static int falcon_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { /* !!! only for VGA !!! 
*/ int linelen; int prescale, plen; int hdb_off, hde_off, base_off; struct falcon_hw *hw = &par->hw.falcon; memset(var, 0, sizeof(struct fb_var_screeninfo)); /* possible frequencies: 25.175 or 32MHz */ var->pixclock = hw->sync & 0x1 ? fext.t : hw->vid_control & VCO_CLOCK25 ? f25.t : f32.t; var->height = -1; var->width = -1; var->sync = 0; if (hw->vid_control & VCO_HSYPOS) var->sync |= FB_SYNC_HOR_HIGH_ACT; if (hw->vid_control & VCO_VSYPOS) var->sync |= FB_SYNC_VERT_HIGH_ACT; var->vmode = FB_VMODE_NONINTERLACED; if (hw->vid_mode & VMO_INTER) var->vmode |= FB_VMODE_INTERLACED; if (hw->vid_mode & VMO_DOUBLE) var->vmode |= FB_VMODE_DOUBLE; /* visible y resolution: * Graphics display starts at line VDB and ends at line * VDE. If interlace mode off unit of VC-registers is * half lines, else lines. */ var->yres = hw->vde - hw->vdb; if (!(var->vmode & FB_VMODE_INTERLACED)) var->yres >>= 1; if (var->vmode & FB_VMODE_DOUBLE) var->yres >>= 1; /* * to get bpp, we must examine f_shift and st_shift. * f_shift is valid if any of bits no. 10, 8 or 4 * is set. Priority in f_shift is: 10 ">" 8 ">" 4, i.e. * if bit 10 set then bit 8 and bit 4 don't care... 
* If all these bits are 0 get display depth from st_shift * (as for ST and STE) */ if (hw->f_shift & 0x400) /* 2 colors */ var->bits_per_pixel = 1; else if (hw->f_shift & 0x100) /* hicolor */ var->bits_per_pixel = 16; else if (hw->f_shift & 0x010) /* 8 bitplanes */ var->bits_per_pixel = 8; else if (hw->st_shift == 0) var->bits_per_pixel = 4; else if (hw->st_shift == 0x100) var->bits_per_pixel = 2; else /* if (hw->st_shift == 0x200) */ var->bits_per_pixel = 1; var->xres = hw->line_width * 16 / var->bits_per_pixel; var->xres_virtual = var->xres + hw->line_offset * 16 / var->bits_per_pixel; if (hw->xoffset) var->xres_virtual += 16; if (var->bits_per_pixel == 16) { var->red.offset = 11; var->red.length = 5; var->red.msb_right = 0; var->green.offset = 5; var->green.length = 6; var->green.msb_right = 0; var->blue.offset = 0; var->blue.length = 5; var->blue.msb_right = 0; } else { var->red.offset = 0; var->red.length = hw->ste_mode ? 4 : 6; if (var->red.length > var->bits_per_pixel) var->red.length = var->bits_per_pixel; var->red.msb_right = 0; var->grayscale = 0; var->blue = var->green = var->red; } var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; linelen = var->xres_virtual * var->bits_per_pixel / 8; if (screen_len) { if (par->yres_virtual) var->yres_virtual = par->yres_virtual; else /* yres_virtual == 0 means use maximum */ var->yres_virtual = screen_len / linelen; } else { if (hwscroll < 0) var->yres_virtual = 2 * var->yres; else var->yres_virtual = var->yres + hwscroll * 16; } var->xoffset = 0; /* TODO change this */ /* hdX-offsets */ prescale = hxx_prescale(hw); plen = 4 >> (hw->vid_mode >> 2 & 0x3); base_off = hw->vid_control & VCO_SHORTOFFS ? 
64 : 128; if (hw->f_shift & 0x100) { hde_off = 0; hdb_off = (base_off + 16 * plen) + prescale; } else { hde_off = ((128 / var->bits_per_pixel + 2) * plen); if (hw->ste_mode) hdb_off = (64 + base_off + (128 / var->bits_per_pixel + 2) * plen) + prescale; else hdb_off = (base_off + (128 / var->bits_per_pixel + 18) * plen) + prescale; } /* Right margin includes hsync */ var->left_margin = hdb_off + prescale * ((hw->hdb & 0x1ff) - (hw->hdb & 0x200 ? 2 + hw->hht : 0)); if (hw->ste_mode || mon_type != F_MON_VGA) var->right_margin = prescale * (hw->hht + 2 - hw->hde) - hde_off; else /* can't use this in ste_mode, because hbb is +1 off */ var->right_margin = prescale * (hw->hht + 2 - hw->hbb); var->hsync_len = prescale * (hw->hht + 2 - hw->hss); /* Lower margin includes vsync */ var->upper_margin = hw->vdb / 2; /* round down to full lines */ var->lower_margin = (hw->vft + 1 - hw->vde + 1) / 2; /* round up */ var->vsync_len = (hw->vft + 1 - hw->vss + 1) / 2; /* round up */ if (var->vmode & FB_VMODE_INTERLACED) { var->upper_margin *= 2; var->lower_margin *= 2; var->vsync_len *= 2; } else if (var->vmode & FB_VMODE_DOUBLE) { var->upper_margin = (var->upper_margin + 1) / 2; var->lower_margin = (var->lower_margin + 1) / 2; var->vsync_len = (var->vsync_len + 1) / 2; } var->pixclock *= plen; var->left_margin /= plen; var->right_margin /= plen; var->hsync_len /= plen; var->right_margin -= var->hsync_len; var->lower_margin -= var->vsync_len; if (screen_base) var->yoffset = (par->screen_base - screen_base) / linelen; else var->yoffset = 0; var->nonstd = 0; /* what is this for? 
*/ var->activate = 0; return 0; } static int f_change_mode; static struct falcon_hw f_new_mode; static int f_pan_display; static void falcon_get_par(struct atafb_par *par) { unsigned long addr; struct falcon_hw *hw = &par->hw.falcon; hw->line_width = shifter_f030.scn_width; hw->line_offset = shifter_f030.off_next; hw->st_shift = videl.st_shift & 0x300; hw->f_shift = videl.f_shift; hw->vid_control = videl.control; hw->vid_mode = videl.mode; hw->sync = shifter.syncmode & 0x1; hw->xoffset = videl.xoffset & 0xf; hw->hht = videl.hht; hw->hbb = videl.hbb; hw->hbe = videl.hbe; hw->hdb = videl.hdb; hw->hde = videl.hde; hw->hss = videl.hss; hw->vft = videl.vft; hw->vbb = videl.vbb; hw->vbe = videl.vbe; hw->vdb = videl.vdb; hw->vde = videl.vde; hw->vss = videl.vss; addr = (shifter.bas_hi & 0xff) << 16 | (shifter.bas_md & 0xff) << 8 | (shifter.bas_lo & 0xff); par->screen_base = phys_to_virt(addr); /* derived parameters */ hw->ste_mode = (hw->f_shift & 0x510) == 0 && hw->st_shift == 0x100; hw->mono = (hw->f_shift & 0x400) || ((hw->f_shift & 0x510) == 0 && hw->st_shift == 0x200); } static void falcon_set_par(struct atafb_par *par) { f_change_mode = 0; /* only set screen_base if really necessary */ if (current_par.screen_base != par->screen_base) fbhw->set_screen_base(par->screen_base); /* Don't touch any other registers if we keep the default resolution */ if (DontCalcRes) return; /* Tell vbl-handler to change video mode. * We change modes only on next VBL, to avoid desynchronisation * (a shift to the right and wrap around by a random number of pixels * in all monochrome modes). * This seems to work on my Falcon. */ f_new_mode = par->hw.falcon; f_change_mode = 1; } static irqreturn_t falcon_vbl_switcher(int irq, void *dummy) { struct falcon_hw *hw = &f_new_mode; if (f_change_mode) { f_change_mode = 0; if (hw->sync & 0x1) { /* Enable external pixelclock. This code only for ScreenWonder */ *(volatile unsigned short *)0xffff9202 = 0xffbf; } else { /* Turn off external clocks. 
Read sets all output bits to 1. */ *(volatile unsigned short *)0xffff9202; } shifter.syncmode = hw->sync; videl.hht = hw->hht; videl.hbb = hw->hbb; videl.hbe = hw->hbe; videl.hdb = hw->hdb; videl.hde = hw->hde; videl.hss = hw->hss; videl.vft = hw->vft; videl.vbb = hw->vbb; videl.vbe = hw->vbe; videl.vdb = hw->vdb; videl.vde = hw->vde; videl.vss = hw->vss; videl.f_shift = 0; /* write enables Falcon palette, 0: 4 planes */ if (hw->ste_mode) { videl.st_shift = hw->st_shift; /* write enables STE palette */ } else { /* IMPORTANT: * set st_shift 0, so we can tell the screen-depth if f_shift == 0. * Writing 0 to f_shift enables 4 plane Falcon mode but * doesn't set st_shift. st_shift != 0 (!= 4planes) is impossible * with Falcon palette. */ videl.st_shift = 0; /* now back to Falcon palette mode */ videl.f_shift = hw->f_shift; } /* writing to st_shift changed scn_width and vid_mode */ videl.xoffset = hw->xoffset; shifter_f030.scn_width = hw->line_width; shifter_f030.off_next = hw->line_offset; videl.control = hw->vid_control; videl.mode = hw->vid_mode; } if (f_pan_display) { f_pan_display = 0; videl.xoffset = current_par.hw.falcon.xoffset; shifter_f030.off_next = current_par.hw.falcon.line_offset; } return IRQ_HANDLED; } static int falcon_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct atafb_par *par = (struct atafb_par *)info->par; int xoffset; int bpp = info->var.bits_per_pixel; if (bpp == 1) var->xoffset = up(var->xoffset, 32); if (bpp != 16) par->hw.falcon.xoffset = var->xoffset & 15; else { par->hw.falcon.xoffset = 0; var->xoffset = up(var->xoffset, 2); } par->hw.falcon.line_offset = bpp * (info->var.xres_virtual - info->var.xres) / 16; if (par->hw.falcon.xoffset) par->hw.falcon.line_offset -= bpp; xoffset = var->xoffset - par->hw.falcon.xoffset; par->screen_base = screen_base + (var->yoffset * info->var.xres_virtual + xoffset) * bpp / 8; if (fbhw->set_screen_base) fbhw->set_screen_base(par->screen_base); else return -EINVAL; /* shouldn't 
	   happen */
	f_pan_display = 1;
	return 0;
}

/*
 * Set one Falcon palette entry.  Components arrive as 16-bit values;
 * the Falcon palette keeps 6 bits per gun.  The first 16 entries are
 * mirrored into the ST(E)-compatible palette registers and, for the
 * console, into the 16-bit truecolor pseudo palette.
 */
static int falcon_setcolreg(unsigned int regno, unsigned int red,
			    unsigned int green, unsigned int blue,
			    unsigned int transp, struct fb_info *info)
{
	if (regno > 255)
		return 1;
	f030_col[regno] = (((red & 0xfc00) << 16) |
			   ((green & 0xfc00) << 8) |
			   ((blue & 0xfc00) >> 8));
	if (regno < 16) {
		/* NOTE(review): '<<' binds tighter than '|', so the red and
		 * green terms below do not group per-gun the way the layout
		 * suggests -- verify against the STE palette register format.
		 */
		shifter_tt.color_reg[regno] =
			(((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
			(((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
			((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
		((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
						       ((green & 0xfc00) >> 5) |
						       ((blue & 0xf800) >> 11));
	}
	return 0;
}

/*
 * Blank/unblank by moving the VDB/VDE and sync positions outside the
 * frame instead of touching the shifter mode, so no reprogramming of
 * the full timing set is needed.
 */
static int falcon_blank(int blank_mode)
{
	/* ++guenther: we can switch off graphics by changing VDB and VDE,
	 * so VIDEL doesn't hog the bus while saving.
	 * (this may affect usleep()).
	 */
	int vdb, vss, hbe, hss;

	if (mon_type == F_MON_SM)	/* this doesn't work on SM124 */
		return 1;

	vdb = current_par.VDB;
	vss = current_par.VSS;
	hbe = current_par.HBE;
	hss = current_par.HSS;

	if (blank_mode >= 1) {
		/* disable graphics output (this speeds up the CPU) ... */
		vdb = current_par.VFT + 1;
		/* ... and blank all lines */
		hbe = current_par.HHT + 2;
	}
	/* use VESA suspend modes on VGA monitors */
	if (mon_type == F_MON_VGA) {
		if (blank_mode == 2 || blank_mode == 4)
			vss = current_par.VFT + 1;
		if (blank_mode == 3 || blank_mode == 4)
			hss = current_par.HHT + 2;
	}
	videl.vdb = vdb;
	videl.vss = vss;
	videl.hbe = hbe;
	videl.hss = hss;
	return 0;
}

/*
 * Probe the connected monitor type from the hardware ID byte, set the
 * monitor frequency limits and derive the hsync lengths for each pixel
 * clock, then record the current video mode as the autodetect slot.
 */
static int falcon_detect(void)
{
	struct atafb_par par;
	unsigned char fhw;

	/* Determine connected monitor and set monitor parameters */
	fhw = *(unsigned char *)0xffff8006;
	mon_type = fhw >> 6 & 0x3;
	/* bit 1 of fhw: 1=32 bit ram bus, 0=16 bit */
	f030_bus_width = fhw << 6 & 0x80;
	switch (mon_type) {
	case F_MON_SM:
		fb_info.monspecs.vfmin = 70;
		fb_info.monspecs.vfmax = 72;
		fb_info.monspecs.hfmin = 35713;
		fb_info.monspecs.hfmax = 35715;
		break;
	case F_MON_SC:
	case F_MON_TV:
		/* PAL...NTSC */
		fb_info.monspecs.vfmin = 49;	/* not 50, since TOS defaults to 49.9x Hz */
		fb_info.monspecs.vfmax = 60;
		fb_info.monspecs.hfmin = 15620;
		fb_info.monspecs.hfmax = 15755;
		break;
	}
	/* initialize hsync-len */
	f25.hsync = h_syncs[mon_type] / f25.t;
	f32.hsync = h_syncs[mon_type] / f32.t;
	if (fext.t)
		fext.hsync = h_syncs[mon_type] / fext.t;

	falcon_get_par(&par);
	falcon_encode_var(&atafb_predefined[0], &par);

	/* Detected mode is always the "autodetect" slot */
	return 1;
}
#endif /* ATAFB_FALCON */

/* ------------------- ST(E) specific functions ---------------------- */

#ifdef ATAFB_STE
/* Fill in the fixed fb parameters for the current ST(E) shifter mode. */
static int stste_encode_fix(struct fb_fix_screeninfo *fix,
			    struct atafb_par *par)
{
	int mode;

	strcpy(fix->id, "Atari Builtin");
	fix->smem_start = (unsigned long)real_screen_base;
	fix->smem_len = screen_len;
	fix->type = FB_TYPE_INTERLEAVED_PLANES;
	fix->type_aux = 2;
	fix->visual = FB_VISUAL_PSEUDOCOLOR;
	mode = par->hw.st.mode & 3;
	if (mode == ST_HIGH) {
		fix->type = FB_TYPE_PACKED_PIXELS;
		fix->type_aux = 0;
		fix->visual = FB_VISUAL_MONO10;
	}
	/* Panning needs the extended (STE) shifter. */
	if (ATARIHW_PRESENT(EXTD_SHIFTER)) {
		fix->xpanstep = 16;
		fix->ypanstep = 1;
	} else {
		fix->xpanstep = 0;
		fix->ypanstep = 0;
	}
	fix->ywrapstep =
0; fix->line_length = par->next_line; fix->accel = FB_ACCEL_ATARIBLITT; return 0; } static int stste_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int xres = var->xres; int yres = var->yres; int bpp = var->bits_per_pixel; int linelen; int yres_virtual = var->yres_virtual; if (mono_moni) { if (bpp > 1 || xres > sttt_xres || yres > st_yres) return -EINVAL; par->hw.st.mode = ST_HIGH; xres = sttt_xres; yres = st_yres; bpp = 1; } else { if (bpp > 4 || xres > sttt_xres || yres > st_yres) return -EINVAL; if (bpp > 2) { if (xres > sttt_xres / 2 || yres > st_yres / 2) return -EINVAL; par->hw.st.mode = ST_LOW; xres = sttt_xres / 2; yres = st_yres / 2; bpp = 4; } else if (bpp > 1) { if (xres > sttt_xres || yres > st_yres / 2) return -EINVAL; par->hw.st.mode = ST_MID; xres = sttt_xres; yres = st_yres / 2; bpp = 2; } else return -EINVAL; } if (yres_virtual <= 0) yres_virtual = 0; else if (yres_virtual < yres) yres_virtual = yres; if (var->sync & FB_SYNC_EXT) par->hw.st.sync = (par->hw.st.sync & ~1) | 1; else par->hw.st.sync = (par->hw.st.sync & ~1); linelen = xres * bpp / 8; if (yres_virtual * linelen > screen_len && screen_len) return -EINVAL; if (yres * linelen > screen_len && screen_len) return -EINVAL; if (var->yoffset + yres > yres_virtual && yres_virtual) return -EINVAL; par->yres_virtual = yres_virtual; par->screen_base = screen_base + var->yoffset * linelen; par->next_line = linelen; return 0; } static int stste_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int linelen; memset(var, 0, sizeof(struct fb_var_screeninfo)); var->red.offset = 0; var->red.length = ATARIHW_PRESENT(EXTD_SHIFTER) ? 
4 : 3; var->red.msb_right = 0; var->grayscale = 0; var->pixclock = 31041; var->left_margin = 120; /* these are incorrect */ var->right_margin = 100; var->upper_margin = 8; var->lower_margin = 16; var->hsync_len = 140; var->vsync_len = 30; var->height = -1; var->width = -1; if (!(par->hw.st.sync & 1)) var->sync = 0; else var->sync = FB_SYNC_EXT; switch (par->hw.st.mode & 3) { case ST_LOW: var->xres = sttt_xres / 2; var->yres = st_yres / 2; var->bits_per_pixel = 4; break; case ST_MID: var->xres = sttt_xres; var->yres = st_yres / 2; var->bits_per_pixel = 2; break; case ST_HIGH: var->xres = sttt_xres; var->yres = st_yres; var->bits_per_pixel = 1; break; } var->blue = var->green = var->red; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->xres_virtual = sttt_xres_virtual; linelen = var->xres_virtual * var->bits_per_pixel / 8; ovsc_addlen = linelen * (sttt_yres_virtual - st_yres); if (!use_hwscroll) var->yres_virtual = var->yres; else if (screen_len) { if (par->yres_virtual) var->yres_virtual = par->yres_virtual; else /* yres_virtual == 0 means use maximum */ var->yres_virtual = screen_len / linelen; } else { if (hwscroll < 0) var->yres_virtual = 2 * var->yres; else var->yres_virtual = var->yres + hwscroll * 16; } var->xoffset = 0; if (screen_base) var->yoffset = (par->screen_base - screen_base) / linelen; else var->yoffset = 0; var->nonstd = 0; var->activate = 0; var->vmode = FB_VMODE_NONINTERLACED; return 0; } static void stste_get_par(struct atafb_par *par) { unsigned long addr; par->hw.st.mode = shifter_tt.st_shiftmode; par->hw.st.sync = shifter.syncmode; addr = ((shifter.bas_hi & 0xff) << 16) | ((shifter.bas_md & 0xff) << 8); if (ATARIHW_PRESENT(EXTD_SHIFTER)) addr |= (shifter.bas_lo & 0xff); par->screen_base = phys_to_virt(addr); } static void stste_set_par(struct atafb_par *par) { shifter_tt.st_shiftmode = par->hw.st.mode; shifter.syncmode = par->hw.st.sync; /* only set screen_base if really necessary */ if (current_par.screen_base 
!= par->screen_base) fbhw->set_screen_base(par->screen_base); } static int stste_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { if (regno > 15) return 1; red >>= 12; blue >>= 12; green >>= 12; if (ATARIHW_PRESENT(EXTD_SHIFTER)) shifter_tt.color_reg[regno] = (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) | (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) | ((blue & 0xe) >> 1) | ((blue & 1) << 3); else shifter_tt.color_reg[regno] = ((red & 0xe) << 7) | ((green & 0xe) << 3) | ((blue & 0xe) >> 1); return 0; } static int stste_detect(void) { struct atafb_par par; /* Determine the connected monitor: The DMA sound must be * disabled before reading the MFP GPIP, because the Sound * Done Signal and the Monochrome Detect are XORed together! */ if (ATARIHW_PRESENT(PCM_8BIT)) { tt_dmasnd.ctrl = DMASND_CTRL_OFF; udelay(20); /* wait a while for things to settle down */ } mono_moni = (st_mfp.par_dt_reg & 0x80) == 0; stste_get_par(&par); stste_encode_var(&atafb_predefined[0], &par); if (!ATARIHW_PRESENT(EXTD_SHIFTER)) use_hwscroll = 0; return 1; } static void stste_set_screen_base(void *s_base) { unsigned long addr; addr = virt_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); if (ATARIHW_PRESENT(EXTD_SHIFTER)) shifter.bas_lo = (unsigned char)(addr & 0x0000ff); } #endif /* ATAFB_STE */ /* Switching the screen size should be done during vsync, otherwise * the margins may get messed up. This is a well known problem of * the ST's video system. * * Unfortunately there is hardly any way to find the vsync, as the * vertical blank interrupt is no longer in time on machines with * overscan type modifications. * * We can, however, use Timer B to safely detect the black shoulder, * but then we've got to guess an appropriate delay to find the vsync. * This might not work on every machine. 
* * martin_rogge @ ki.maus.de, 8th Aug 1995 */ #define LINE_DELAY (mono_moni ? 30 : 70) #define SYNC_DELAY (mono_moni ? 1500 : 2000) /* SWITCH_ACIA may be used for Falcon (ScreenBlaster III internal!) */ static void st_ovsc_switch(void) { unsigned long flags; register unsigned char old, new; if (!(atari_switches & ATARI_SWITCH_OVSC_MASK)) return; local_irq_save(flags); st_mfp.tim_ct_b = 0x10; st_mfp.active_edge |= 8; st_mfp.tim_ct_b = 0; st_mfp.tim_dt_b = 0xf0; st_mfp.tim_ct_b = 8; while (st_mfp.tim_dt_b > 1) /* TOS does it this way, don't ask why */ ; new = st_mfp.tim_dt_b; do { udelay(LINE_DELAY); old = new; new = st_mfp.tim_dt_b; } while (old != new); st_mfp.tim_ct_b = 0x10; udelay(SYNC_DELAY); if (atari_switches & ATARI_SWITCH_OVSC_IKBD) acia.key_ctrl = ACIA_DIV64 | ACIA_D8N1S | ACIA_RHTID | ACIA_RIE; if (atari_switches & ATARI_SWITCH_OVSC_MIDI) acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | ACIA_RHTID; if (atari_switches & (ATARI_SWITCH_OVSC_SND6|ATARI_SWITCH_OVSC_SND7)) { sound_ym.rd_data_reg_sel = 14; sound_ym.wd_data = sound_ym.rd_data_reg_sel | ((atari_switches & ATARI_SWITCH_OVSC_SND6) ? 0x40:0) | ((atari_switches & ATARI_SWITCH_OVSC_SND7) ? 0x80:0); } local_irq_restore(flags); } /* ------------------- External Video ---------------------- */ #ifdef ATAFB_EXT static int ext_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Unknown Extern"); fix->smem_start = (unsigned long)external_addr; fix->smem_len = PAGE_ALIGN(external_len); if (external_depth == 1) { fix->type = FB_TYPE_PACKED_PIXELS; /* The letters 'n' and 'i' in the "atavideo=external:" stand * for "normal" and "inverted", rsp., in the monochrome case */ fix->visual = (external_pmode == FB_TYPE_INTERLEAVED_PLANES || external_pmode == FB_TYPE_PACKED_PIXELS) ? FB_VISUAL_MONO10 : FB_VISUAL_MONO01; } else { /* Use STATIC if we don't know how to access color registers */ int visual = external_vgaiobase ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; switch (external_pmode) { case -1: /* truecolor */ fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = FB_VISUAL_TRUECOLOR; break; case FB_TYPE_PACKED_PIXELS: fix->type = FB_TYPE_PACKED_PIXELS; fix->visual = visual; break; case FB_TYPE_PLANES: fix->type = FB_TYPE_PLANES; fix->visual = visual; break; case FB_TYPE_INTERLEAVED_PLANES: fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; fix->visual = visual; break; } } fix->xpanstep = 0; fix->ypanstep = 0; fix->ywrapstep = 0; fix->line_length = par->next_line; return 0; } static int ext_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { struct fb_var_screeninfo *myvar = &atafb_predefined[0]; if (var->bits_per_pixel > myvar->bits_per_pixel || var->xres > myvar->xres || var->xres_virtual > myvar->xres_virtual || var->yres > myvar->yres || var->xoffset > 0 || var->yoffset > 0) return -EINVAL; par->next_line = external_xres_virtual * external_depth / 8; return 0; } static int ext_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { memset(var, 0, sizeof(struct fb_var_screeninfo)); var->red.offset = 0; var->red.length = (external_pmode == -1) ? external_depth / 3 : (external_vgaiobase ? 
external_bitspercol : 0); var->red.msb_right = 0; var->grayscale = 0; var->pixclock = 31041; var->left_margin = 120; /* these are surely incorrect */ var->right_margin = 100; var->upper_margin = 8; var->lower_margin = 16; var->hsync_len = 140; var->vsync_len = 30; var->height = -1; var->width = -1; var->sync = 0; var->xres = external_xres; var->yres = external_yres; var->xres_virtual = external_xres_virtual; var->bits_per_pixel = external_depth; var->blue = var->green = var->red; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->yres_virtual = var->yres; var->xoffset = 0; var->yoffset = 0; var->nonstd = 0; var->activate = 0; var->vmode = FB_VMODE_NONINTERLACED; return 0; } static void ext_get_par(struct atafb_par *par) { par->screen_base = external_addr; } static void ext_set_par(struct atafb_par *par) { } #define OUTB(port,val) \ *((unsigned volatile char *) ((port)+external_vgaiobase)) = (val) #define INB(port) \ (*((unsigned volatile char *) ((port)+external_vgaiobase))) #define DACDelay \ do { \ unsigned char tmp = INB(0x3da); \ tmp = INB(0x3da); \ } while (0) static int ext_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { unsigned char colmask = (1 << external_bitspercol) - 1; if (!external_vgaiobase) return 1; if (regno > 255) return 1; switch (external_card_type) { case IS_VGA: OUTB(0x3c8, regno); DACDelay; OUTB(0x3c9, red & colmask); DACDelay; OUTB(0x3c9, green & colmask); DACDelay; OUTB(0x3c9, blue & colmask); DACDelay; return 0; case IS_MV300: OUTB((MV300_reg[regno] << 2) + 1, red); OUTB((MV300_reg[regno] << 2) + 1, green); OUTB((MV300_reg[regno] << 2) + 1, blue); return 0; default: return 1; } } static int ext_detect(void) { struct fb_var_screeninfo *myvar = &atafb_predefined[0]; struct atafb_par dummy_par; myvar->xres = external_xres; myvar->xres_virtual = external_xres_virtual; myvar->yres = external_yres; myvar->bits_per_pixel = 
external_depth; ext_encode_var(myvar, &dummy_par); return 1; } #endif /* ATAFB_EXT */ /* ------ This is the same for most hardware types -------- */ static void set_screen_base(void *s_base) { unsigned long addr; addr = virt_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); shifter.bas_lo = (unsigned char)(addr & 0x0000ff); } static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct atafb_par *par = (struct atafb_par *)info->par; if (!fbhw->set_screen_base || (!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset)) return -EINVAL; var->xoffset = up(var->xoffset, 16); par->screen_base = screen_base + (var->yoffset * info->var.xres_virtual + var->xoffset) * info->var.bits_per_pixel / 8; fbhw->set_screen_base(par->screen_base); return 0; } /* ------------ Interfaces to hardware functions ------------ */ #ifdef ATAFB_TT static struct fb_hwswitch tt_switch = { .detect = tt_detect, .encode_fix = tt_encode_fix, .decode_var = tt_decode_var, .encode_var = tt_encode_var, .get_par = tt_get_par, .set_par = tt_set_par, .set_screen_base = set_screen_base, .pan_display = pan_display, }; #endif #ifdef ATAFB_FALCON static struct fb_hwswitch falcon_switch = { .detect = falcon_detect, .encode_fix = falcon_encode_fix, .decode_var = falcon_decode_var, .encode_var = falcon_encode_var, .get_par = falcon_get_par, .set_par = falcon_set_par, .set_screen_base = set_screen_base, .blank = falcon_blank, .pan_display = falcon_pan_display, }; #endif #ifdef ATAFB_STE static struct fb_hwswitch st_switch = { .detect = stste_detect, .encode_fix = stste_encode_fix, .decode_var = stste_decode_var, .encode_var = stste_encode_var, .get_par = stste_get_par, .set_par = stste_set_par, .set_screen_base = stste_set_screen_base, .pan_display = pan_display }; #endif #ifdef ATAFB_EXT static struct fb_hwswitch ext_switch = { .detect = ext_detect, .encode_fix = ext_encode_fix, 
.decode_var = ext_decode_var, .encode_var = ext_encode_var, .get_par = ext_get_par, .set_par = ext_set_par, }; #endif static void ata_get_par(struct atafb_par *par) { if (current_par_valid) *par = current_par; else fbhw->get_par(par); } static void ata_set_par(struct atafb_par *par) { fbhw->set_par(par); current_par = *par; current_par_valid = 1; } /* =========================================================== */ /* ============== Hardware Independent Functions ============= */ /* =========================================================== */ /* used for hardware scrolling */ static int do_fb_set_var(struct fb_var_screeninfo *var, int isactive) { int err, activate; struct atafb_par par; err = fbhw->decode_var(var, &par); if (err) return err; activate = var->activate; if (((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) && isactive) ata_set_par(&par); fbhw->encode_var(var, &par); var->activate = activate; return 0; } /* fbhw->encode_fix() must be called with fb_info->mm_lock held * if it is called after the register_framebuffer() - not a case here */ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info) { struct atafb_par par; int err; // Get fix directly (case con == -1 before)?? err = fbhw->decode_var(&info->var, &par); if (err) return err; memset(fix, 0, sizeof(struct fb_fix_screeninfo)); err = fbhw->encode_fix(fix, &par); return err; } static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct atafb_par par; ata_get_par(&par); fbhw->encode_var(var, &par); return 0; } // No longer called by fbcon! 
// Still called by set_var internally static void atafb_set_disp(struct fb_info *info) { atafb_get_var(&info->var, info); atafb_get_fix(&info->fix, info); info->screen_base = (void *)info->fix.smem_start; } static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { red >>= 8; green >>= 8; blue >>= 8; return info->fbops->fb_setcolreg(regno, red, green, blue, transp, info); } static int atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { int xoffset = var->xoffset; int yoffset = var->yoffset; int err; if (var->vmode & FB_VMODE_YWRAP) { if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset) return -EINVAL; } else { if (xoffset + info->var.xres > info->var.xres_virtual || yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; } if (fbhw->pan_display) { err = fbhw->pan_display(var, info); if (err) return err; } else return -EINVAL; info->var.xoffset = xoffset; info->var.yoffset = yoffset; if (var->vmode & FB_VMODE_YWRAP) info->var.vmode |= FB_VMODE_YWRAP; else info->var.vmode &= ~FB_VMODE_YWRAP; return 0; } /* * generic drawing routines; imageblit needs updating for image depth > 1 */ #if BITS_PER_LONG == 32 #define BYTES_PER_LONG 4 #define SHIFT_PER_LONG 5 #elif BITS_PER_LONG == 64 #define BYTES_PER_LONG 8 #define SHIFT_PER_LONG 6 #else #define Please update me #endif static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; u32 width, height; if (!rect->width || !rect->height) return; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_fillrect(info, rect); return; } #endif /* * We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly. * */ x2 = rect->dx + rect->width; y2 = rect->dy + rect->height; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - rect->dx; height = y2 - rect->dy; if (info->var.bits_per_pixel == 1) atafb_mfb_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else atafb_iplan2p8_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); return; } static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; u32 dx, dy, sx, sy, width, height; int rev_copy = 0; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_copyarea(info, area); return; } #endif /* clip the destination */ x2 = area->dx + area->width; y2 = area->dy + area->height; dx = area->dx > 0 ? area->dx : 0; dy = area->dy > 0 ? area->dy : 0; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - dx; height = y2 - dy; if (area->sx + dx < area->dx || area->sy + dy < area->dy) return; /* update sx,sy */ sx = area->sx + (dx - area->dx); sy = area->sy + (dy - area->dy); /* the source must be completely inside the virtual screen */ if (sx + width > info->var.xres_virtual || sy + height > info->var.yres_virtual) return; if (dy > sy || (dy == sy && dx > sx)) { dy += height; sy += height; rev_copy = 1; } if (info->var.bits_per_pixel == 1) atafb_mfb_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else atafb_iplan2p8_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); return; } static void atafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; unsigned long *dst; int dst_idx; const char *src; u32 dx, dy, width, height, pitch; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_imageblit(info, image); return; } #endif /* * We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly like we are * doing here. */ x2 = image->dx + image->width; y2 = image->dy + image->height; dx = image->dx; dy = image->dy; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - dx; height = y2 - dy; if (image->depth == 1) { // used for font data dst = (unsigned long *) ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1)); dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8; dst_idx += dy * par->next_line * 8 + dx; src = image->data; pitch = (image->width + 7) / 8; while (height--) { if (info->var.bits_per_pixel == 1) atafb_mfb_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else atafb_iplan2p8_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); dy++; src += pitch; } } else { c2p_iplan2(info->screen_base, image->data, dx, dy, width, height, par->next_line, image->width, info->var.bits_per_pixel); } } static int atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { switch (cmd) { #ifdef FBCMD_GET_CURRENTPAR case FBCMD_GET_CURRENTPAR: if (copy_to_user((void *)arg, (void *)&current_par, sizeof(struct atafb_par))) return -EFAULT; return 0; #endif #ifdef FBCMD_SET_CURRENTPAR case FBCMD_SET_CURRENTPAR: if (copy_from_user((void *)&current_par, (void *)arg, sizeof(struct atafb_par))) return -EFAULT; ata_set_par(&current_par); return 0; #endif } return -EINVAL; } /* (un)blank/poweroff * 0 = unblank * 1 = blank * 2 = suspend vsync * 3 = suspend hsync * 4 = off */ static int atafb_blank(int blank, struct fb_info *info) { unsigned short black[16]; struct fb_cmap cmap; if (fbhw->blank && !fbhw->blank(blank)) return 1; if (blank) { memset(black, 0, 16 * sizeof(unsigned short)); cmap.red = black; cmap.green = black; cmap.blue = black; cmap.transp = NULL; cmap.start = 0; cmap.len = 16; fb_set_cmap(&cmap, info); } #if 0 
else do_install_cmap(info); #endif return 0; } /* * New fbcon interface ... */ /* check var by decoding var into hw par, rounding if necessary, * then encoding hw par back into new, validated var */ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err; struct atafb_par par; /* Validate wanted screen parameters */ // if ((err = ata_decode_var(var, &par))) err = fbhw->decode_var(var, &par); if (err) return err; /* Encode (possibly rounded) screen parameters */ fbhw->encode_var(var, &par); return 0; } /* actually set hw par by decoding var, then setting hardware from * hw par just decoded */ static int atafb_set_par(struct fb_info *info) { struct atafb_par *par = (struct atafb_par *)info->par; /* Decode wanted screen parameters */ fbhw->decode_var(&info->var, par); mutex_lock(&info->mm_lock); fbhw->encode_fix(&info->fix, par); mutex_unlock(&info->mm_lock); /* Set new videomode */ ata_set_par(par); return 0; } static struct fb_ops atafb_ops = { .owner = THIS_MODULE, .fb_check_var = atafb_check_var, .fb_set_par = atafb_set_par, .fb_setcolreg = atafb_setcolreg, .fb_blank = atafb_blank, .fb_pan_display = atafb_pan_display, .fb_fillrect = atafb_fillrect, .fb_copyarea = atafb_copyarea, .fb_imageblit = atafb_imageblit, .fb_ioctl = atafb_ioctl, }; static void check_default_par(int detected_mode) { char default_name[10]; int i; struct fb_var_screeninfo var; unsigned long min_mem; /* First try the user supplied mode */ if (default_par) { var = atafb_predefined[default_par - 1]; var.activate = FB_ACTIVATE_TEST; if (do_fb_set_var(&var, 1)) default_par = 0; /* failed */ } /* Next is the autodetected one */ if (!default_par) { var = atafb_predefined[detected_mode - 1]; /* autodetect */ var.activate = FB_ACTIVATE_TEST; if (!do_fb_set_var(&var, 1)) default_par = detected_mode; } /* If that also failed, try some default modes... */ if (!default_par) { /* try default1, default2... 
*/ for (i = 1; i < 10; i++) { sprintf(default_name,"default%d", i); default_par = get_video_mode(default_name); if (!default_par) panic("can't set default video mode"); var = atafb_predefined[default_par - 1]; var.activate = FB_ACTIVATE_TEST; if (!do_fb_set_var(&var,1)) break; /* ok */ } } min_mem = var.xres_virtual * var.yres_virtual * var.bits_per_pixel / 8; if (default_mem_req < min_mem) default_mem_req = min_mem; } #ifdef ATAFB_EXT static void __init atafb_setup_ext(char *spec) { int xres, xres_virtual, yres, depth, planes; unsigned long addr, len; char *p; /* Format is: <xres>;<yres>;<depth>;<plane organ.>; * <screen mem addr> * [;<screen mem length>[;<vgaiobase>[;<bits-per-col>[;<colorreg-type> * [;<xres-virtual>]]]]] * * 09/23/97 Juergen * <xres_virtual>: hardware's x-resolution (f.e. ProMST) * * Even xres_virtual is available, we neither support panning nor hw-scrolling! */ p = strsep(&spec, ";"); if (!p || !*p) return; xres_virtual = xres = simple_strtoul(p, NULL, 10); if (xres <= 0) return; p = strsep(&spec, ";"); if (!p || !*p) return; yres = simple_strtoul(p, NULL, 10); if (yres <= 0) return; p = strsep(&spec, ";"); if (!p || !*p) return; depth = simple_strtoul(p, NULL, 10); if (depth != 1 && depth != 2 && depth != 4 && depth != 8 && depth != 16 && depth != 24) return; p = strsep(&spec, ";"); if (!p || !*p) return; if (*p == 'i') planes = FB_TYPE_INTERLEAVED_PLANES; else if (*p == 'p') planes = FB_TYPE_PACKED_PIXELS; else if (*p == 'n') planes = FB_TYPE_PLANES; else if (*p == 't') planes = -1; /* true color */ else return; p = strsep(&spec, ";"); if (!p || !*p) return; addr = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (!p || !*p) len = xres * yres * depth / 8; else len = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (p && *p) external_vgaiobase = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (p && *p) { external_bitspercol = simple_strtoul(p, NULL, 0); if (external_bitspercol > 8) external_bitspercol = 8; else if 
(external_bitspercol < 1) external_bitspercol = 1; } p = strsep(&spec, ";"); if (p && *p) { if (!strcmp(p, "vga")) external_card_type = IS_VGA; if (!strcmp(p, "mv300")) external_card_type = IS_MV300; } p = strsep(&spec, ";"); if (p && *p) { xres_virtual = simple_strtoul(p, NULL, 10); if (xres_virtual < xres) xres_virtual = xres; if (xres_virtual * yres * depth / 8 > len) len = xres_virtual * yres * depth / 8; } external_xres = xres; external_xres_virtual = xres_virtual; external_yres = yres; external_depth = depth; external_pmode = planes; external_addr = (void *)addr; external_len = len; if (external_card_type == IS_MV300) { switch (external_depth) { case 1: MV300_reg = MV300_reg_1bit; break; case 4: MV300_reg = MV300_reg_4bit; break; case 8: MV300_reg = MV300_reg_8bit; break; } } } #endif /* ATAFB_EXT */ static void __init atafb_setup_int(char *spec) { /* Format to config extended internal video hardware like OverScan: * "internal:<xres>;<yres>;<xres_max>;<yres_max>;<offset>" * Explanation: * <xres>: x-resolution * <yres>: y-resolution * The following are only needed if you have an overscan which * needs a black border: * <xres_max>: max. length of a line in pixels your OverScan hardware would allow * <yres_max>: max. 
number of lines your OverScan hardware would allow * <offset>: Offset from physical beginning to visible beginning * of screen in bytes */ int xres; char *p; if (!(p = strsep(&spec, ";")) || !*p) return; xres = simple_strtoul(p, NULL, 10); if (!(p = strsep(&spec, ";")) || !*p) return; sttt_xres = xres; tt_yres = st_yres = simple_strtoul(p, NULL, 10); if ((p = strsep(&spec, ";")) && *p) sttt_xres_virtual = simple_strtoul(p, NULL, 10); if ((p = strsep(&spec, ";")) && *p) sttt_yres_virtual = simple_strtoul(p, NULL, 0); if ((p = strsep(&spec, ";")) && *p) ovsc_offset = simple_strtoul(p, NULL, 0); if (ovsc_offset || (sttt_yres_virtual != st_yres)) use_hwscroll = 0; } #ifdef ATAFB_FALCON static void __init atafb_setup_mcap(char *spec) { char *p; int vmin, vmax, hmin, hmax; /* Format for monitor capabilities is: <Vmin>;<Vmax>;<Hmin>;<Hmax> * <V*> vertical freq. in Hz * <H*> horizontal freq. in kHz */ if (!(p = strsep(&spec, ";")) || !*p) return; vmin = simple_strtoul(p, NULL, 10); if (vmin <= 0) return; if (!(p = strsep(&spec, ";")) || !*p) return; vmax = simple_strtoul(p, NULL, 10); if (vmax <= 0 || vmax <= vmin) return; if (!(p = strsep(&spec, ";")) || !*p) return; hmin = 1000 * simple_strtoul(p, NULL, 10); if (hmin <= 0) return; if (!(p = strsep(&spec, "")) || !*p) return; hmax = 1000 * simple_strtoul(p, NULL, 10); if (hmax <= 0 || hmax <= hmin) return; fb_info.monspecs.vfmin = vmin; fb_info.monspecs.vfmax = vmax; fb_info.monspecs.hfmin = hmin; fb_info.monspecs.hfmax = hmax; } #endif /* ATAFB_FALCON */ static void __init atafb_setup_user(char *spec) { /* Format of user defined video mode is: <xres>;<yres>;<depth> */ char *p; int xres, yres, depth, temp; p = strsep(&spec, ";"); if (!p || !*p) return; xres = simple_strtoul(p, NULL, 10); p = strsep(&spec, ";"); if (!p || !*p) return; yres = simple_strtoul(p, NULL, 10); p = strsep(&spec, ""); if (!p || !*p) return; depth = simple_strtoul(p, NULL, 10); temp = get_video_mode("user0"); if (temp) { default_par = temp; 
atafb_predefined[default_par - 1].xres = xres; atafb_predefined[default_par - 1].yres = yres; atafb_predefined[default_par - 1].bits_per_pixel = depth; } } int __init atafb_setup(char *options) { char *this_opt; int temp; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if ((temp = get_video_mode(this_opt))) { default_par = temp; mode_option = this_opt; } else if (!strcmp(this_opt, "inverse")) inverse = 1; else if (!strncmp(this_opt, "hwscroll_", 9)) { hwscroll = simple_strtoul(this_opt + 9, NULL, 10); if (hwscroll < 0) hwscroll = 0; if (hwscroll > 200) hwscroll = 200; } #ifdef ATAFB_EXT else if (!strcmp(this_opt, "mv300")) { external_bitspercol = 8; external_card_type = IS_MV300; } else if (!strncmp(this_opt, "external:", 9)) atafb_setup_ext(this_opt + 9); #endif else if (!strncmp(this_opt, "internal:", 9)) atafb_setup_int(this_opt + 9); #ifdef ATAFB_FALCON else if (!strncmp(this_opt, "eclock:", 7)) { fext.f = simple_strtoul(this_opt + 7, NULL, 10); /* external pixelclock in kHz --> ps */ fext.t = 1000000000 / fext.f; fext.f *= 1000; } else if (!strncmp(this_opt, "monitorcap:", 11)) atafb_setup_mcap(this_opt + 11); #endif else if (!strcmp(this_opt, "keep")) DontCalcRes = 1; else if (!strncmp(this_opt, "R", 1)) atafb_setup_user(this_opt + 1); } return 0; } int __init atafb_init(void) { int pad, detected_mode, error; unsigned int defmode = 0; unsigned long mem_req; #ifndef MODULE char *option = NULL; if (fb_get_options("atafb", &option)) return -ENODEV; atafb_setup(option); #endif printk("atafb_init: start\n"); if (!MACH_IS_ATARI) return -ENODEV; do { #ifdef ATAFB_EXT if (external_addr) { printk("atafb_init: initializing external hw\n"); fbhw = &ext_switch; atafb_ops.fb_setcolreg = &ext_setcolreg; defmode = DEFMODE_EXT; break; } #endif #ifdef ATAFB_TT if (ATARIHW_PRESENT(TT_SHIFTER)) { printk("atafb_init: initializing TT hw\n"); fbhw = &tt_switch; atafb_ops.fb_setcolreg = &tt_setcolreg; defmode = 
DEFMODE_TT; break; } #endif #ifdef ATAFB_FALCON if (ATARIHW_PRESENT(VIDEL_SHIFTER)) { printk("atafb_init: initializing Falcon hw\n"); fbhw = &falcon_switch; atafb_ops.fb_setcolreg = &falcon_setcolreg; error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, IRQ_TYPE_PRIO, "framebuffer:modeswitch", falcon_vbl_switcher); if (error) return error; defmode = DEFMODE_F30; break; } #endif #ifdef ATAFB_STE if (ATARIHW_PRESENT(STND_SHIFTER) || ATARIHW_PRESENT(EXTD_SHIFTER)) { printk("atafb_init: initializing ST/E hw\n"); fbhw = &st_switch; atafb_ops.fb_setcolreg = &stste_setcolreg; defmode = DEFMODE_STE; break; } fbhw = &st_switch; atafb_ops.fb_setcolreg = &stste_setcolreg; printk("Cannot determine video hardware; defaulting to ST(e)\n"); #else /* ATAFB_STE */ /* no default driver included */ /* Nobody will ever see this message :-) */ panic("Cannot initialize video hardware"); #endif } while (0); /* Multisync monitor capabilities */ /* Atari-TOS defaults if no boot option present */ if (fb_info.monspecs.hfmin == 0) { fb_info.monspecs.hfmin = 31000; fb_info.monspecs.hfmax = 32000; fb_info.monspecs.vfmin = 58; fb_info.monspecs.vfmax = 62; } detected_mode = fbhw->detect(); check_default_par(detected_mode); #ifdef ATAFB_EXT if (!external_addr) { #endif /* ATAFB_EXT */ mem_req = default_mem_req + ovsc_offset + ovsc_addlen; mem_req = PAGE_ALIGN(mem_req) + PAGE_SIZE; screen_base = atari_stram_alloc(mem_req, "atafb"); if (!screen_base) panic("Cannot allocate screen memory"); memset(screen_base, 0, mem_req); pad = -(unsigned long)screen_base & (PAGE_SIZE - 1); screen_base += pad; real_screen_base = screen_base + ovsc_offset; screen_len = (mem_req - pad - ovsc_offset) & PAGE_MASK; st_ovsc_switch(); if (CPU_IS_040_OR_060) { /* On a '040+, the cache mode of video RAM must be set to * write-through also for internal video hardware! 
*/ cache_push(virt_to_phys(screen_base), screen_len); kernel_set_cachemode(screen_base, screen_len, IOMAP_WRITETHROUGH); } printk("atafb: screen_base %p real_screen_base %p screen_len %d\n", screen_base, real_screen_base, screen_len); #ifdef ATAFB_EXT } else { /* Map the video memory (physical address given) to somewhere * in the kernel address space. */ external_addr = ioremap_writethrough((unsigned long)external_addr, external_len); if (external_vgaiobase) external_vgaiobase = (unsigned long)ioremap(external_vgaiobase, 0x10000); screen_base = real_screen_base = external_addr; screen_len = external_len & PAGE_MASK; memset (screen_base, 0, external_len); } #endif /* ATAFB_EXT */ // strcpy(fb_info.mode->name, "Atari Builtin "); fb_info.fbops = &atafb_ops; // try to set default (detected; requested) var do_fb_set_var(&atafb_predefined[default_par - 1], 1); // reads hw state into current par, which may not be sane yet ata_get_par(&current_par); fb_info.par = &current_par; // tries to read from HW which may not be initialized yet // so set sane var first, then call atafb_set_par atafb_get_var(&fb_info.var, &fb_info); #ifdef ATAFB_FALCON fb_info.pseudo_palette = current_par.hw.falcon.pseudo_palette; #endif fb_info.flags = FBINFO_FLAG_DEFAULT; if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, atafb_modedb, NUM_TOTAL_MODES, &atafb_modedb[defmode], fb_info.var.bits_per_pixel)) { return -EINVAL; } fb_videomode_to_modelist(atafb_modedb, NUM_TOTAL_MODES, &fb_info.modelist); atafb_set_disp(&fb_info); fb_alloc_cmap(&(fb_info.cmap), 1 << fb_info.var.bits_per_pixel, 0); printk("Determined %dx%d, depth %d\n", fb_info.var.xres, fb_info.var.yres, fb_info.var.bits_per_pixel); if ((fb_info.var.xres != fb_info.var.xres_virtual) || (fb_info.var.yres != fb_info.var.yres_virtual)) printk(" virtual %dx%d\n", fb_info.var.xres_virtual, fb_info.var.yres_virtual); if (register_framebuffer(&fb_info) < 0) { #ifdef ATAFB_EXT if (external_addr) { iounmap(external_addr); external_addr = NULL; 
} if (external_vgaiobase) { iounmap((void*)external_vgaiobase); external_vgaiobase = 0; } #endif return -EINVAL; } // FIXME: mode needs setting! //printk("fb%d: %s frame buffer device, using %dK of video memory\n", // fb_info.node, fb_info.mode->name, screen_len>>10); printk("fb%d: frame buffer device, using %dK of video memory\n", fb_info.node, screen_len >> 10); /* TODO: This driver cannot be unloaded yet */ return 0; } module_init(atafb_init); #ifdef MODULE MODULE_LICENSE("GPL"); int cleanup_module(void) { unregister_framebuffer(&fb_info); return atafb_deinit(); } #endif /* MODULE */
gpl-2.0
Shamestick/android_kernel_htc_msm8974
drivers/video/atafb.c
10425
91065
/* * linux/drivers/video/atafb.c -- Atari builtin chipset frame buffer device * * Copyright (C) 1994 Martin Schaller & Roman Hodek * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * History: * - 03 Jan 95: Original version by Martin Schaller: The TT driver and * all the device independent stuff * - 09 Jan 95: Roman: I've added the hardware abstraction (hw_switch) * and wrote the Falcon, ST(E), and External drivers * based on the original TT driver. * - 07 May 95: Martin: Added colormap operations for the external driver * - 21 May 95: Martin: Added support for overscan * Andreas: some bug fixes for this * - Jul 95: Guenther Kelleter <guenther@pool.informatik.rwth-aachen.de>: * Programmable Falcon video modes * (thanks to Christian Cartus for documentation * of VIDEL registers). * - 27 Dec 95: Guenther: Implemented user definable video modes "user[0-7]" * on minor 24...31. "user0" may be set on commandline by * "R<x>;<y>;<depth>". (Makes sense only on Falcon) * Video mode switch on Falcon now done at next VBL interrupt * to avoid the annoying right shift of the screen. * - 23 Sep 97: Juergen: added xres_virtual for cards like ProMST * The external-part is legacy, therefore hardware-specific * functions like panning/hardwarescrolling/blanking isn't * supported. * - 29 Sep 97: Juergen: added Romans suggestion for pan_display * (var->xoffset was changed even if no set_screen_base avail.) * - 05 Oct 97: Juergen: extfb (PACKED_PIXEL) is FB_PSEUDOCOLOR 'cause * we know how to set the colors * ext_*palette: read from ext_colors (former MV300_colors) * write to ext_colors and RAMDAC * * To do: * - For the Falcon it is not possible to set random video modes on * SM124 and SC/TV, only the bootup resolution is supported. 
* */ #define ATAFB_TT #define ATAFB_STE #define ATAFB_EXT #define ATAFB_FALCON #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <linux/uaccess.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stram.h> #include <linux/fb.h> #include <asm/atarikb.h> #include "c2p.h" #include "atafb.h" #define SWITCH_ACIA 0x01 /* modes for switch on OverScan */ #define SWITCH_SND6 0x40 #define SWITCH_SND7 0x80 #define SWITCH_NONE 0x00 #define up(x, r) (((x) + (r) - 1) & ~((r)-1)) /* * Interface to the world */ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int atafb_set_par(struct fb_info *info); static int atafb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info); static int atafb_blank(int blank, struct fb_info *info); static int atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *region); static void atafb_imageblit(struct fb_info *info, const struct fb_image *image); static int atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg); static int default_par; /* default resolution (0=none) */ static unsigned long default_mem_req; static int hwscroll = -1; static int use_hwscroll = 1; static int sttt_xres = 640, st_yres = 400, tt_yres = 480; static int sttt_xres_virtual = 640, sttt_yres_virtual = 400; static int ovsc_offset, ovsc_addlen; /* * Hardware parameters for current mode */ static struct atafb_par { void *screen_base; int yres_virtual; u_long next_line; #if defined ATAFB_TT || defined ATAFB_STE 
union { struct { int mode; int sync; } tt, st; #endif #ifdef ATAFB_FALCON struct falcon_hw { /* Here are fields for storing a video mode, as direct * parameters for the hardware. */ short sync; short line_width; short line_offset; short st_shift; short f_shift; short vid_control; short vid_mode; short xoffset; short hht, hbb, hbe, hdb, hde, hss; short vft, vbb, vbe, vdb, vde, vss; /* auxiliary information */ short mono; short ste_mode; short bpp; u32 pseudo_palette[16]; } falcon; #endif /* Nothing needed for external mode */ } hw; } current_par; /* Don't calculate an own resolution, and thus don't change the one found when * booting (currently used for the Falcon to keep settings for internal video * hardware extensions (e.g. ScreenBlaster) */ static int DontCalcRes = 0; #ifdef ATAFB_FALCON #define HHT hw.falcon.hht #define HBB hw.falcon.hbb #define HBE hw.falcon.hbe #define HDB hw.falcon.hdb #define HDE hw.falcon.hde #define HSS hw.falcon.hss #define VFT hw.falcon.vft #define VBB hw.falcon.vbb #define VBE hw.falcon.vbe #define VDB hw.falcon.vdb #define VDE hw.falcon.vde #define VSS hw.falcon.vss #define VCO_CLOCK25 0x04 #define VCO_CSYPOS 0x10 #define VCO_VSYPOS 0x20 #define VCO_HSYPOS 0x40 #define VCO_SHORTOFFS 0x100 #define VMO_DOUBLE 0x01 #define VMO_INTER 0x02 #define VMO_PREMASK 0x0c #endif static struct fb_info fb_info = { .fix = { .id = "Atari ", .visual = FB_VISUAL_PSEUDOCOLOR, .accel = FB_ACCEL_NONE, } }; static void *screen_base; /* base address of screen */ static void *real_screen_base; /* (only for Overscan) */ static int screen_len; static int current_par_valid; static int mono_moni; #ifdef ATAFB_EXT /* external video handling */ static unsigned int external_xres; static unsigned int external_xres_virtual; static unsigned int external_yres; /* * not needed - atafb will never support panning/hardwarescroll with external * static unsigned int external_yres_virtual; */ static unsigned int external_depth; static int external_pmode; static void 
*external_addr; static unsigned long external_len; static unsigned long external_vgaiobase; static unsigned int external_bitspercol = 6; /* * JOE <joe@amber.dinoco.de>: * added card type for external driver, is only needed for * colormap handling. */ enum cardtype { IS_VGA, IS_MV300 }; static enum cardtype external_card_type = IS_VGA; /* * The MV300 mixes the color registers. So we need an array of munged * indices in order to access the correct reg. */ static int MV300_reg_1bit[2] = { 0, 1 }; static int MV300_reg_4bit[16] = { 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15 }; static int MV300_reg_8bit[256] = { 0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99, 227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251, 7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223, 63, 191, 127, 255 }; static int *MV300_reg = MV300_reg_8bit; #endif /* ATAFB_EXT */ static int inverse; extern int fontheight_8x8; extern int fontwidth_8x8; extern unsigned char fontdata_8x8[]; extern int fontheight_8x16; 
extern int fontwidth_8x16; extern unsigned char fontdata_8x16[]; /* * struct fb_ops { * * open/release and usage marking * struct module *owner; * int (*fb_open)(struct fb_info *info, int user); * int (*fb_release)(struct fb_info *info, int user); * * * For framebuffers with strange non linear layouts or that do not * * work with normal memory mapped access * ssize_t (*fb_read)(struct file *file, char __user *buf, size_t count, loff_t *ppos); * ssize_t (*fb_write)(struct file *file, const char __user *buf, size_t count, loff_t *ppos); * * * checks var and eventually tweaks it to something supported, * * DOES NOT MODIFY PAR * * int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info); * * * set the video mode according to info->var * * int (*fb_set_par)(struct fb_info *info); * * * set color register * * int (*fb_setcolreg)(unsigned int regno, unsigned int red, unsigned int green, * unsigned int blue, unsigned int transp, struct fb_info *info); * * * set color registers in batch * * int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info); * * * blank display * * int (*fb_blank)(int blank, struct fb_info *info); * * * pan display * * int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); * * *** The meat of the drawing engine *** * * Draws a rectangle * * void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); * * Copy data from area to another * * void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); * * Draws a image to the display * * void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); * * * Draws cursor * * int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); * * * Rotates the display * * void (*fb_rotate)(struct fb_info *info, int angle); * * * wait for blit idle, optional * * int (*fb_sync)(struct fb_info *info); * * * perform fb specific ioctl (optional) * * int (*fb_ioctl)(struct fb_info *info, unsigned int cmd, * unsigned long arg); * * 
* Handle 32bit compat ioctl (optional) * * int (*fb_compat_ioctl)(struct fb_info *info, unsigned int cmd, * unsigned long arg); * * * perform fb specific mmap * * int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); * } ; */ /* ++roman: This structure abstracts from the underlying hardware (ST(e), * TT, or Falcon. * * int (*detect)(void) * This function should detect the current video mode settings and * store them in atafb_predefined[0] for later reference by the * user. Return the index+1 of an equivalent predefined mode or 0 * if there is no such. * * int (*encode_fix)(struct fb_fix_screeninfo *fix, * struct atafb_par *par) * This function should fill in the 'fix' structure based on the * values in the 'par' structure. * !!! Obsolete, perhaps !!! * * int (*decode_var)(struct fb_var_screeninfo *var, * struct atafb_par *par) * Get the video params out of 'var'. If a value doesn't fit, round * it up, if it's too big, return EINVAL. * Round up in the following order: bits_per_pixel, xres, yres, * xres_virtual, yres_virtual, xoffset, yoffset, grayscale, bitfields, * horizontal timing, vertical timing. * * int (*encode_var)(struct fb_var_screeninfo *var, * struct atafb_par *par); * Fill the 'var' structure based on the values in 'par' and maybe * other values read out of the hardware. * * void (*get_par)(struct atafb_par *par) * Fill the hardware's 'par' structure. * !!! Used only by detect() !!! * * void (*set_par)(struct atafb_par *par) * Set the hardware according to 'par'. * * void (*set_screen_base)(void *s_base) * Set the base address of the displayed frame buffer. Only called * if yres_virtual > yres or xres_virtual > xres. * * int (*blank)(int blank_mode) * Blank the screen if blank_mode != 0, else unblank. If blank == NULL then * the caller blanks by setting the CLUT to all black. Return 0 if blanking * succeeded, !=0 if un-/blanking failed due to e.g. a video mode which * doesn't support it. 
Implements VESA suspend and powerdown modes on * hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync, 3:suspend hsync, 4: powerdown. */ static struct fb_hwswitch { int (*detect)(void); int (*encode_fix)(struct fb_fix_screeninfo *fix, struct atafb_par *par); int (*decode_var)(struct fb_var_screeninfo *var, struct atafb_par *par); int (*encode_var)(struct fb_var_screeninfo *var, struct atafb_par *par); void (*get_par)(struct atafb_par *par); void (*set_par)(struct atafb_par *par); void (*set_screen_base)(void *s_base); int (*blank)(int blank_mode); int (*pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); } *fbhw; static char *autodetect_names[] = { "autodetect", NULL }; static char *stlow_names[] = { "stlow", NULL }; static char *stmid_names[] = { "stmid", "default5", NULL }; static char *sthigh_names[] = { "sthigh", "default4", NULL }; static char *ttlow_names[] = { "ttlow", NULL }; static char *ttmid_names[] = { "ttmid", "default1", NULL }; static char *tthigh_names[] = { "tthigh", "default2", NULL }; static char *vga2_names[] = { "vga2", NULL }; static char *vga4_names[] = { "vga4", NULL }; static char *vga16_names[] = { "vga16", "default3", NULL }; static char *vga256_names[] = { "vga256", NULL }; static char *falh2_names[] = { "falh2", NULL }; static char *falh16_names[] = { "falh16", NULL }; static char **fb_var_names[] = { autodetect_names, stlow_names, stmid_names, sthigh_names, ttlow_names, ttmid_names, tthigh_names, vga2_names, vga4_names, vga16_names, vga256_names, falh2_names, falh16_names, NULL }; static struct fb_var_screeninfo atafb_predefined[] = { /* * yres_virtual == 0 means use hw-scrolling if possible, else yres */ { /* autodetect */ 0, 0, 0, 0, 0, 0, 0, 0, /* xres-grayscale */ {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, /* red green blue tran*/ 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* st low */ 320, 200, 320, 0, 0, 0, 4, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 
0, 0 }, { /* st mid */ 640, 200, 640, 0, 0, 0, 2, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* st high */ 640, 400, 640, 0, 0, 0, 1, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt low */ 320, 480, 320, 0, 0, 0, 8, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt mid */ 640, 480, 640, 0, 0, 0, 4, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* tt high */ 1280, 960, 1280, 0, 0, 0, 1, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga2 */ 640, 480, 640, 0, 0, 0, 1, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga4 */ 640, 480, 640, 0, 0, 0, 2, 0, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga16 */ 640, 480, 640, 0, 0, 0, 4, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* vga256 */ 640, 480, 640, 0, 0, 0, 8, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* falh2 */ 896, 608, 896, 0, 0, 0, 1, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, { /* falh16 */ 896, 608, 896, 0, 0, 0, 4, 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0 }, }; static int num_atafb_predefined = ARRAY_SIZE(atafb_predefined); static struct fb_videomode atafb_modedb[] __initdata = { /* * Atari Video Modes * * If you change these, make sure to update DEFMODE_* as well! 
*/ /* * ST/TT Video Modes */ { /* 320x200, 15 kHz, 60 Hz (ST low) */ "st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x200, 15 kHz, 60 Hz (ST medium) */ "st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x400, 30.25 kHz, 63.5 Hz (ST high) */ "st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 320x480, 15 kHz, 60 Hz (TT low) */ "tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x480, 29 kHz, 57 Hz (TT medium) */ "tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 1280x960, 29 kHz, 60 Hz (TT high) */ "tt-high", 57, 640, 960, 31041, 120, 100, 8, 16, 140, 30, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, /* * VGA Video Modes */ { /* 640x480, 31 kHz, 60 Hz (VGA) */ "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, { /* 640x400, 31 kHz, 70 Hz (VGA) */ "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3, FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, /* * Falcon HiRes Video Modes */ { /* 896x608, 31 kHz, 60 Hz (Falcon High) */ "falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3, 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP }, }; #define NUM_TOTAL_MODES ARRAY_SIZE(atafb_modedb) static char *mode_option __initdata = NULL; /* default modes */ #define DEFMODE_TT 5 /* "tt-high" for TT */ #define DEFMODE_F30 7 /* "vga70" for Falcon */ #define DEFMODE_STE 2 /* "st-high" for ST/E */ #define DEFMODE_EXT 6 /* "vga" for external */ static int get_video_mode(char *vname) { char ***name_list; char **name; int i; name_list = fb_var_names; for (i = 0; i < num_atafb_predefined; i++) { name = *name_list++; if (!name || !*name) break; while (*name) { if (!strcmp(vname, *name)) return i + 1; name++; } } return 0; } /* 
------------------- TT specific functions ---------------------- */ #ifdef ATAFB_TT static int tt_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { int mode; strcpy(fix->id, "Atari Builtin"); fix->smem_start = (unsigned long)real_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; fix->visual = FB_VISUAL_PSEUDOCOLOR; mode = par->hw.tt.mode & TT_SHIFTER_MODEMASK; if (mode == TT_SHIFTER_TTHIGH || mode == TT_SHIFTER_STHIGH) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; if (mode == TT_SHIFTER_TTHIGH) fix->visual = FB_VISUAL_MONO01; } fix->xpanstep = 0; fix->ypanstep = 1; fix->ywrapstep = 0; fix->line_length = par->next_line; fix->accel = FB_ACCEL_ATARIBLITT; return 0; } static int tt_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int xres = var->xres; int yres = var->yres; int bpp = var->bits_per_pixel; int linelen; int yres_virtual = var->yres_virtual; if (mono_moni) { if (bpp > 1 || xres > sttt_xres * 2 || yres > tt_yres * 2) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_TTHIGH; xres = sttt_xres * 2; yres = tt_yres * 2; bpp = 1; } else { if (bpp > 8 || xres > sttt_xres || yres > tt_yres) return -EINVAL; if (bpp > 4) { if (xres > sttt_xres / 2 || yres > tt_yres) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_TTLOW; xres = sttt_xres / 2; yres = tt_yres; bpp = 8; } else if (bpp > 2) { if (xres > sttt_xres || yres > tt_yres) return -EINVAL; if (xres > sttt_xres / 2 || yres > st_yres / 2) { par->hw.tt.mode = TT_SHIFTER_TTMID; xres = sttt_xres; yres = tt_yres; bpp = 4; } else { par->hw.tt.mode = TT_SHIFTER_STLOW; xres = sttt_xres / 2; yres = st_yres / 2; bpp = 4; } } else if (bpp > 1) { if (xres > sttt_xres || yres > st_yres / 2) return -EINVAL; par->hw.tt.mode = TT_SHIFTER_STMID; xres = sttt_xres; yres = st_yres / 2; bpp = 2; } else if (var->xres > sttt_xres || var->yres > st_yres) { return -EINVAL; } else { par->hw.tt.mode = TT_SHIFTER_STHIGH; xres = sttt_xres; yres = st_yres; bpp 
= 1; } } if (yres_virtual <= 0) yres_virtual = 0; else if (yres_virtual < yres) yres_virtual = yres; if (var->sync & FB_SYNC_EXT) par->hw.tt.sync = 0; else par->hw.tt.sync = 1; linelen = xres * bpp / 8; if (yres_virtual * linelen > screen_len && screen_len) return -EINVAL; if (yres * linelen > screen_len && screen_len) return -EINVAL; if (var->yoffset + yres > yres_virtual && yres_virtual) return -EINVAL; par->yres_virtual = yres_virtual; par->screen_base = screen_base + var->yoffset * linelen; par->next_line = linelen; return 0; } static int tt_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int linelen; memset(var, 0, sizeof(struct fb_var_screeninfo)); var->red.offset = 0; var->red.length = 4; var->red.msb_right = 0; var->grayscale = 0; var->pixclock = 31041; var->left_margin = 120; /* these may be incorrect */ var->right_margin = 100; var->upper_margin = 8; var->lower_margin = 16; var->hsync_len = 140; var->vsync_len = 30; var->height = -1; var->width = -1; if (par->hw.tt.sync & 1) var->sync = 0; else var->sync = FB_SYNC_EXT; switch (par->hw.tt.mode & TT_SHIFTER_MODEMASK) { case TT_SHIFTER_STLOW: var->xres = sttt_xres / 2; var->xres_virtual = sttt_xres_virtual / 2; var->yres = st_yres / 2; var->bits_per_pixel = 4; break; case TT_SHIFTER_STMID: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = st_yres / 2; var->bits_per_pixel = 2; break; case TT_SHIFTER_STHIGH: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = st_yres; var->bits_per_pixel = 1; break; case TT_SHIFTER_TTLOW: var->xres = sttt_xres / 2; var->xres_virtual = sttt_xres_virtual / 2; var->yres = tt_yres; var->bits_per_pixel = 8; break; case TT_SHIFTER_TTMID: var->xres = sttt_xres; var->xres_virtual = sttt_xres_virtual; var->yres = tt_yres; var->bits_per_pixel = 4; break; case TT_SHIFTER_TTHIGH: var->red.length = 0; var->xres = sttt_xres * 2; var->xres_virtual = sttt_xres_virtual * 2; var->yres = tt_yres * 2; var->bits_per_pixel = 1; break; 
} var->blue = var->green = var->red; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; linelen = var->xres_virtual * var->bits_per_pixel / 8; if (!use_hwscroll) var->yres_virtual = var->yres; else if (screen_len) { if (par->yres_virtual) var->yres_virtual = par->yres_virtual; else /* yres_virtual == 0 means use maximum */ var->yres_virtual = screen_len / linelen; } else { if (hwscroll < 0) var->yres_virtual = 2 * var->yres; else var->yres_virtual = var->yres + hwscroll * 16; } var->xoffset = 0; if (screen_base) var->yoffset = (par->screen_base - screen_base) / linelen; else var->yoffset = 0; var->nonstd = 0; var->activate = 0; var->vmode = FB_VMODE_NONINTERLACED; return 0; } static void tt_get_par(struct atafb_par *par) { unsigned long addr; par->hw.tt.mode = shifter_tt.tt_shiftmode; par->hw.tt.sync = shifter.syncmode; addr = ((shifter.bas_hi & 0xff) << 16) | ((shifter.bas_md & 0xff) << 8) | ((shifter.bas_lo & 0xff)); par->screen_base = phys_to_virt(addr); } static void tt_set_par(struct atafb_par *par) { shifter_tt.tt_shiftmode = par->hw.tt.mode; shifter.syncmode = par->hw.tt.sync; /* only set screen_base if really necessary */ if (current_par.screen_base != par->screen_base) fbhw->set_screen_base(par->screen_base); } static int tt_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { if ((shifter_tt.tt_shiftmode & TT_SHIFTER_MODEMASK) == TT_SHIFTER_STHIGH) regno += 254; if (regno > 255) return 1; tt_palette[regno] = (((red >> 12) << 8) | ((green >> 12) << 4) | (blue >> 12)); if ((shifter_tt.tt_shiftmode & TT_SHIFTER_MODEMASK) == TT_SHIFTER_STHIGH && regno == 254) tt_palette[0] = 0; return 0; } static int tt_detect(void) { struct atafb_par par; /* Determine the connected monitor: The DMA sound must be * disabled before reading the MFP GPIP, because the Sound * Done Signal and the Monochrome Detect are XORed together! 
* * Even on a TT, we should look if there is a DMA sound. It was * announced that the Eagle is TT compatible, but only the PCM is * missing... */ if (ATARIHW_PRESENT(PCM_8BIT)) { tt_dmasnd.ctrl = DMASND_CTRL_OFF; udelay(20); /* wait a while for things to settle down */ } mono_moni = (st_mfp.par_dt_reg & 0x80) == 0; tt_get_par(&par); tt_encode_var(&atafb_predefined[0], &par); return 1; } #endif /* ATAFB_TT */ /* ------------------- Falcon specific functions ---------------------- */ #ifdef ATAFB_FALCON static int mon_type; /* Falcon connected monitor */ static int f030_bus_width; /* Falcon ram bus width (for vid_control) */ #define F_MON_SM 0 #define F_MON_SC 1 #define F_MON_VGA 2 #define F_MON_TV 3 static struct pixel_clock { unsigned long f; /* f/[Hz] */ unsigned long t; /* t/[ps] (=1/f) */ int right, hsync, left; /* standard timing in clock cycles, not pixel */ /* hsync initialized in falcon_detect() */ int sync_mask; /* or-mask for hw.falcon.sync to set this clock */ int control_mask; /* ditto, for hw.falcon.vid_control */ } f25 = { 25175000, 39721, 18, 0, 42, 0x0, VCO_CLOCK25 }, f32 = { 32000000, 31250, 18, 0, 42, 0x0, 0 }, fext = { 0, 0, 18, 0, 42, 0x1, 0 }; /* VIDEL-prescale values [mon_type][pixel_length from VCO] */ static int vdl_prescale[4][3] = { { 4,2,1 }, { 4,2,1 }, { 4,2,2 }, { 4,2,1 } }; /* Default hsync timing [mon_type] in picoseconds */ static long h_syncs[4] = { 3000000, 4875000, 4000000, 4875000 }; static inline int hxx_prescale(struct falcon_hw *hw) { return hw->ste_mode ? 
16 : vdl_prescale[mon_type][hw->vid_mode >> 2 & 0x3]; } static int falcon_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Atari Builtin"); fix->smem_start = (unsigned long)real_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; fix->visual = FB_VISUAL_PSEUDOCOLOR; fix->xpanstep = 1; fix->ypanstep = 1; fix->ywrapstep = 0; if (par->hw.falcon.mono) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; /* no smooth scrolling with longword aligned video mem */ fix->xpanstep = 32; } else if (par->hw.falcon.f_shift & 0x100) { fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; /* Is this ok or should it be DIRECTCOLOR? */ fix->visual = FB_VISUAL_TRUECOLOR; fix->xpanstep = 2; } fix->line_length = par->next_line; fix->accel = FB_ACCEL_ATARIBLITT; return 0; } static int falcon_decode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { int bpp = var->bits_per_pixel; int xres = var->xres; int yres = var->yres; int xres_virtual = var->xres_virtual; int yres_virtual = var->yres_virtual; int left_margin, right_margin, hsync_len; int upper_margin, lower_margin, vsync_len; int linelen; int interlace = 0, doubleline = 0; struct pixel_clock *pclock; int plen; /* width of pixel in clock cycles */ int xstretch; int prescale; int longoffset = 0; int hfreq, vfreq; int hdb_off, hde_off, base_off; int gstart, gend1, gend2, align; /* Get the video params out of 'var'. If a value doesn't fit, round it up, if it's too big, return EINVAL. Round up in the following order: bits_per_pixel, xres, yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, bitfields, horizontal timing, vertical timing. There is a maximum of screen resolution determined by pixelclock and minimum frame rate -- (X+hmarg.)*(Y+vmarg.)*vfmin <= pixelclock. In interlace mode this is " * " *vfmin <= pixelclock. Additional constraints: hfreq. Frequency range for multisync monitors is given via command line. 
For TV and SM124 both frequencies are fixed. X % 16 == 0 to fit 8x?? font (except 1 bitplane modes must use X%32 == 0) Y % 16 == 0 to fit 8x16 font Y % 8 == 0 if Y<400 Currently interlace and doubleline mode in var are ignored. On SM124 and TV only the standard resolutions can be used. */ /* Reject uninitialized mode */ if (!xres || !yres || !bpp) return -EINVAL; if (mon_type == F_MON_SM && bpp != 1) return -EINVAL; if (bpp <= 1) { bpp = 1; par->hw.falcon.f_shift = 0x400; par->hw.falcon.st_shift = 0x200; } else if (bpp <= 2) { bpp = 2; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x100; } else if (bpp <= 4) { bpp = 4; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x000; } else if (bpp <= 8) { bpp = 8; par->hw.falcon.f_shift = 0x010; } else if (bpp <= 16) { bpp = 16; /* packed pixel mode */ par->hw.falcon.f_shift = 0x100; /* hicolor, no overlay */ } else return -EINVAL; par->hw.falcon.bpp = bpp; if (mon_type == F_MON_SM || DontCalcRes) { /* Skip all calculations. VGA/TV/SC1224 only supported. */ struct fb_var_screeninfo *myvar = &atafb_predefined[0]; if (bpp > myvar->bits_per_pixel || var->xres > myvar->xres || var->yres > myvar->yres) return -EINVAL; fbhw->get_par(par); /* Current par will be new par */ goto set_screen_base; /* Don't forget this */ } /* Only some fixed resolutions < 640x400 */ if (xres <= 320) xres = 320; else if (xres <= 640 && bpp != 16) xres = 640; if (yres <= 200) yres = 200; else if (yres <= 240) yres = 240; else if (yres <= 400) yres = 400; /* 2 planes must use STE compatibility mode */ par->hw.falcon.ste_mode = bpp == 2; par->hw.falcon.mono = bpp == 1; /* Total and visible scanline length must be a multiple of one longword, * this and the console fontwidth yields the alignment for xres and * xres_virtual. 
* TODO: this way "odd" fontheights are not supported * * Special case in STE mode: blank and graphic positions don't align, * avoid trash at right margin */ if (par->hw.falcon.ste_mode) xres = (xres + 63) & ~63; else if (bpp == 1) xres = (xres + 31) & ~31; else xres = (xres + 15) & ~15; if (yres >= 400) yres = (yres + 15) & ~15; else yres = (yres + 7) & ~7; if (xres_virtual < xres) xres_virtual = xres; else if (bpp == 1) xres_virtual = (xres_virtual + 31) & ~31; else xres_virtual = (xres_virtual + 15) & ~15; if (yres_virtual <= 0) yres_virtual = 0; else if (yres_virtual < yres) yres_virtual = yres; /* backward bug-compatibility */ if (var->pixclock > 1) var->pixclock -= 1; par->hw.falcon.line_width = bpp * xres / 16; par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16; /* single or double pixel width */ xstretch = (xres < 640) ? 2 : 1; #if 0 /* SM124 supports only 640x400, this is rejected above */ if (mon_type == F_MON_SM) { if (xres != 640 && yres != 400) return -EINVAL; plen = 1; pclock = &f32; /* SM124-mode is special */ par->hw.falcon.ste_mode = 1; par->hw.falcon.f_shift = 0x000; par->hw.falcon.st_shift = 0x200; left_margin = hsync_len = 128 / plen; right_margin = 0; /* TODO set all margins */ } else #endif if (mon_type == F_MON_SC || mon_type == F_MON_TV) { plen = 2 * xstretch; if (var->pixclock > f32.t * plen) return -EINVAL; pclock = &f32; if (yres > 240) interlace = 1; if (var->pixclock == 0) { /* set some minimal margins which center the screen */ left_margin = 32; right_margin = 18; hsync_len = pclock->hsync / plen; upper_margin = 31; lower_margin = 14; vsync_len = interlace ? 
3 : 4; } else { left_margin = var->left_margin; right_margin = var->right_margin; hsync_len = var->hsync_len; upper_margin = var->upper_margin; lower_margin = var->lower_margin; vsync_len = var->vsync_len; if (var->vmode & FB_VMODE_INTERLACED) { upper_margin = (upper_margin + 1) / 2; lower_margin = (lower_margin + 1) / 2; vsync_len = (vsync_len + 1) / 2; } else if (var->vmode & FB_VMODE_DOUBLE) { upper_margin *= 2; lower_margin *= 2; vsync_len *= 2; } } } else { /* F_MON_VGA */ if (bpp == 16) xstretch = 2; /* Double pixel width only for hicolor */ /* Default values are used for vert./hor. timing if no pixelclock given. */ if (var->pixclock == 0) { int linesize; /* Choose master pixelclock depending on hor. timing */ plen = 1 * xstretch; if ((plen * xres + f25.right + f25.hsync + f25.left) * fb_info.monspecs.hfmin < f25.f) pclock = &f25; else if ((plen * xres + f32.right + f32.hsync + f32.left) * fb_info.monspecs.hfmin < f32.f) pclock = &f32; else if ((plen * xres + fext.right + fext.hsync + fext.left) * fb_info.monspecs.hfmin < fext.f && fext.f) pclock = &fext; else return -EINVAL; left_margin = pclock->left / plen; right_margin = pclock->right / plen; hsync_len = pclock->hsync / plen; linesize = left_margin + xres + right_margin + hsync_len; upper_margin = 31; lower_margin = 11; vsync_len = 3; } else { /* Choose largest pixelclock <= wanted clock */ int i; unsigned long pcl = ULONG_MAX; pclock = 0; for (i = 1; i <= 4; i *= 2) { if (f25.t * i >= var->pixclock && f25.t * i < pcl) { pcl = f25.t * i; pclock = &f25; } if (f32.t * i >= var->pixclock && f32.t * i < pcl) { pcl = f32.t * i; pclock = &f32; } if (fext.t && fext.t * i >= var->pixclock && fext.t * i < pcl) { pcl = fext.t * i; pclock = &fext; } } if (!pclock) return -EINVAL; plen = pcl / pclock->t; left_margin = var->left_margin; right_margin = var->right_margin; hsync_len = var->hsync_len; upper_margin = var->upper_margin; lower_margin = var->lower_margin; vsync_len = var->vsync_len; /* Internal unit is 
[single lines per (half-)frame] */ if (var->vmode & FB_VMODE_INTERLACED) { /* # lines in half frame */ /* External unit is [lines per full frame] */ upper_margin = (upper_margin + 1) / 2; lower_margin = (lower_margin + 1) / 2; vsync_len = (vsync_len + 1) / 2; } else if (var->vmode & FB_VMODE_DOUBLE) { /* External unit is [double lines per frame] */ upper_margin *= 2; lower_margin *= 2; vsync_len *= 2; } } if (pclock == &fext) longoffset = 1; /* VIDEL doesn't synchronize on short offset */ } /* Is video bus bandwidth (32MB/s) too low for this resolution? */ /* this is definitely wrong if bus clock != 32MHz */ if (pclock->f / plen / 8 * bpp > 32000000L) return -EINVAL; if (vsync_len < 1) vsync_len = 1; /* include sync lengths in right/lower margin for all calculations */ right_margin += hsync_len; lower_margin += vsync_len; /* ! In all calculations of margins we use # of lines in half frame * (which is a full frame in non-interlace mode), so we can switch * between interlace and non-interlace without messing around * with these. */ again: /* Set base_offset 128 and video bus width */ par->hw.falcon.vid_control = mon_type | f030_bus_width; if (!longoffset) par->hw.falcon.vid_control |= VCO_SHORTOFFS; /* base_offset 64 */ if (var->sync & FB_SYNC_HOR_HIGH_ACT) par->hw.falcon.vid_control |= VCO_HSYPOS; if (var->sync & FB_SYNC_VERT_HIGH_ACT) par->hw.falcon.vid_control |= VCO_VSYPOS; /* Pixelclock */ par->hw.falcon.vid_control |= pclock->control_mask; /* External or internal clock */ par->hw.falcon.sync = pclock->sync_mask | 0x2; /* Pixellength and prescale */ par->hw.falcon.vid_mode = (2 / plen) << 2; if (doubleline) par->hw.falcon.vid_mode |= VMO_DOUBLE; if (interlace) par->hw.falcon.vid_mode |= VMO_INTER; /********************* * Horizontal timing: unit = [master clock cycles] * unit of hxx-registers: [master clock cycles * prescale] * Hxx-registers are 9 bit wide * * 1 line = ((hht + 2) * 2 * prescale) clock cycles * * graphic output = hdb & 0x200 ? 
* ((hht + 2) * 2 - hdb + hde) * prescale - hdboff + hdeoff: * (hht + 2 - hdb + hde) * prescale - hdboff + hdeoff * (this must be a multiple of plen*128/bpp, on VGA pixels * to the right may be cut off with a bigger right margin) * * start of graphics relative to start of 1st halfline = hdb & 0x200 ? * (hdb - hht - 2) * prescale + hdboff : * hdb * prescale + hdboff * * end of graphics relative to start of 1st halfline = * (hde + hht + 2) * prescale + hdeoff *********************/ /* Calculate VIDEL registers */ { prescale = hxx_prescale(&par->hw.falcon); base_off = par->hw.falcon.vid_control & VCO_SHORTOFFS ? 64 : 128; /* Offsets depend on video mode */ /* Offsets are in clock cycles, divide by prescale to * calculate hd[be]-registers */ if (par->hw.falcon.f_shift & 0x100) { align = 1; hde_off = 0; hdb_off = (base_off + 16 * plen) + prescale; } else { align = 128 / bpp; hde_off = ((128 / bpp + 2) * plen); if (par->hw.falcon.ste_mode) hdb_off = (64 + base_off + (128 / bpp + 2) * plen) + prescale; else hdb_off = (base_off + (128 / bpp + 18) * plen) + prescale; } gstart = (prescale / 2 + plen * left_margin) / prescale; /* gend1 is for hde (gend-gstart multiple of align), shifter's xres */ gend1 = gstart + roundup(xres, align) * plen / prescale; /* gend2 is for hbb, visible xres (rest to gend1 is cut off by hblank) */ gend2 = gstart + xres * plen / prescale; par->HHT = plen * (left_margin + xres + right_margin) / (2 * prescale) - 2; /* par->HHT = (gend2 + plen * right_margin / prescale) / 2 - 2;*/ par->HDB = gstart - hdb_off / prescale; par->HBE = gstart; if (par->HDB < 0) par->HDB += par->HHT + 2 + 0x200; par->HDE = gend1 - par->HHT - 2 - hde_off / prescale; par->HBB = gend2 - par->HHT - 2; #if 0 /* One more Videl constraint: data fetch of two lines must not overlap */ if ((par->HDB & 0x200) && (par->HDB & ~0x200) - par->HDE <= 5) { /* if this happens increase margins, decrease hfreq. 
*/ } #endif if (hde_off % prescale) par->HBB++; /* compensate for non matching hde and hbb */ par->HSS = par->HHT + 2 - plen * hsync_len / prescale; if (par->HSS < par->HBB) par->HSS = par->HBB; } /* check hor. frequency */ hfreq = pclock->f / ((par->HHT + 2) * prescale * 2); if (hfreq > fb_info.monspecs.hfmax && mon_type != F_MON_VGA) { /* ++guenther: ^^^^^^^^^^^^^^^^^^^ can't remember why I did this */ /* Too high -> enlarge margin */ left_margin += 1; right_margin += 1; goto again; } if (hfreq > fb_info.monspecs.hfmax || hfreq < fb_info.monspecs.hfmin) return -EINVAL; /* Vxx-registers */ /* All Vxx must be odd in non-interlace, since frame starts in the middle * of the first displayed line! * One frame consists of VFT+1 half lines. VFT+1 must be even in * non-interlace, odd in interlace mode for synchronisation. * Vxx-registers are 11 bit wide */ par->VBE = (upper_margin * 2 + 1); /* must begin on odd halfline */ par->VDB = par->VBE; par->VDE = yres; if (!interlace) par->VDE <<= 1; if (doubleline) par->VDE <<= 1; /* VDE now half lines per (half-)frame */ par->VDE += par->VDB; par->VBB = par->VDE; par->VFT = par->VBB + (lower_margin * 2 - 1) - 1; par->VSS = par->VFT + 1 - (vsync_len * 2 - 1); /* vbb,vss,vft must be even in interlace mode */ if (interlace) { par->VBB++; par->VSS++; par->VFT++; } /* V-frequency check, hope I didn't create any loop here. */ /* Interlace and doubleline are mutually exclusive. 
*/ vfreq = (hfreq * 2) / (par->VFT + 1); if (vfreq > fb_info.monspecs.vfmax && !doubleline && !interlace) { /* Too high -> try again with doubleline */ doubleline = 1; goto again; } else if (vfreq < fb_info.monspecs.vfmin && !interlace && !doubleline) { /* Too low -> try again with interlace */ interlace = 1; goto again; } else if (vfreq < fb_info.monspecs.vfmin && doubleline) { /* Doubleline too low -> clear doubleline and enlarge margins */ int lines; doubleline = 0; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines - 2 * yres) > fb_info.monspecs.vfmax; lines++) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq > fb_info.monspecs.vfmax && doubleline) { /* Doubleline too high -> enlarge margins */ int lines; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines) > fb_info.monspecs.vfmax; lines += 2) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq > fb_info.monspecs.vfmax && interlace) { /* Interlace, too high -> enlarge margins */ int lines; for (lines = 0; (hfreq * 2) / (par->VFT + 1 + 4 * lines) > fb_info.monspecs.vfmax; lines++) ; upper_margin += lines; lower_margin += lines; goto again; } else if (vfreq < fb_info.monspecs.vfmin || vfreq > fb_info.monspecs.vfmax) return -EINVAL; set_screen_base: linelen = xres_virtual * bpp / 8; if (yres_virtual * linelen > screen_len && screen_len) return -EINVAL; if (yres * linelen > screen_len && screen_len) return -EINVAL; if (var->yoffset + yres > yres_virtual && yres_virtual) return -EINVAL; par->yres_virtual = yres_virtual; par->screen_base = screen_base + var->yoffset * linelen; par->hw.falcon.xoffset = 0; par->next_line = linelen; return 0; } static int falcon_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) { /* !!! only for VGA !!! 
*/
    int linelen;
    int prescale, plen;
    int hdb_off, hde_off, base_off;
    struct falcon_hw *hw = &par->hw.falcon;

    memset(var, 0, sizeof(struct fb_var_screeninfo));

    /* possible frequencies: 25.175 or 32MHz */
    var->pixclock = hw->sync & 0x1 ? fext.t :
                    hw->vid_control & VCO_CLOCK25 ? f25.t : f32.t;

    var->height = -1;
    var->width = -1;

    var->sync = 0;
    if (hw->vid_control & VCO_HSYPOS)
        var->sync |= FB_SYNC_HOR_HIGH_ACT;
    if (hw->vid_control & VCO_VSYPOS)
        var->sync |= FB_SYNC_VERT_HIGH_ACT;

    var->vmode = FB_VMODE_NONINTERLACED;
    if (hw->vid_mode & VMO_INTER)
        var->vmode |= FB_VMODE_INTERLACED;
    if (hw->vid_mode & VMO_DOUBLE)
        var->vmode |= FB_VMODE_DOUBLE;

    /* visible y resolution:
     * Graphics display starts at line VDB and ends at line
     * VDE. If interlace mode off unit of VC-registers is
     * half lines, else lines.
     */
    var->yres = hw->vde - hw->vdb;
    if (!(var->vmode & FB_VMODE_INTERLACED))
        var->yres >>= 1;
    if (var->vmode & FB_VMODE_DOUBLE)
        var->yres >>= 1;

    /*
     * to get bpp, we must examine f_shift and st_shift.
     * f_shift is valid if any of bits no. 10, 8 or 4
     * is set. Priority in f_shift is: 10 ">" 8 ">" 4, i.e.
     * if bit 10 set then bit 8 and bit 4 don't care...
     * If all these bits are 0 get display depth from st_shift
     * (as for ST and STE)
     */
    if (hw->f_shift & 0x400)        /* 2 colors */
        var->bits_per_pixel = 1;
    else if (hw->f_shift & 0x100)   /* hicolor */
        var->bits_per_pixel = 16;
    else if (hw->f_shift & 0x010)   /* 8 bitplanes */
        var->bits_per_pixel = 8;
    else if (hw->st_shift == 0)
        var->bits_per_pixel = 4;
    else if (hw->st_shift == 0x100)
        var->bits_per_pixel = 2;
    else                            /* if (hw->st_shift == 0x200) */
        var->bits_per_pixel = 1;

    /* line_width is in 16-bit words per line; convert back to pixels */
    var->xres = hw->line_width * 16 / var->bits_per_pixel;
    var->xres_virtual = var->xres + hw->line_offset * 16 /
        var->bits_per_pixel;
    if (hw->xoffset)
        var->xres_virtual += 16;

    if (var->bits_per_pixel == 16) {
        /* RGB565 layout for hicolor mode */
        var->red.offset = 11;
        var->red.length = 5;
        var->red.msb_right = 0;
        var->green.offset = 5;
        var->green.length = 6;
        var->green.msb_right = 0;
        var->blue.offset = 0;
        var->blue.length = 5;
        var->blue.msb_right = 0;
    } else {
        /* palette modes: STE palette has 4 bits/component, Falcon 6 */
        var->red.offset = 0;
        var->red.length = hw->ste_mode ? 4 : 6;
        if (var->red.length > var->bits_per_pixel)
            var->red.length = var->bits_per_pixel;
        var->red.msb_right = 0;
        var->grayscale = 0;
        var->blue = var->green = var->red;
    }
    var->transp.offset = 0;
    var->transp.length = 0;
    var->transp.msb_right = 0;
    linelen = var->xres_virtual * var->bits_per_pixel / 8;
    if (screen_len) {
        if (par->yres_virtual)
            var->yres_virtual = par->yres_virtual;
        else
            /* yres_virtual == 0 means use maximum */
            var->yres_virtual = screen_len / linelen;
    } else {
        if (hwscroll < 0)
            var->yres_virtual = 2 * var->yres;
        else
            var->yres_virtual = var->yres + hwscroll * 16;
    }
    var->xoffset = 0;       /* TODO change this */

    /* hdX-offsets */
    prescale = hxx_prescale(hw);
    plen = 4 >> (hw->vid_mode >> 2 & 0x3);
    base_off = hw->vid_control & VCO_SHORTOFFS ? 64 : 128;
    if (hw->f_shift & 0x100) {
        hde_off = 0;
        hdb_off = (base_off + 16 * plen) + prescale;
    } else {
        hde_off = ((128 / var->bits_per_pixel + 2) * plen);
        if (hw->ste_mode)
            hdb_off = (64 + base_off + (128 / var->bits_per_pixel + 2)
                       * plen) + prescale;
        else
            hdb_off = (base_off + (128 / var->bits_per_pixel + 18)
                       * plen) + prescale;
    }

    /* Right margin includes hsync */
    var->left_margin = hdb_off + prescale * ((hw->hdb & 0x1ff) -
                       (hw->hdb & 0x200 ? 2 + hw->hht : 0));
    if (hw->ste_mode || mon_type != F_MON_VGA)
        var->right_margin = prescale * (hw->hht + 2 - hw->hde) - hde_off;
    else
        /* can't use this in ste_mode, because hbb is +1 off */
        var->right_margin = prescale * (hw->hht + 2 - hw->hbb);
    var->hsync_len = prescale * (hw->hht + 2 - hw->hss);

    /* Lower margin includes vsync */
    var->upper_margin = hw->vdb / 2;        /* round down to full lines */
    var->lower_margin = (hw->vft + 1 - hw->vde + 1) / 2;    /* round up */
    var->vsync_len = (hw->vft + 1 - hw->vss + 1) / 2;       /* round up */
    if (var->vmode & FB_VMODE_INTERLACED) {
        var->upper_margin *= 2;
        var->lower_margin *= 2;
        var->vsync_len *= 2;
    } else if (var->vmode & FB_VMODE_DOUBLE) {
        var->upper_margin = (var->upper_margin + 1) / 2;
        var->lower_margin = (var->lower_margin + 1) / 2;
        var->vsync_len = (var->vsync_len + 1) / 2;
    }

    /* convert from master clock units back to pixel units */
    var->pixclock *= plen;
    var->left_margin /= plen;
    var->right_margin /= plen;
    var->hsync_len /= plen;
    var->right_margin -= var->hsync_len;
    var->lower_margin -= var->vsync_len;
    if (screen_base)
        var->yoffset = (par->screen_base - screen_base) / linelen;
    else
        var->yoffset = 0;
    var->nonstd = 0;        /* what is this for?
*/
    var->activate = 0;
    return 0;
}

/* Handshake state between falcon_set_par() and the VBL interrupt handler:
 * falcon_set_par() stages a new register set in f_new_mode and raises
 * f_change_mode; falcon_vbl_switcher() applies it at the next vertical
 * blank to avoid mid-frame desynchronisation.
 */
static int f_change_mode;
static struct falcon_hw f_new_mode;
static int f_pan_display;

/* Snapshot the live VIDEL/shifter registers into *par. */
static void falcon_get_par(struct atafb_par *par)
{
    unsigned long addr;
    struct falcon_hw *hw = &par->hw.falcon;

    hw->line_width = shifter_f030.scn_width;
    hw->line_offset = shifter_f030.off_next;
    hw->st_shift = videl.st_shift & 0x300;
    hw->f_shift = videl.f_shift;
    hw->vid_control = videl.control;
    hw->vid_mode = videl.mode;
    hw->sync = shifter.syncmode & 0x1;
    hw->xoffset = videl.xoffset & 0xf;
    hw->hht = videl.hht;
    hw->hbb = videl.hbb;
    hw->hbe = videl.hbe;
    hw->hdb = videl.hdb;
    hw->hde = videl.hde;
    hw->hss = videl.hss;
    hw->vft = videl.vft;
    hw->vbb = videl.vbb;
    hw->vbe = videl.vbe;
    hw->vdb = videl.vdb;
    hw->vde = videl.vde;
    hw->vss = videl.vss;
    /* screen base is stored as a 24-bit physical address in three bytes */
    addr = (shifter.bas_hi & 0xff) << 16 |
           (shifter.bas_md & 0xff) << 8 |
           (shifter.bas_lo & 0xff);
    par->screen_base = phys_to_virt(addr);

    /* derived parameters */
    hw->ste_mode = (hw->f_shift & 0x510) == 0 && hw->st_shift == 0x100;
    hw->mono = (hw->f_shift & 0x400) ||
        ((hw->f_shift & 0x510) == 0 && hw->st_shift == 0x200);
}

/* Schedule *par to be programmed into the hardware at the next VBL. */
static void falcon_set_par(struct atafb_par *par)
{
    f_change_mode = 0;

    /* only set screen_base if really necessary */
    if (current_par.screen_base != par->screen_base)
        fbhw->set_screen_base(par->screen_base);

    /* Don't touch any other registers if we keep the default resolution */
    if (DontCalcRes)
        return;

    /* Tell vbl-handler to change video mode.
     * We change modes only on next VBL, to avoid desynchronisation
     * (a shift to the right and wrap around by a random number of pixels
     * in all monochrome modes).
     * This seems to work on my Falcon.
     */
    f_new_mode = par->hw.falcon;
    f_change_mode = 1;
}

/* VBL interrupt handler: applies a staged mode change and/or pan.
 * Runs in interrupt context; register write order below is deliberate
 * (f_shift/st_shift writes have palette-mode side effects, see comments).
 */
static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
{
    struct falcon_hw *hw = &f_new_mode;

    if (f_change_mode) {
        f_change_mode = 0;

        if (hw->sync & 0x1) {
            /* Enable external pixelclock. This code only for ScreenWonder */
            *(volatile unsigned short *)0xffff9202 = 0xffbf;
        } else {
            /* Turn off external clocks. Read sets all output bits to 1. */
            *(volatile unsigned short *)0xffff9202;
        }
        shifter.syncmode = hw->sync;

        videl.hht = hw->hht;
        videl.hbb = hw->hbb;
        videl.hbe = hw->hbe;
        videl.hdb = hw->hdb;
        videl.hde = hw->hde;
        videl.hss = hw->hss;
        videl.vft = hw->vft;
        videl.vbb = hw->vbb;
        videl.vbe = hw->vbe;
        videl.vdb = hw->vdb;
        videl.vde = hw->vde;
        videl.vss = hw->vss;

        videl.f_shift = 0;      /* write enables Falcon palette, 0: 4 planes */
        if (hw->ste_mode) {
            videl.st_shift = hw->st_shift;  /* write enables STE palette */
        } else {
            /* IMPORTANT:
             * set st_shift 0, so we can tell the screen-depth if
             * f_shift == 0. Writing 0 to f_shift enables 4 plane
             * Falcon mode but doesn't set st_shift. st_shift != 0
             * (!= 4planes) is impossible with Falcon palette.
             */
            videl.st_shift = 0;
            /* now back to Falcon palette mode */
            videl.f_shift = hw->f_shift;
        }
        /* writing to st_shift changed scn_width and vid_mode */
        videl.xoffset = hw->xoffset;
        shifter_f030.scn_width = hw->line_width;
        shifter_f030.off_next = hw->line_offset;
        videl.control = hw->vid_control;
        videl.mode = hw->vid_mode;
    }
    if (f_pan_display) {
        f_pan_display = 0;
        videl.xoffset = current_par.hw.falcon.xoffset;
        shifter_f030.off_next = current_par.hw.falcon.line_offset;
    }
    return IRQ_HANDLED;
}

/* Pan the visible window; the fine x offset is deferred to the VBL
 * handler via f_pan_display (hardware xoffset only exists for <16bpp).
 */
static int falcon_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
    struct atafb_par *par = (struct atafb_par *)info->par;
    int xoffset;
    int bpp = info->var.bits_per_pixel;

    if (bpp == 1)
        var->xoffset = up(var->xoffset, 32);
    if (bpp != 16)
        par->hw.falcon.xoffset = var->xoffset & 15;
    else {
        par->hw.falcon.xoffset = 0;
        var->xoffset = up(var->xoffset, 2);
    }
    par->hw.falcon.line_offset = bpp *
        (info->var.xres_virtual - info->var.xres) / 16;
    if (par->hw.falcon.xoffset)
        par->hw.falcon.line_offset -= bpp;
    xoffset = var->xoffset - par->hw.falcon.xoffset;

    par->screen_base = screen_base +
        (var->yoffset * info->var.xres_virtual + xoffset) * bpp / 8;
    if (fbhw->set_screen_base)
        fbhw->set_screen_base(par->screen_base);
    else
        return -EINVAL;         /* shouldn't
happen */
    f_pan_display = 1;

    return 0;
}

/* Load one palette entry: Falcon palette, legacy ST(E) palette mirror
 * (first 16 entries), and the truecolor pseudo-palette for fbcon.
 * Inputs are 16-bit fb color components.
 */
static int falcon_setcolreg(unsigned int regno, unsigned int red,
                            unsigned int green, unsigned int blue,
                            unsigned int transp, struct fb_info *info)
{
    if (regno > 255)
        return 1;
    /* Falcon palette: 6 bits per component, packed into a 32-bit reg */
    f030_col[regno] = (((red & 0xfc00) << 16) |
                       ((green & 0xfc00) << 8) |
                       ((blue & 0xfc00) >> 8));
    if (regno < 16) {
        /* STE-compatible palette: 4 bits/component, LSB stored apart */
        shifter_tt.color_reg[regno] =
            (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
            (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
            ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
        /* RGB565 entry for the 16bpp pseudo-palette */
        ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
                                                ((green & 0xfc00) >> 5) |
                                                ((blue & 0xf800) >> 11));
    }
    return 0;
}

static int falcon_blank(int blank_mode)
{
    /* ++guenther: we can switch off graphics by changing VDB and VDE,
     * so VIDEL doesn't hog the bus while saving.
     * (this may affect usleep()).
     */
    int vdb, vss, hbe, hss;

    if (mon_type == F_MON_SM)   /* this doesn't work on SM124 */
        return 1;

    vdb = current_par.VDB;
    vss = current_par.VSS;
    hbe = current_par.HBE;
    hss = current_par.HSS;

    if (blank_mode >= 1) {
        /* disable graphics output (this speeds up the CPU) ... */
        vdb = current_par.VFT + 1;
        /* ... and blank all lines */
        hbe = current_par.HHT + 2;
    }
    /* use VESA suspend modes on VGA monitors */
    if (mon_type == F_MON_VGA) {
        if (blank_mode == 2 || blank_mode == 4)
            vss = current_par.VFT + 1;
        if (blank_mode == 3 || blank_mode == 4)
            hss = current_par.HHT + 2;
    }
    videl.vdb = vdb;
    videl.vss = vss;
    videl.hbe = hbe;
    videl.hss = hss;
    return 0;
}

/* Probe the attached monitor type from the hardware ID bits and set up
 * the matching monitor frequency limits; fills the "autodetect" mode slot.
 * NOTE(review): the switch sets no monspecs for F_MON_VGA — presumably
 * the VGA limits are established elsewhere; confirm before relying on it.
 */
static int falcon_detect(void)
{
    struct atafb_par par;
    unsigned char fhw;

    /* Determine connected monitor and set monitor parameters */
    fhw = *(unsigned char *)0xffff8006;
    mon_type = fhw >> 6 & 0x3;
    /* bit 1 of fhw: 1=32 bit ram bus, 0=16 bit */
    f030_bus_width = fhw << 6 & 0x80;
    switch (mon_type) {
    case F_MON_SM:
        fb_info.monspecs.vfmin = 70;
        fb_info.monspecs.vfmax = 72;
        fb_info.monspecs.hfmin = 35713;
        fb_info.monspecs.hfmax = 35715;
        break;
    case F_MON_SC:
    case F_MON_TV:
        /* PAL...NTSC */
        fb_info.monspecs.vfmin = 49;    /* not 50, since TOS defaults to 49.9x Hz */
        fb_info.monspecs.vfmax = 60;
        fb_info.monspecs.hfmin = 15620;
        fb_info.monspecs.hfmax = 15755;
        break;
    }
    /* initialize hsync-len */
    f25.hsync = h_syncs[mon_type] / f25.t;
    f32.hsync = h_syncs[mon_type] / f32.t;
    if (fext.t)
        fext.hsync = h_syncs[mon_type] / fext.t;

    falcon_get_par(&par);
    falcon_encode_var(&atafb_predefined[0], &par);

    /* Detected mode is always the "autodetect" slot */
    return 1;
}

#endif /* ATAFB_FALCON */

/* ------------------- ST(E) specific functions ---------------------- */

#ifdef ATAFB_STE

/* Fill *fix with the fixed-mode description for the ST(E) shifter. */
static int stste_encode_fix(struct fb_fix_screeninfo *fix,
                            struct atafb_par *par)
{
    int mode;

    strcpy(fix->id, "Atari Builtin");
    fix->smem_start = (unsigned long)real_screen_base;
    fix->smem_len = screen_len;
    fix->type = FB_TYPE_INTERLEAVED_PLANES;
    fix->type_aux = 2;
    fix->visual = FB_VISUAL_PSEUDOCOLOR;
    mode = par->hw.st.mode & 3;
    if (mode == ST_HIGH) {
        fix->type = FB_TYPE_PACKED_PIXELS;
        fix->type_aux = 0;
        fix->visual = FB_VISUAL_MONO10;
    }
    /* panning requires the extended (STE) shifter */
    if (ATARIHW_PRESENT(EXTD_SHIFTER)) {
        fix->xpanstep = 16;
        fix->ypanstep = 1;
    } else {
        fix->xpanstep = 0;
        fix->ypanstep = 0;
    }
    fix->ywrapstep =
0;
    fix->line_length = par->next_line;
    fix->accel = FB_ACCEL_ATARIBLITT;
    return 0;
}

/* Map a requested fb_var mode onto one of the three fixed ST modes
 * (ST_HIGH / ST_MID / ST_LOW); requested values are rounded up to the
 * mode that can contain them, or -EINVAL if nothing fits.
 */
static int stste_decode_var(struct fb_var_screeninfo *var,
                            struct atafb_par *par)
{
    int xres = var->xres;
    int yres = var->yres;
    int bpp = var->bits_per_pixel;
    int linelen;
    int yres_virtual = var->yres_virtual;

    if (mono_moni) {
        /* monochrome monitor: only ST high (640x400x1) exists */
        if (bpp > 1 || xres > sttt_xres || yres > st_yres)
            return -EINVAL;
        par->hw.st.mode = ST_HIGH;
        xres = sttt_xres;
        yres = st_yres;
        bpp = 1;
    } else {
        if (bpp > 4 || xres > sttt_xres || yres > st_yres)
            return -EINVAL;
        if (bpp > 2) {
            if (xres > sttt_xres / 2 || yres > st_yres / 2)
                return -EINVAL;
            par->hw.st.mode = ST_LOW;
            xres = sttt_xres / 2;
            yres = st_yres / 2;
            bpp = 4;
        } else if (bpp > 1) {
            if (xres > sttt_xres || yres > st_yres / 2)
                return -EINVAL;
            par->hw.st.mode = ST_MID;
            xres = sttt_xres;
            yres = st_yres / 2;
            bpp = 2;
        } else
            return -EINVAL;
    }
    if (yres_virtual <= 0)
        yres_virtual = 0;
    else if (yres_virtual < yres)
        yres_virtual = yres;
    if (var->sync & FB_SYNC_EXT)
        par->hw.st.sync = (par->hw.st.sync & ~1) | 1;
    else
        par->hw.st.sync = (par->hw.st.sync & ~1);
    linelen = xres * bpp / 8;
    if (yres_virtual * linelen > screen_len && screen_len)
        return -EINVAL;
    if (yres * linelen > screen_len && screen_len)
        return -EINVAL;
    if (var->yoffset + yres > yres_virtual && yres_virtual)
        return -EINVAL;
    par->yres_virtual = yres_virtual;
    par->screen_base = screen_base + var->yoffset * linelen;
    par->next_line = linelen;
    return 0;
}

/* Inverse of stste_decode_var: build a full fb_var from the ST mode in
 * *par. Timing values are placeholders (see "these are incorrect").
 */
static int stste_encode_var(struct fb_var_screeninfo *var,
                            struct atafb_par *par)
{
    int linelen;

    memset(var, 0, sizeof(struct fb_var_screeninfo));
    /* STE palette has 4 bits per component, plain ST only 3 */
    var->red.offset = 0;
    var->red.length = ATARIHW_PRESENT(EXTD_SHIFTER) ? 4 : 3;
    var->red.msb_right = 0;
    var->grayscale = 0;

    var->pixclock = 31041;
    var->left_margin = 120;     /* these are incorrect */
    var->right_margin = 100;
    var->upper_margin = 8;
    var->lower_margin = 16;
    var->hsync_len = 140;
    var->vsync_len = 30;

    var->height = -1;
    var->width = -1;

    if (!(par->hw.st.sync & 1))
        var->sync = 0;
    else
        var->sync = FB_SYNC_EXT;

    switch (par->hw.st.mode & 3) {
    case ST_LOW:
        var->xres = sttt_xres / 2;
        var->yres = st_yres / 2;
        var->bits_per_pixel = 4;
        break;
    case ST_MID:
        var->xres = sttt_xres;
        var->yres = st_yres / 2;
        var->bits_per_pixel = 2;
        break;
    case ST_HIGH:
        var->xres = sttt_xres;
        var->yres = st_yres;
        var->bits_per_pixel = 1;
        break;
    }
    var->blue = var->green = var->red;
    var->transp.offset = 0;
    var->transp.length = 0;
    var->transp.msb_right = 0;
    var->xres_virtual = sttt_xres_virtual;
    linelen = var->xres_virtual * var->bits_per_pixel / 8;
    ovsc_addlen = linelen * (sttt_yres_virtual - st_yres);

    if (!use_hwscroll)
        var->yres_virtual = var->yres;
    else if (screen_len) {
        if (par->yres_virtual)
            var->yres_virtual = par->yres_virtual;
        else
            /* yres_virtual == 0 means use maximum */
            var->yres_virtual = screen_len / linelen;
    } else {
        if (hwscroll < 0)
            var->yres_virtual = 2 * var->yres;
        else
            var->yres_virtual = var->yres + hwscroll * 16;
    }
    var->xoffset = 0;
    if (screen_base)
        var->yoffset = (par->screen_base - screen_base) / linelen;
    else
        var->yoffset = 0;
    var->nonstd = 0;
    var->activate = 0;
    var->vmode = FB_VMODE_NONINTERLACED;
    return 0;
}

/* Read the current ST(E) shifter state into *par. */
static void stste_get_par(struct atafb_par *par)
{
    unsigned long addr;

    par->hw.st.mode = shifter_tt.st_shiftmode;
    par->hw.st.sync = shifter.syncmode;
    addr = ((shifter.bas_hi & 0xff) << 16) |
           ((shifter.bas_md & 0xff) << 8);
    /* low byte of the base address exists only on the STE shifter */
    if (ATARIHW_PRESENT(EXTD_SHIFTER))
        addr |= (shifter.bas_lo & 0xff);
    par->screen_base = phys_to_virt(addr);
}

/* Program the ST(E) shifter from *par. */
static void stste_set_par(struct atafb_par *par)
{
    shifter_tt.st_shiftmode = par->hw.st.mode;
    shifter.syncmode = par->hw.st.sync;
    /* only set screen_base if really necessary */
    if (current_par.screen_base != par->screen_base)
        fbhw->set_screen_base(par->screen_base);
}

/* Load one ST(E) palette entry (components reduced to 4 bits). */
static int stste_setcolreg(unsigned int regno, unsigned int red,
                           unsigned int green, unsigned int blue,
                           unsigned int transp, struct fb_info *info)
{
    if (regno > 15)
        return 1;
    red >>= 12;
    blue >>= 12;
    green >>= 12;
    if (ATARIHW_PRESENT(EXTD_SHIFTER))
        /* STE: 4th (LSB) bit of each component lives in a high nibble bit */
        shifter_tt.color_reg[regno] =
            (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) |
            (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) |
            ((blue & 0xe) >> 1) | ((blue & 1) << 3);
    else
        shifter_tt.color_reg[regno] =
            ((red & 0xe) << 7) |
            ((green & 0xe) << 3) |
            ((blue & 0xe) >> 1);
    return 0;
}

static int stste_detect(void)
{
    struct atafb_par par;

    /* Determine the connected monitor: The DMA sound must be
     * disabled before reading the MFP GPIP, because the Sound
     * Done Signal and the Monochrome Detect are XORed together!
     */
    if (ATARIHW_PRESENT(PCM_8BIT)) {
        tt_dmasnd.ctrl = DMASND_CTRL_OFF;
        udelay(20);     /* wait a while for things to settle down */
    }
    mono_moni = (st_mfp.par_dt_reg & 0x80) == 0;

    stste_get_par(&par);
    stste_encode_var(&atafb_predefined[0], &par);

    if (!ATARIHW_PRESENT(EXTD_SHIFTER))
        use_hwscroll = 0;
    return 1;
}

/* Write the physical screen base address into the shifter registers. */
static void stste_set_screen_base(void *s_base)
{
    unsigned long addr;

    addr = virt_to_phys(s_base);
    /* Setup Screen Memory */
    shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
    shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
    /* low byte is writable only with the extended (STE) shifter */
    if (ATARIHW_PRESENT(EXTD_SHIFTER))
        shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
}

#endif /* ATAFB_STE */

/* Switching the screen size should be done during vsync, otherwise
 * the margins may get messed up. This is a well known problem of
 * the ST's video system.
 *
 * Unfortunately there is hardly any way to find the vsync, as the
 * vertical blank interrupt is no longer in time on machines with
 * overscan type modifications.
 *
 * We can, however, use Timer B to safely detect the black shoulder,
 * but then we've got to guess an appropriate delay to find the vsync.
 * This might not work on every machine.
 *
 * martin_rogge @ ki.maus.de, 8th Aug 1995
 */

#define LINE_DELAY  (mono_moni ? 30 : 70)
#define SYNC_DELAY  (mono_moni ? 1500 : 2000)

/* SWITCH_ACIA may be used for Falcon (ScreenBlaster III internal!) */
/* Trigger the external overscan hardware: busy-wait on MFP Timer B to
 * find the vertical blank, then pulse the configured switch (ACIA keyboard,
 * ACIA MIDI or YM sound port bits). Runs with interrupts disabled.
 */
static void st_ovsc_switch(void)
{
    unsigned long flags;
    register unsigned char old, new;

    if (!(atari_switches & ATARI_SWITCH_OVSC_MASK))
        return;
    local_irq_save(flags);

    st_mfp.tim_ct_b = 0x10;
    st_mfp.active_edge |= 8;
    st_mfp.tim_ct_b = 0;
    st_mfp.tim_dt_b = 0xf0;
    st_mfp.tim_ct_b = 8;
    while (st_mfp.tim_dt_b > 1)     /* TOS does it this way, don't ask why */
        ;
    new = st_mfp.tim_dt_b;

    do {
        udelay(LINE_DELAY);
        old = new;
        new = st_mfp.tim_dt_b;
    } while (old != new);

    st_mfp.tim_ct_b = 0x10;
    udelay(SYNC_DELAY);

    if (atari_switches & ATARI_SWITCH_OVSC_IKBD)
        acia.key_ctrl = ACIA_DIV64 | ACIA_D8N1S | ACIA_RHTID | ACIA_RIE;
    if (atari_switches & ATARI_SWITCH_OVSC_MIDI)
        acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | ACIA_RHTID;
    if (atari_switches & (ATARI_SWITCH_OVSC_SND6|ATARI_SWITCH_OVSC_SND7)) {
        sound_ym.rd_data_reg_sel = 14;
        sound_ym.wd_data = sound_ym.rd_data_reg_sel |
            ((atari_switches & ATARI_SWITCH_OVSC_SND6) ? 0x40:0) |
            ((atari_switches & ATARI_SWITCH_OVSC_SND7) ? 0x80:0);
    }
    local_irq_restore(flags);
}

/* ------------------- External Video ---------------------- */

#ifdef ATAFB_EXT

/* Describe the externally attached video card's fixed parameters. */
static int ext_encode_fix(struct fb_fix_screeninfo *fix,
                          struct atafb_par *par)
{
    strcpy(fix->id, "Unknown Extern");
    fix->smem_start = (unsigned long)external_addr;
    fix->smem_len = PAGE_ALIGN(external_len);
    if (external_depth == 1) {
        fix->type = FB_TYPE_PACKED_PIXELS;
        /* The letters 'n' and 'i' in the "atavideo=external:" stand
         * for "normal" and "inverted", rsp., in the monochrome case */
        fix->visual =
            (external_pmode == FB_TYPE_INTERLEAVED_PLANES ||
             external_pmode == FB_TYPE_PACKED_PIXELS) ?
                FB_VISUAL_MONO10 : FB_VISUAL_MONO01;
    } else {
        /* Use STATIC if we don't know how to access color registers */
        int visual = external_vgaiobase ?
                     FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
        switch (external_pmode) {
        case -1:        /* truecolor */
            fix->type = FB_TYPE_PACKED_PIXELS;
            fix->visual = FB_VISUAL_TRUECOLOR;
            break;
        case FB_TYPE_PACKED_PIXELS:
            fix->type = FB_TYPE_PACKED_PIXELS;
            fix->visual = visual;
            break;
        case FB_TYPE_PLANES:
            fix->type = FB_TYPE_PLANES;
            fix->visual = visual;
            break;
        case FB_TYPE_INTERLEAVED_PLANES:
            fix->type = FB_TYPE_INTERLEAVED_PLANES;
            fix->type_aux = 2;
            fix->visual = visual;
            break;
        }
    }
    fix->xpanstep = 0;
    fix->ypanstep = 0;
    fix->ywrapstep = 0;
    fix->line_length = par->next_line;
    return 0;
}

/* External video has one fixed mode: accept only requests within it. */
static int ext_decode_var(struct fb_var_screeninfo *var,
                          struct atafb_par *par)
{
    struct fb_var_screeninfo *myvar = &atafb_predefined[0];

    if (var->bits_per_pixel > myvar->bits_per_pixel ||
        var->xres > myvar->xres ||
        var->xres_virtual > myvar->xres_virtual ||
        var->yres > myvar->yres ||
        var->xoffset > 0 ||
        var->yoffset > 0)
        return -EINVAL;
    par->next_line = external_xres_virtual * external_depth / 8;
    return 0;
}

/* Build the single fixed external-video mode description.
 * Timing values are placeholders (see "surely incorrect").
 */
static int ext_encode_var(struct fb_var_screeninfo *var,
                          struct atafb_par *par)
{
    memset(var, 0, sizeof(struct fb_var_screeninfo));
    var->red.offset = 0;
    var->red.length = (external_pmode == -1) ? external_depth / 3 :
            (external_vgaiobase ? external_bitspercol : 0);
    var->red.msb_right = 0;
    var->grayscale = 0;

    var->pixclock = 31041;
    var->left_margin = 120;     /* these are surely incorrect */
    var->right_margin = 100;
    var->upper_margin = 8;
    var->lower_margin = 16;
    var->hsync_len = 140;
    var->vsync_len = 30;

    var->height = -1;
    var->width = -1;

    var->sync = 0;

    var->xres = external_xres;
    var->yres = external_yres;
    var->xres_virtual = external_xres_virtual;
    var->bits_per_pixel = external_depth;

    var->blue = var->green = var->red;
    var->transp.offset = 0;
    var->transp.length = 0;
    var->transp.msb_right = 0;
    var->yres_virtual = var->yres;
    var->xoffset = 0;
    var->yoffset = 0;
    var->nonstd = 0;
    var->activate = 0;
    var->vmode = FB_VMODE_NONINTERLACED;
    return 0;
}

static void ext_get_par(struct atafb_par *par)
{
    par->screen_base = external_addr;
}

/* External video: nothing to program, the mode is fixed. */
static void ext_set_par(struct atafb_par *par)
{
}

/* VGA-style DAC access relative to the card's I/O base */
#define OUTB(port,val) \
    *((unsigned volatile char *) ((port)+external_vgaiobase)) = (val)
#define INB(port) \
    (*((unsigned volatile char *) ((port)+external_vgaiobase)))
/* DAC settle delay: two dummy reads of the input status register */
#define DACDelay                \
    do {                        \
        unsigned char tmp = INB(0x3da); \
        tmp = INB(0x3da);       \
    } while (0)

/* Load one palette entry into the external card's DAC (VGA or MV300). */
static int ext_setcolreg(unsigned int regno, unsigned int red,
                         unsigned int green, unsigned int blue,
                         unsigned int transp, struct fb_info *info)
{
    unsigned char colmask = (1 << external_bitspercol) - 1;

    if (!external_vgaiobase)
        return 1;

    if (regno > 255)
        return 1;

    switch (external_card_type) {
    case IS_VGA:
        OUTB(0x3c8, regno);
        DACDelay;
        OUTB(0x3c9, red & colmask);
        DACDelay;
        OUTB(0x3c9, green & colmask);
        DACDelay;
        OUTB(0x3c9, blue & colmask);
        DACDelay;
        return 0;

    case IS_MV300:
        OUTB((MV300_reg[regno] << 2) + 1, red);
        OUTB((MV300_reg[regno] << 2) + 1, green);
        OUTB((MV300_reg[regno] << 2) + 1, blue);
        return 0;

    default:
        return 1;
    }
}

/* "Detection" for external video: publish the boot-parameter mode. */
static int ext_detect(void)
{
    struct fb_var_screeninfo *myvar = &atafb_predefined[0];
    struct atafb_par dummy_par;

    myvar->xres = external_xres;
    myvar->xres_virtual = external_xres_virtual;
    myvar->yres = external_yres;
    myvar->bits_per_pixel =
external_depth;
    ext_encode_var(myvar, &dummy_par);
    return 1;
}

#endif /* ATAFB_EXT */

/* ------ This is the same for most hardware types -------- */

/* Write the physical screen base into the shifter (TT/Falcon variant:
 * all three address bytes are writable).
 */
static void set_screen_base(void *s_base)
{
    unsigned long addr;

    addr = virt_to_phys(s_base);
    /* Setup Screen Memory */
    shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
    shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
    shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
}

/* Generic panning: recompute the screen base from the x/y offsets.
 * x panning needs the extended shifter and is rounded to 16 pixels.
 */
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
    struct atafb_par *par = (struct atafb_par *)info->par;

    if (!fbhw->set_screen_base ||
        (!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
        return -EINVAL;
    var->xoffset = up(var->xoffset, 16);
    par->screen_base = screen_base +
            (var->yoffset * info->var.xres_virtual + var->xoffset)
            * info->var.bits_per_pixel / 8;
    fbhw->set_screen_base(par->screen_base);
    return 0;
}

/* ------------ Interfaces to hardware functions ------------ */

#ifdef ATAFB_TT
static struct fb_hwswitch tt_switch = {
    .detect         = tt_detect,
    .encode_fix     = tt_encode_fix,
    .decode_var     = tt_decode_var,
    .encode_var     = tt_encode_var,
    .get_par        = tt_get_par,
    .set_par        = tt_set_par,
    .set_screen_base = set_screen_base,
    .pan_display    = pan_display,
};
#endif

#ifdef ATAFB_FALCON
static struct fb_hwswitch falcon_switch = {
    .detect         = falcon_detect,
    .encode_fix     = falcon_encode_fix,
    .decode_var     = falcon_decode_var,
    .encode_var     = falcon_encode_var,
    .get_par        = falcon_get_par,
    .set_par        = falcon_set_par,
    .set_screen_base = set_screen_base,
    .blank          = falcon_blank,
    .pan_display    = falcon_pan_display,
};
#endif

#ifdef ATAFB_STE
static struct fb_hwswitch st_switch = {
    .detect         = stste_detect,
    .encode_fix     = stste_encode_fix,
    .decode_var     = stste_decode_var,
    .encode_var     = stste_encode_var,
    .get_par        = stste_get_par,
    .set_par        = stste_set_par,
    .set_screen_base = stste_set_screen_base,
    .pan_display    = pan_display
};
#endif

#ifdef ATAFB_EXT
static struct fb_hwswitch ext_switch = {
    .detect         = ext_detect,
    .encode_fix     = ext_encode_fix,
    .decode_var     = ext_decode_var,
    .encode_var     = ext_encode_var,
    .get_par        = ext_get_par,
    .set_par        = ext_set_par,
};
#endif

/* Return the cached current parameters, falling back to the hardware. */
static void ata_get_par(struct atafb_par *par)
{
    if (current_par_valid)
        *par = current_par;
    else
        fbhw->get_par(par);
}

/* Program *par into the hardware and cache it as current. */
static void ata_set_par(struct atafb_par *par)
{
    fbhw->set_par(par);
    current_par = *par;
    current_par_valid = 1;
}


/* =========================================================== */
/* ============== Hardware Independent Functions ============= */
/* =========================================================== */

/* used for hardware scrolling */

/* Validate *var via the hardware switch, optionally activate it, and
 * round *var to the values actually chosen.
 */
static int do_fb_set_var(struct fb_var_screeninfo *var, int isactive)
{
    int err, activate;
    struct atafb_par par;

    err = fbhw->decode_var(var, &par);
    if (err)
        return err;
    activate = var->activate;
    if (((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) && isactive)
        ata_set_par(&par);
    fbhw->encode_var(var, &par);
    var->activate = activate;
    return 0;
}

/* fbhw->encode_fix() must be called with fb_info->mm_lock held
 * if it is called after the register_framebuffer() - not a case here
 */
static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
{
    struct atafb_par par;
    int err;
    // Get fix directly (case con == -1 before)??
    err = fbhw->decode_var(&info->var, &par);
    if (err)
        return err;
    memset(fix, 0, sizeof(struct fb_fix_screeninfo));
    err = fbhw->encode_fix(fix, &par);
    return err;
}

/* Report the currently active mode as an fb_var. */
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
    struct atafb_par par;

    ata_get_par(&par);
    fbhw->encode_var(var, &par);

    return 0;
}

// No longer called by fbcon!
// Still called by set_var internally static void atafb_set_disp(struct fb_info *info) { atafb_get_var(&info->var, info); atafb_get_fix(&info->fix, info); info->screen_base = (void *)info->fix.smem_start; } static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { red >>= 8; green >>= 8; blue >>= 8; return info->fbops->fb_setcolreg(regno, red, green, blue, transp, info); } static int atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { int xoffset = var->xoffset; int yoffset = var->yoffset; int err; if (var->vmode & FB_VMODE_YWRAP) { if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset) return -EINVAL; } else { if (xoffset + info->var.xres > info->var.xres_virtual || yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; } if (fbhw->pan_display) { err = fbhw->pan_display(var, info); if (err) return err; } else return -EINVAL; info->var.xoffset = xoffset; info->var.yoffset = yoffset; if (var->vmode & FB_VMODE_YWRAP) info->var.vmode |= FB_VMODE_YWRAP; else info->var.vmode &= ~FB_VMODE_YWRAP; return 0; } /* * generic drawing routines; imageblit needs updating for image depth > 1 */ #if BITS_PER_LONG == 32 #define BYTES_PER_LONG 4 #define SHIFT_PER_LONG 5 #elif BITS_PER_LONG == 64 #define BYTES_PER_LONG 8 #define SHIFT_PER_LONG 6 #else #define Please update me #endif static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; u32 width, height; if (!rect->width || !rect->height) return; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_fillrect(info, rect); return; } #endif /* * We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly. * */ x2 = rect->dx + rect->width; y2 = rect->dy + rect->height; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - rect->dx; height = y2 - rect->dy; if (info->var.bits_per_pixel == 1) atafb_mfb_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); else atafb_iplan2p8_fillrect(info, par->next_line, rect->color, rect->dy, rect->dx, height, width); return; } static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; u32 dx, dy, sx, sy, width, height; int rev_copy = 0; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_copyarea(info, area); return; } #endif /* clip the destination */ x2 = area->dx + area->width; y2 = area->dy + area->height; dx = area->dx > 0 ? area->dx : 0; dy = area->dy > 0 ? area->dy : 0; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - dx; height = y2 - dy; if (area->sx + dx < area->dx || area->sy + dy < area->dy) return; /* update sx,sy */ sx = area->sx + (dx - area->dx); sy = area->sy + (dy - area->dy); /* the source must be completely inside the virtual screen */ if (sx + width > info->var.xres_virtual || sy + height > info->var.yres_virtual) return; if (dy > sy || (dy == sy && dx > sx)) { dy += height; sy += height; rev_copy = 1; } if (info->var.bits_per_pixel == 1) atafb_mfb_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); else atafb_iplan2p8_copyarea(info, par->next_line, sy, sx, dy, dx, height, width); return; } static void atafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct atafb_par *par = (struct atafb_par *)info->par; int x2, y2; unsigned long *dst; int dst_idx; const char *src; u32 dx, dy, width, height, pitch; #ifdef ATAFB_FALCON if (info->var.bits_per_pixel == 16) { cfb_imageblit(info, image); return; } #endif /* * We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly like we are * doing here. */ x2 = image->dx + image->width; y2 = image->dy + image->height; dx = image->dx; dy = image->dy; x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual; y2 = y2 < info->var.yres_virtual ? 
y2 : info->var.yres_virtual; width = x2 - dx; height = y2 - dy; if (image->depth == 1) { // used for font data dst = (unsigned long *) ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1)); dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8; dst_idx += dy * par->next_line * 8 + dx; src = image->data; pitch = (image->width + 7) / 8; while (height--) { if (info->var.bits_per_pixel == 1) atafb_mfb_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else if (info->var.bits_per_pixel == 2) atafb_iplan2p2_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else if (info->var.bits_per_pixel == 4) atafb_iplan2p4_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); else atafb_iplan2p8_linefill(info, par->next_line, dy, dx, width, src, image->bg_color, image->fg_color); dy++; src += pitch; } } else { c2p_iplan2(info->screen_base, image->data, dx, dy, width, height, par->next_line, image->width, info->var.bits_per_pixel); } } static int atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { switch (cmd) { #ifdef FBCMD_GET_CURRENTPAR case FBCMD_GET_CURRENTPAR: if (copy_to_user((void *)arg, (void *)&current_par, sizeof(struct atafb_par))) return -EFAULT; return 0; #endif #ifdef FBCMD_SET_CURRENTPAR case FBCMD_SET_CURRENTPAR: if (copy_from_user((void *)&current_par, (void *)arg, sizeof(struct atafb_par))) return -EFAULT; ata_set_par(&current_par); return 0; #endif } return -EINVAL; } /* (un)blank/poweroff * 0 = unblank * 1 = blank * 2 = suspend vsync * 3 = suspend hsync * 4 = off */ static int atafb_blank(int blank, struct fb_info *info) { unsigned short black[16]; struct fb_cmap cmap; if (fbhw->blank && !fbhw->blank(blank)) return 1; if (blank) { memset(black, 0, 16 * sizeof(unsigned short)); cmap.red = black; cmap.green = black; cmap.blue = black; cmap.transp = NULL; cmap.start = 0; cmap.len = 16; fb_set_cmap(&cmap, info); } #if 0 
else do_install_cmap(info); #endif return 0; } /* * New fbcon interface ... */ /* check var by decoding var into hw par, rounding if necessary, * then encoding hw par back into new, validated var */ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err; struct atafb_par par; /* Validate wanted screen parameters */ // if ((err = ata_decode_var(var, &par))) err = fbhw->decode_var(var, &par); if (err) return err; /* Encode (possibly rounded) screen parameters */ fbhw->encode_var(var, &par); return 0; } /* actually set hw par by decoding var, then setting hardware from * hw par just decoded */ static int atafb_set_par(struct fb_info *info) { struct atafb_par *par = (struct atafb_par *)info->par; /* Decode wanted screen parameters */ fbhw->decode_var(&info->var, par); mutex_lock(&info->mm_lock); fbhw->encode_fix(&info->fix, par); mutex_unlock(&info->mm_lock); /* Set new videomode */ ata_set_par(par); return 0; } static struct fb_ops atafb_ops = { .owner = THIS_MODULE, .fb_check_var = atafb_check_var, .fb_set_par = atafb_set_par, .fb_setcolreg = atafb_setcolreg, .fb_blank = atafb_blank, .fb_pan_display = atafb_pan_display, .fb_fillrect = atafb_fillrect, .fb_copyarea = atafb_copyarea, .fb_imageblit = atafb_imageblit, .fb_ioctl = atafb_ioctl, }; static void check_default_par(int detected_mode) { char default_name[10]; int i; struct fb_var_screeninfo var; unsigned long min_mem; /* First try the user supplied mode */ if (default_par) { var = atafb_predefined[default_par - 1]; var.activate = FB_ACTIVATE_TEST; if (do_fb_set_var(&var, 1)) default_par = 0; /* failed */ } /* Next is the autodetected one */ if (!default_par) { var = atafb_predefined[detected_mode - 1]; /* autodetect */ var.activate = FB_ACTIVATE_TEST; if (!do_fb_set_var(&var, 1)) default_par = detected_mode; } /* If that also failed, try some default modes... */ if (!default_par) { /* try default1, default2... 
*/ for (i = 1; i < 10; i++) { sprintf(default_name,"default%d", i); default_par = get_video_mode(default_name); if (!default_par) panic("can't set default video mode"); var = atafb_predefined[default_par - 1]; var.activate = FB_ACTIVATE_TEST; if (!do_fb_set_var(&var,1)) break; /* ok */ } } min_mem = var.xres_virtual * var.yres_virtual * var.bits_per_pixel / 8; if (default_mem_req < min_mem) default_mem_req = min_mem; } #ifdef ATAFB_EXT static void __init atafb_setup_ext(char *spec) { int xres, xres_virtual, yres, depth, planes; unsigned long addr, len; char *p; /* Format is: <xres>;<yres>;<depth>;<plane organ.>; * <screen mem addr> * [;<screen mem length>[;<vgaiobase>[;<bits-per-col>[;<colorreg-type> * [;<xres-virtual>]]]]] * * 09/23/97 Juergen * <xres_virtual>: hardware's x-resolution (f.e. ProMST) * * Even xres_virtual is available, we neither support panning nor hw-scrolling! */ p = strsep(&spec, ";"); if (!p || !*p) return; xres_virtual = xres = simple_strtoul(p, NULL, 10); if (xres <= 0) return; p = strsep(&spec, ";"); if (!p || !*p) return; yres = simple_strtoul(p, NULL, 10); if (yres <= 0) return; p = strsep(&spec, ";"); if (!p || !*p) return; depth = simple_strtoul(p, NULL, 10); if (depth != 1 && depth != 2 && depth != 4 && depth != 8 && depth != 16 && depth != 24) return; p = strsep(&spec, ";"); if (!p || !*p) return; if (*p == 'i') planes = FB_TYPE_INTERLEAVED_PLANES; else if (*p == 'p') planes = FB_TYPE_PACKED_PIXELS; else if (*p == 'n') planes = FB_TYPE_PLANES; else if (*p == 't') planes = -1; /* true color */ else return; p = strsep(&spec, ";"); if (!p || !*p) return; addr = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (!p || !*p) len = xres * yres * depth / 8; else len = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (p && *p) external_vgaiobase = simple_strtoul(p, NULL, 0); p = strsep(&spec, ";"); if (p && *p) { external_bitspercol = simple_strtoul(p, NULL, 0); if (external_bitspercol > 8) external_bitspercol = 8; else if 
(external_bitspercol < 1) external_bitspercol = 1; } p = strsep(&spec, ";"); if (p && *p) { if (!strcmp(p, "vga")) external_card_type = IS_VGA; if (!strcmp(p, "mv300")) external_card_type = IS_MV300; } p = strsep(&spec, ";"); if (p && *p) { xres_virtual = simple_strtoul(p, NULL, 10); if (xres_virtual < xres) xres_virtual = xres; if (xres_virtual * yres * depth / 8 > len) len = xres_virtual * yres * depth / 8; } external_xres = xres; external_xres_virtual = xres_virtual; external_yres = yres; external_depth = depth; external_pmode = planes; external_addr = (void *)addr; external_len = len; if (external_card_type == IS_MV300) { switch (external_depth) { case 1: MV300_reg = MV300_reg_1bit; break; case 4: MV300_reg = MV300_reg_4bit; break; case 8: MV300_reg = MV300_reg_8bit; break; } } } #endif /* ATAFB_EXT */ static void __init atafb_setup_int(char *spec) { /* Format to config extended internal video hardware like OverScan: * "internal:<xres>;<yres>;<xres_max>;<yres_max>;<offset>" * Explanation: * <xres>: x-resolution * <yres>: y-resolution * The following are only needed if you have an overscan which * needs a black border: * <xres_max>: max. length of a line in pixels your OverScan hardware would allow * <yres_max>: max. 
number of lines your OverScan hardware would allow * <offset>: Offset from physical beginning to visible beginning * of screen in bytes */ int xres; char *p; if (!(p = strsep(&spec, ";")) || !*p) return; xres = simple_strtoul(p, NULL, 10); if (!(p = strsep(&spec, ";")) || !*p) return; sttt_xres = xres; tt_yres = st_yres = simple_strtoul(p, NULL, 10); if ((p = strsep(&spec, ";")) && *p) sttt_xres_virtual = simple_strtoul(p, NULL, 10); if ((p = strsep(&spec, ";")) && *p) sttt_yres_virtual = simple_strtoul(p, NULL, 0); if ((p = strsep(&spec, ";")) && *p) ovsc_offset = simple_strtoul(p, NULL, 0); if (ovsc_offset || (sttt_yres_virtual != st_yres)) use_hwscroll = 0; } #ifdef ATAFB_FALCON static void __init atafb_setup_mcap(char *spec) { char *p; int vmin, vmax, hmin, hmax; /* Format for monitor capabilities is: <Vmin>;<Vmax>;<Hmin>;<Hmax> * <V*> vertical freq. in Hz * <H*> horizontal freq. in kHz */ if (!(p = strsep(&spec, ";")) || !*p) return; vmin = simple_strtoul(p, NULL, 10); if (vmin <= 0) return; if (!(p = strsep(&spec, ";")) || !*p) return; vmax = simple_strtoul(p, NULL, 10); if (vmax <= 0 || vmax <= vmin) return; if (!(p = strsep(&spec, ";")) || !*p) return; hmin = 1000 * simple_strtoul(p, NULL, 10); if (hmin <= 0) return; if (!(p = strsep(&spec, "")) || !*p) return; hmax = 1000 * simple_strtoul(p, NULL, 10); if (hmax <= 0 || hmax <= hmin) return; fb_info.monspecs.vfmin = vmin; fb_info.monspecs.vfmax = vmax; fb_info.monspecs.hfmin = hmin; fb_info.monspecs.hfmax = hmax; } #endif /* ATAFB_FALCON */ static void __init atafb_setup_user(char *spec) { /* Format of user defined video mode is: <xres>;<yres>;<depth> */ char *p; int xres, yres, depth, temp; p = strsep(&spec, ";"); if (!p || !*p) return; xres = simple_strtoul(p, NULL, 10); p = strsep(&spec, ";"); if (!p || !*p) return; yres = simple_strtoul(p, NULL, 10); p = strsep(&spec, ""); if (!p || !*p) return; depth = simple_strtoul(p, NULL, 10); temp = get_video_mode("user0"); if (temp) { default_par = temp; 
atafb_predefined[default_par - 1].xres = xres; atafb_predefined[default_par - 1].yres = yres; atafb_predefined[default_par - 1].bits_per_pixel = depth; } } int __init atafb_setup(char *options) { char *this_opt; int temp; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if ((temp = get_video_mode(this_opt))) { default_par = temp; mode_option = this_opt; } else if (!strcmp(this_opt, "inverse")) inverse = 1; else if (!strncmp(this_opt, "hwscroll_", 9)) { hwscroll = simple_strtoul(this_opt + 9, NULL, 10); if (hwscroll < 0) hwscroll = 0; if (hwscroll > 200) hwscroll = 200; } #ifdef ATAFB_EXT else if (!strcmp(this_opt, "mv300")) { external_bitspercol = 8; external_card_type = IS_MV300; } else if (!strncmp(this_opt, "external:", 9)) atafb_setup_ext(this_opt + 9); #endif else if (!strncmp(this_opt, "internal:", 9)) atafb_setup_int(this_opt + 9); #ifdef ATAFB_FALCON else if (!strncmp(this_opt, "eclock:", 7)) { fext.f = simple_strtoul(this_opt + 7, NULL, 10); /* external pixelclock in kHz --> ps */ fext.t = 1000000000 / fext.f; fext.f *= 1000; } else if (!strncmp(this_opt, "monitorcap:", 11)) atafb_setup_mcap(this_opt + 11); #endif else if (!strcmp(this_opt, "keep")) DontCalcRes = 1; else if (!strncmp(this_opt, "R", 1)) atafb_setup_user(this_opt + 1); } return 0; } int __init atafb_init(void) { int pad, detected_mode, error; unsigned int defmode = 0; unsigned long mem_req; #ifndef MODULE char *option = NULL; if (fb_get_options("atafb", &option)) return -ENODEV; atafb_setup(option); #endif printk("atafb_init: start\n"); if (!MACH_IS_ATARI) return -ENODEV; do { #ifdef ATAFB_EXT if (external_addr) { printk("atafb_init: initializing external hw\n"); fbhw = &ext_switch; atafb_ops.fb_setcolreg = &ext_setcolreg; defmode = DEFMODE_EXT; break; } #endif #ifdef ATAFB_TT if (ATARIHW_PRESENT(TT_SHIFTER)) { printk("atafb_init: initializing TT hw\n"); fbhw = &tt_switch; atafb_ops.fb_setcolreg = &tt_setcolreg; defmode = 
DEFMODE_TT; break; } #endif #ifdef ATAFB_FALCON if (ATARIHW_PRESENT(VIDEL_SHIFTER)) { printk("atafb_init: initializing Falcon hw\n"); fbhw = &falcon_switch; atafb_ops.fb_setcolreg = &falcon_setcolreg; error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, IRQ_TYPE_PRIO, "framebuffer:modeswitch", falcon_vbl_switcher); if (error) return error; defmode = DEFMODE_F30; break; } #endif #ifdef ATAFB_STE if (ATARIHW_PRESENT(STND_SHIFTER) || ATARIHW_PRESENT(EXTD_SHIFTER)) { printk("atafb_init: initializing ST/E hw\n"); fbhw = &st_switch; atafb_ops.fb_setcolreg = &stste_setcolreg; defmode = DEFMODE_STE; break; } fbhw = &st_switch; atafb_ops.fb_setcolreg = &stste_setcolreg; printk("Cannot determine video hardware; defaulting to ST(e)\n"); #else /* ATAFB_STE */ /* no default driver included */ /* Nobody will ever see this message :-) */ panic("Cannot initialize video hardware"); #endif } while (0); /* Multisync monitor capabilities */ /* Atari-TOS defaults if no boot option present */ if (fb_info.monspecs.hfmin == 0) { fb_info.monspecs.hfmin = 31000; fb_info.monspecs.hfmax = 32000; fb_info.monspecs.vfmin = 58; fb_info.monspecs.vfmax = 62; } detected_mode = fbhw->detect(); check_default_par(detected_mode); #ifdef ATAFB_EXT if (!external_addr) { #endif /* ATAFB_EXT */ mem_req = default_mem_req + ovsc_offset + ovsc_addlen; mem_req = PAGE_ALIGN(mem_req) + PAGE_SIZE; screen_base = atari_stram_alloc(mem_req, "atafb"); if (!screen_base) panic("Cannot allocate screen memory"); memset(screen_base, 0, mem_req); pad = -(unsigned long)screen_base & (PAGE_SIZE - 1); screen_base += pad; real_screen_base = screen_base + ovsc_offset; screen_len = (mem_req - pad - ovsc_offset) & PAGE_MASK; st_ovsc_switch(); if (CPU_IS_040_OR_060) { /* On a '040+, the cache mode of video RAM must be set to * write-through also for internal video hardware! 
*/ cache_push(virt_to_phys(screen_base), screen_len); kernel_set_cachemode(screen_base, screen_len, IOMAP_WRITETHROUGH); } printk("atafb: screen_base %p real_screen_base %p screen_len %d\n", screen_base, real_screen_base, screen_len); #ifdef ATAFB_EXT } else { /* Map the video memory (physical address given) to somewhere * in the kernel address space. */ external_addr = ioremap_writethrough((unsigned long)external_addr, external_len); if (external_vgaiobase) external_vgaiobase = (unsigned long)ioremap(external_vgaiobase, 0x10000); screen_base = real_screen_base = external_addr; screen_len = external_len & PAGE_MASK; memset (screen_base, 0, external_len); } #endif /* ATAFB_EXT */ // strcpy(fb_info.mode->name, "Atari Builtin "); fb_info.fbops = &atafb_ops; // try to set default (detected; requested) var do_fb_set_var(&atafb_predefined[default_par - 1], 1); // reads hw state into current par, which may not be sane yet ata_get_par(&current_par); fb_info.par = &current_par; // tries to read from HW which may not be initialized yet // so set sane var first, then call atafb_set_par atafb_get_var(&fb_info.var, &fb_info); #ifdef ATAFB_FALCON fb_info.pseudo_palette = current_par.hw.falcon.pseudo_palette; #endif fb_info.flags = FBINFO_FLAG_DEFAULT; if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, atafb_modedb, NUM_TOTAL_MODES, &atafb_modedb[defmode], fb_info.var.bits_per_pixel)) { return -EINVAL; } fb_videomode_to_modelist(atafb_modedb, NUM_TOTAL_MODES, &fb_info.modelist); atafb_set_disp(&fb_info); fb_alloc_cmap(&(fb_info.cmap), 1 << fb_info.var.bits_per_pixel, 0); printk("Determined %dx%d, depth %d\n", fb_info.var.xres, fb_info.var.yres, fb_info.var.bits_per_pixel); if ((fb_info.var.xres != fb_info.var.xres_virtual) || (fb_info.var.yres != fb_info.var.yres_virtual)) printk(" virtual %dx%d\n", fb_info.var.xres_virtual, fb_info.var.yres_virtual); if (register_framebuffer(&fb_info) < 0) { #ifdef ATAFB_EXT if (external_addr) { iounmap(external_addr); external_addr = NULL; 
} if (external_vgaiobase) { iounmap((void*)external_vgaiobase); external_vgaiobase = 0; } #endif return -EINVAL; } // FIXME: mode needs setting! //printk("fb%d: %s frame buffer device, using %dK of video memory\n", // fb_info.node, fb_info.mode->name, screen_len>>10); printk("fb%d: frame buffer device, using %dK of video memory\n", fb_info.node, screen_len >> 10); /* TODO: This driver cannot be unloaded yet */ return 0; } module_init(atafb_init); #ifdef MODULE MODULE_LICENSE("GPL"); int cleanup_module(void) { unregister_framebuffer(&fb_info); return atafb_deinit(); } #endif /* MODULE */
gpl-2.0
showp1984/bricked-pyramid-3.0
arch/mips/bcm63xx/dev-enet.c
10681
3918
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

/*
 * MMIO region for the ENET DMA engine, which is shared by both MACs.
 * Registered once as its own platform device the first time either
 * MAC is registered.
 */
static struct resource shared_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device bcm63xx_enet_shared_device = {
	.name		= "bcm63xx_enet_shared",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(shared_res),
	.resource	= shared_res,
};

/* non-zero once bcm63xx_enet_shared_device has been registered */
static int shared_device_registered;

/*
 * Per-MAC resources for unit 0: one MMIO region followed by three
 * interrupts (MAC, RX DMA, TX DMA) — all filled in at registration time.
 */
static struct resource enet0_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet0_pd;

static struct platform_device bcm63xx_enet0_device = {
	.name		= "bcm63xx_enet",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(enet0_res),
	.resource	= enet0_res,
	.dev		= {
		.platform_data = &enet0_pd,
	},
};

/* Per-MAC resources for unit 1; same layout as enet0_res. */
static struct resource enet1_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet1_pd;

static struct platform_device bcm63xx_enet1_device = {
	.name		= "bcm63xx_enet",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(enet1_res),
	.resource	= enet1_res,
	.dev		= {
		.platform_data = &enet1_pd,
	},
};

/*
 * bcm63xx_enet_register - register one on-SoC ethernet MAC
 * @unit: MAC index, 0 or 1 (the 6338 SoC only has unit 0)
 * @pd:   board-supplied platform data; copied into this file's private copy
 *
 * Lazily registers the shared DMA platform device on the first call, then
 * fills in the selected unit's MMIO and IRQ resources from the runtime
 * register-set/IRQ tables and registers the MAC platform device.
 *
 * Returns 0 on success or a negative errno.
 */
int __init bcm63xx_enet_register(int unit,
				 const struct bcm63xx_enet_platform_data *pd)
{
	struct platform_device *pdev;
	struct bcm63xx_enet_platform_data *dpd;
	int ret;

	if (unit > 1)
		return -ENODEV;

	/* the 6338 has a single MAC only */
	if (unit == 1 && BCMCPU_IS_6338())
		return -ENODEV;

	if (!shared_device_registered) {
		shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
		shared_res[0].end = shared_res[0].start;
		/* the 6338's DMA register window is half the usual size */
		if (BCMCPU_IS_6338())
			shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1;
		else
			shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;

		ret = platform_device_register(&bcm63xx_enet_shared_device);
		if (ret)
			return ret;
		shared_device_registered = 1;
	}

	if (unit == 0) {
		enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
		enet0_res[0].end = enet0_res[0].start;
		enet0_res[0].end += RSET_ENET_SIZE - 1;
		enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
		enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
		enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
		pdev = &bcm63xx_enet0_device;
	} else {
		enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
		enet1_res[0].end = enet1_res[0].start;
		enet1_res[0].end += RSET_ENET_SIZE - 1;
		enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
		enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
		enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
		pdev = &bcm63xx_enet1_device;
	}

	/* copy given platform data */
	dpd = pdev->dev.platform_data;
	memcpy(dpd, pd, sizeof(*pd));

	/* adjust them in case internal phy is used */
	if (dpd->use_internal_phy) {

		/* internal phy only exists for enet0 */
		if (unit == 1)
			return -ENODEV;

		dpd->phy_id = 1;
		dpd->has_phy_interrupt = 1;
		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
	}

	ret = platform_device_register(pdev);
	if (ret)
		return ret;
	return 0;
}
gpl-2.0
abyssxsy/linux-tk1
arch/sh/boards/mach-sdk7786/fpga.c
13241
1775
/* * SDK7786 FPGA Support. * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/io.h> #include <linux/bcd.h> #include <mach/fpga.h> #include <asm/sizes.h> #define FPGA_REGS_OFFSET 0x03fff800 #define FPGA_REGS_SIZE 0x490 /* * The FPGA can be mapped in any of the generally available areas, * so we attempt to scan for it using the fixed SRSTR read magic. * * Once the FPGA is located, the rest of the mapping data for the other * components can be determined dynamically from its section mapping * registers. */ static void __iomem *sdk7786_fpga_probe(void) { unsigned long area; void __iomem *base; /* * Iterate over all of the areas where the FPGA could be mapped. * The possible range is anywhere from area 0 through 6, area 7 * is reserved. */ for (area = PA_AREA0; area < PA_AREA7; area += SZ_64M) { base = ioremap_nocache(area + FPGA_REGS_OFFSET, FPGA_REGS_SIZE); if (!base) { /* Failed to remap this area, move along. */ continue; } if (ioread16(base + SRSTR) == SRSTR_MAGIC) return base; /* Found it! */ iounmap(base); } return NULL; } void __iomem *sdk7786_fpga_base; void __init sdk7786_fpga_init(void) { u16 version, date; sdk7786_fpga_base = sdk7786_fpga_probe(); if (unlikely(!sdk7786_fpga_base)) { panic("FPGA detection failed.\n"); return; } version = fpga_read_reg(FPGAVR); date = fpga_read_reg(FPGADR); pr_info("\tFPGA version:\t%d.%d (built on %d/%d/%d)\n", bcd2bin(version >> 8) & 0xf, bcd2bin(version & 0xf), ((date >> 12) & 0xf) + 2000, (date >> 8) & 0xf, bcd2bin(date & 0xff)); }
gpl-2.0
yytang2012/linux-kvm-arm
arch/sh/boards/mach-sdk7786/fpga.c
13241
1775
/* * SDK7786 FPGA Support. * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/io.h> #include <linux/bcd.h> #include <mach/fpga.h> #include <asm/sizes.h> #define FPGA_REGS_OFFSET 0x03fff800 #define FPGA_REGS_SIZE 0x490 /* * The FPGA can be mapped in any of the generally available areas, * so we attempt to scan for it using the fixed SRSTR read magic. * * Once the FPGA is located, the rest of the mapping data for the other * components can be determined dynamically from its section mapping * registers. */ static void __iomem *sdk7786_fpga_probe(void) { unsigned long area; void __iomem *base; /* * Iterate over all of the areas where the FPGA could be mapped. * The possible range is anywhere from area 0 through 6, area 7 * is reserved. */ for (area = PA_AREA0; area < PA_AREA7; area += SZ_64M) { base = ioremap_nocache(area + FPGA_REGS_OFFSET, FPGA_REGS_SIZE); if (!base) { /* Failed to remap this area, move along. */ continue; } if (ioread16(base + SRSTR) == SRSTR_MAGIC) return base; /* Found it! */ iounmap(base); } return NULL; } void __iomem *sdk7786_fpga_base; void __init sdk7786_fpga_init(void) { u16 version, date; sdk7786_fpga_base = sdk7786_fpga_probe(); if (unlikely(!sdk7786_fpga_base)) { panic("FPGA detection failed.\n"); return; } version = fpga_read_reg(FPGAVR); date = fpga_read_reg(FPGADR); pr_info("\tFPGA version:\t%d.%d (built on %d/%d/%d)\n", bcd2bin(version >> 8) & 0xf, bcd2bin(version & 0xf), ((date >> 12) & 0xf) + 2000, (date >> 8) & 0xf, bcd2bin(date & 0xff)); }
gpl-2.0
pocketbook-free/kernel_622
arch/avr32/kernel/stacktrace.c
13753
1223
/* * Stack trace management functions * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> register unsigned long current_frame_pointer asm("r7"); struct stackframe { unsigned long lr; unsigned long fp; }; /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace(struct stack_trace *trace) { unsigned long low, high; unsigned long fp; struct stackframe *frame; int skip = trace->skip; low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; fp = current_frame_pointer; while (fp >= low && fp <= (high - 8)) { frame = (struct stackframe *)fp; if (skip) { skip--; } else { trace->entries[trace->nr_entries++] = frame->lr; if (trace->nr_entries >= trace->max_entries) break; } /* * The next frame must be at a higher address than the * current frame. */ low = fp + 8; fp = frame->fp; } } EXPORT_SYMBOL_GPL(save_stack_trace);
gpl-2.0
ms705/linux
drivers/gpu/drm/nouveau/nouveau_prime.c
442
2928
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_gem.h"

/*
 * Build a scatter/gather table describing the object's backing pages,
 * for export to another device via PRIME.
 */
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int npages = nvbo->bo.num_pages;

	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}

/*
 * Kernel-map the whole object for CPU access; the mapping is cached in
 * nvbo->dma_buf_vmap and torn down by nouveau_gem_prime_vunmap().
 * Returns the virtual address or an ERR_PTR.
 */
void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return nvbo->dma_buf_vmap.virtual;
}

/* Undo nouveau_gem_prime_vmap(); @vaddr is unused, the cached map is torn down. */
void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}

/*
 * Import a PRIME scatter/gather table as a GART-backed buffer object.
 *
 * Returns a single gem reference to the caller (instead of a normal
 * nouveau_bo ttm reference), or an ERR_PTR on failure.
 */
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
							 size_t size,
							 struct sg_table *sg)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0, sg, &nvbo);
	if (ret)
		return ERR_PTR(ret);

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		/* propagate the real error instead of a hard-coded -ENOMEM */
		return ERR_PTR(ret);
	}

	return &nvbo->gem;
}

/*
 * Pin the object into GTT for the duration of a PRIME export.
 * Returns 0 on success or the error from nouveau_bo_pin().
 */
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		/* was -EINVAL: don't mask the underlying error code */
		return ret;

	return 0;
}

/* Release the pin taken by nouveau_gem_prime_pin(). */
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	nouveau_bo_unpin(nvbo);
}
gpl-2.0
jaronow/LGE970_ICS_kernel
drivers/net/atl1e/atl1e_main.c
1722
69683
/* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "atl1e.h" #define DRV_VERSION "1.0.0.7-NAPI" char atl1e_driver_name[] = "ATL1E"; char atl1e_driver_version[] = DRV_VERSION; #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 /* * atl1e_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, /* required last entry */ { 0 } }; MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>"); MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); static const u16 atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, 
{REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} }; static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = { REG_RXF0_BASE_ADDR_HI, REG_RXF1_BASE_ADDR_HI, REG_RXF2_BASE_ADDR_HI, REG_RXF3_BASE_ADDR_HI }; static const u16 atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} }; static const u16 atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} }; static const u16 atl1e_pay_load_size[] = { 128, 256, 512, 1024, 2048, 4096, }; /* * atl1e_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) { if (likely(atomic_dec_and_test(&adapter->irq_sem))) { AT_WRITE_REG(&adapter->hw, REG_ISR, 0); AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); AT_WRITE_FLUSH(&adapter->hw); } } /* * atl1e_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) { atomic_inc(&adapter->irq_sem); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); AT_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } /* * atl1e_irq_reset - reset interrupt confiure on the NIC * @adapter: board private structure */ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) { atomic_set(&adapter->irq_sem, 0); AT_WRITE_REG(&adapter->hw, REG_ISR, 0); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); AT_WRITE_FLUSH(&adapter->hw); } /* * atl1e_phy_config - Timer Call-back * @data: pointer to netdev cast into an 
unsigned long
 */
static void atl1e_phy_config(unsigned long data)
{
	struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
	struct atl1e_hw *hw = &adapter->hw;
	unsigned long flags;

	/* PHY access must be serialized against the MDIO users */
	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_restart_autoneg(hw);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

/*
 * Bounce the interface (down + up) while holding the __AT_RESETTING bit so
 * concurrent reset/MTU paths are excluded.  Must not be called from
 * interrupt context (it sleeps).
 */
void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);
	atl1e_down(adapter);
	atl1e_up(adapter);
	clear_bit(__AT_RESETTING, &adapter->flags);
}

/* Workqueue entry point for the deferred MAC reset (see atl1e_tx_timeout) */
static void atl1e_reset_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	adapter = container_of(work, struct atl1e_adapter, reset_task);

	atl1e_reinit_locked(adapter);
}

/*
 * Re-read PHY link status and bring the netdev carrier/queue state into
 * sync with it.  Caller holds mdio_lock (see atl1e_link_chg_task).
 * Returns 0 on success or the error from atl1e_get_speed_and_duplex().
 */
static int atl1e_check_link(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u16 speed, duplex, phy_data;

	/* MII_BMSR must read twice */
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	if ((phy_data & BMSR_LSTATUS) == 0) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			u32 value;
			/* disable rx */
			value = AT_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else {
		/* Link Up */
		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
		if (unlikely(err))
			return err;

		/* link result is our setting */
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed  = speed;
			adapter->link_duplex = duplex;
			/* reprogram MAC_CTRL for the new speed/duplex */
			atl1e_setup_mac_ctrl(adapter);
			netdev_info(netdev,
				    "NIC Link is Up <%d Mbps %s Duplex>\n",
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half");
		}

		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
	return 0;
}

/*
 * atl1e_link_chg_task - deal with link change event Out of interrupt context
 * @netdev: network interface device structure
 */
static void atl1e_link_chg_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl1e_adapter, link_chg_task);
	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_check_link(adapter);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

/*
 * Called from the interrupt path on a GPHY/manual event: report link-down
 * immediately, then defer the full state resync to link_chg_task.
 */
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;
	u16 link_up = 0;

	/* BMSR is latched; read twice to get the current status */
	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->mdio_lock);
	link_up = phy_data & BMSR_LSTATUS;
	/* notify upper layer link down ASAP */
	if (!link_up) {
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			netdev_info(netdev, "NIC Link is Down\n");
			adapter->link_speed = SPEED_0;
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}

/* Stop the periodic PHY reconfiguration timer (waits for a running cb) */
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
	del_timer_sync(&adapter->phy_config_timer);
}

/* Flush both deferred-work items; used on teardown */
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
{
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->link_chg_task);
}

/*
 * atl1e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl1e_tx_timeout(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/*
 * atl1e_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.
 This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1e_set_multi(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 mac_ctrl_data = 0;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);

	if (netdev->flags & IFF_PROMISC) {
		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
	} else if (netdev->flags & IFF_ALLMULTI) {
		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
	} else {
		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
	}

	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

	/* clear the old settings from the multicast hash table */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* compute mc addresses' hash value, and put it into hash table */
	netdev_for_each_mc_addr(ha, netdev) {
		hash_value = atl1e_hash_mc_addr(hw, ha->addr);
		atl1e_hash_set(hw, hash_value);
	}
}

/*
 * Enable/disable hardware VLAN tag stripping according to whether a VLAN
 * group is registered.  Interrupts are masked around the MAC_CTRL update.
 */
static void atl1e_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 mac_ctrl_data = 0;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	atl1e_irq_disable(adapter);

	adapter->vlgrp = grp;
	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);

	if (grp) {
		/* enable VLAN tag insert/strip */
		mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
	} else {
		/* disable VLAN tag insert/strip */
		mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
	}

	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
	atl1e_irq_enable(adapter);
}

/* Re-apply the stored VLAN group after a reset */
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
	netdev_dbg(adapter->netdev, "%s\n", __func__);
	atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}

/*
 * atl1e_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success,
 negative on failure
 */
static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* address can only be changed while the interface is down */
	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl1e_hw_set_mac_addr(&adapter->hw);

	return 0;
}

/*
 * atl1e_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int old_mtu   = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		netdev_warn(adapter->netdev, "invalid MTU setting\n");
		return -EINVAL;
	}
	/* set MTU */
	if (old_mtu != new_mtu && netif_running(netdev)) {
		/* exclude concurrent reset paths, then bounce the NIC */
		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
			msleep(1);
		netdev->mtu = new_mtu;
		adapter->hw.max_frame_size = new_mtu;
		/* jumbo threshold is kept in 8-byte (QWORD) units */
		adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
		atl1e_down(adapter);
		atl1e_up(adapter);
		clear_bit(__AT_RESETTING, &adapter->flags);
	}
	return 0;
}

/*
 * caller should hold mdio_lock
 */
static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
	return result;
}

/* caller should hold mdio_lock (mirrors atl1e_mdio_read) */
static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
			     int reg_num, int val)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}

/*
 * atl1e_mii_ioctl - handle the SIOC[GS]MII* PHY register ioctls
 * @netdev: network interface device structure
 * @ifr:    ifreq carrying struct mii_ioctl_data
 * @cmd:    SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG
 */
static int atl1e_mii_ioctl(struct net_device *netdev,
			   struct ifreq *ifr, int cmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int retval = 0;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 0;
		break;

	case SIOCGMIIREG:
		if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			retval = -EIO;
			goto out;
		}
		break;

	case SIOCSMIIREG:
		/* only the 5-bit MII register range is writable */
		if (data->reg_num & ~(0x1F)) {
			retval = -EFAULT;
			goto out;
		}

		netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
			   data->reg_num, data->val_in);
		if (atl1e_write_phy_reg(&adapter->hw,
					data->reg_num, data->val_in)) {
			retval = -EIO;
			goto out;
		}
		break;

	default:
		retval = -EOPNOTSUPP;
		break;
	}
out:
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
	return retval;
}

/*
 * atl1e_ioctl - dispatch device ioctls (only MII commands are supported)
 * @netdev: network interface device structure
 * @ifr:    user request
 * @cmd:    ioctl command
 */
static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl1e_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * Put the PCI command register into a sane state (enable memory space and
 * bus mastering, disable I/O space and INTx-disable) and clear stale PME
 * state possibly left behind by firmware.
 */
static void atl1e_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
	cmd |=  (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * some motherboards BIOS(PXE/EFI) driver may set PME
	 * while they transfer control to OS (Windows/Linux)
	 * so we should clear this bit before NIC work normally
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
	msleep(1);
}

/*
 * atl1e_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * Currently a no-op placeholder; ring memory is allocated in
 * atl1e_setup_ring_resources().
 */
static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
	return 0;
}

/*
 * atl1e_sw_init - Initialize general software structures
 *                 (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw   = &adapter->hw;
	struct pci_dev	*pdev = adapter->pdev;
	u32 phy_status_data = 0;

	adapter->wol = 0;
	adapter->link_speed = SPEED_0;   /* hardware init */
	adapter->link_duplex = FULL_DUPLEX;
	adapter->num_rx_queues = 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id  = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
	/* nic type: revision >= 0xF0 is L2E rev B; otherwise the PHY
	 * 100M capability bit distinguishes L1E from L2E rev A */
	if (hw->revision_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (phy_status_data & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

	if (phy_status_data & PHY_STATUS_EMI_CA)
		hw->emi_ca = true;
	else
		hw->emi_ca = false;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->max_frame_size = adapter->netdev->mtu;
	/* jumbo threshold in 8-byte units, covering headers + FCS */
	hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
				VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;

	hw->rrs_type = atl1e_rrs_disable;
	hw->indirect_tab = 0;
	hw->base_cpu = 0;

	/* need confirm */

	hw->ict = 50000;		/* 100ms */
	hw->smb_timer = 200000;		/* 200ms */
	hw->tpd_burst = 5;
	hw->rrd_thresh = 1;
	hw->tpd_thresh = adapter->tx_ring.count / 2;
	hw->rx_count_down = 4;	/* 2us resolution */
	/* NOTE(review): hw->imt is not set in this function; presumably it
	 * is initialized elsewhere (module options) before this runs —
	 * TODO confirm against the probe path. */
	hw->tx_count_down = hw->imt * 4 / 3;
	hw->dmar_block = atl1e_dma_req_1024;
	hw->dmaw_block = atl1e_dma_req_1024;
	hw->dmar_dly_cnt = 15;
	hw->dmaw_dly_cnt = 4;

	if (atl1e_alloc_queues(adapter)) {
		netdev_err(adapter->netdev,
			   "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->tx_lock);

	set_bit(__AT_DOWN, &adapter->flags);

	return 0;
}

/*
 * atl1e_clean_tx_ring - Free Tx-skb
 *
 @adapter: board private structure
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
				&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	struct pci_dev *pdev = adapter->pdev;
	u16 index, ring_count;

	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
		return;

	ring_count = tx_ring->count;
	/* first unmmap dma */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}
	}
	/* second free skb */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->skb) {
			dev_kfree_skb_any(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}
	}
	/* Zero out Tx-buffers */
	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
				ring_count);
	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
				ring_count);
}

/*
 * atl1e_clean_rx_ring - Free rx-reservation skbs
 * @adapter: board private structure
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring =
		(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
	u16 i, j;

	if (adapter->ring_vir_addr == NULL)
		return;
	/* Zero out the descriptor ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			if (rx_page_desc[i].rx_page[j].addr != NULL) {
				memset(rx_page_desc[i].rx_page[j].addr, 0,
						rx_ring->real_page_size);
			}
		}
	}
}

/*
 * Compute the total DMA-coherent allocation needed for the TX descriptor
 * ring, all RX pages, and the TX/RX mailbox words (incl. alignment slack:
 * 8 bytes for TPD, 32 for RX pages, 4 for the u32 mailboxes).
 */
static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
{
	*ring_size = ((u32)(adapter->tx_ring.count *
		     sizeof(struct atl1e_tpd_desc) + 7
			/* tx ring, qword align */
		     + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
			adapter->num_rx_queues + 31
			/* rx ring, 32 bytes align */
		     + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
			sizeof(u32) + 3));
			/* tx, rx cmd, dword align */
}

/*
 * Pre-compute ring geometry (real RX page size, total ring byte size) and
 * reset the ring pointers.  No memory is allocated here.
 */
static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring = NULL;

	rx_ring = &adapter->rx_ring;

	rx_ring->real_page_size = adapter->rx_ring.page_size
				 + adapter->hw.max_frame_size
				 + ETH_HLEN + VLAN_HLEN
				 + ETH_FCS_LEN;
	rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
	atl1e_cal_ring_size(adapter, &adapter->ring_size);

	adapter->ring_vir_addr = NULL;
	adapter->rx_ring.desc = NULL;
	rwlock_init(&adapter->tx_ring.tx_lock);

	return;
}

/*
 * Read / Write Ptr Initialize:
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;
	rx_page_desc = rx_ring->rx_page_desc;

	tx_ring->next_to_use = 0;
	atomic_set(&tx_ring->next_to_clean, 0);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_page_desc[i].rx_using  = 0;
		rx_page_desc[i].rx_nxseq = 0;
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			*rx_page_desc[i].rx_page[j].write_offset_addr = 0;
			rx_page_desc[i].rx_page[j].read_offset = 0;
		}
	}
}

/*
 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);

	if (adapter->ring_vir_addr) {
		pci_free_consistent(pdev, adapter->ring_size,
				adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}

	if (adapter->tx_ring.tx_buffer) {
		kfree(adapter->tx_ring.tx_buffer);
		adapter->tx_ring.tx_buffer = NULL;
	}
}

/*
 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on
 success, negative on failure
 */
static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1e_tx_ring *tx_ring;
	struct atl1e_rx_ring *rx_ring;
	struct atl1e_rx_page_desc  *rx_page_desc;
	int size, i, j;
	u32 offset = 0;
	int err = 0;

	if (adapter->ring_vir_addr != NULL)
		return 0; /* alloced already */

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;

	/* real ring DMA buffer */

	size = adapter->ring_size;
	adapter->ring_vir_addr = pci_alloc_consistent(pdev,
			adapter->ring_size, &adapter->ring_dma);

	if (adapter->ring_vir_addr == NULL) {
		netdev_err(adapter->netdev,
			   "pci_alloc_consistent failed, size = D%d\n", size);
		return -ENOMEM;
	}

	memset(adapter->ring_vir_addr, 0, adapter->ring_size);

	rx_page_desc = rx_ring->rx_page_desc;

	/* Init TPD Ring */
	tx_ring->dma = roundup(adapter->ring_dma, 8);
	offset = tx_ring->dma - adapter->ring_dma;
	tx_ring->desc = (struct atl1e_tpd_desc *)
			(adapter->ring_vir_addr + offset);
	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
	tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
	if (tx_ring->tx_buffer == NULL) {
		netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
			   size);
		err = -ENOMEM;
		goto failed;
	}

	/* Init RXF-Pages */
	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
	offset = roundup(offset, 32);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].addr =
				adapter->ring_vir_addr + offset;
			offset += rx_ring->real_page_size;
		}
	}

	/* Init CMB dma address */
	tx_ring->cmb_dma = adapter->ring_dma + offset;
	tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
	offset += sizeof(u32);

	/* each RX page gets its own mailbox word for the HW write offset */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].write_offset_dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].write_offset_addr =
				adapter->ring_vir_addr + offset;
			offset += sizeof(u32);
		}
	}

	/* sanity check: the carved regions must fit the single allocation */
	if (unlikely(offset > adapter->ring_size)) {
		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
			   offset, adapter->ring_size);
		err = -1;
		goto failed;
	}

	return 0;
failed:
	if (adapter->ring_vir_addr != NULL) {
		pci_free_consistent(pdev, adapter->ring_size,
				adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}
	return err;
}

/*
 * Program all descriptor ring base addresses into the NIC: TPD ring, TX
 * CMB, and every RX page/mailbox, then latch them with REG_LOAD_PTR.
 * All regions share the same high 32 address bits.
 */
static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	struct atl1e_rx_ring *rx_ring =
			(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_tx_ring *tx_ring =
			(struct atl1e_tx_ring *)&adapter->tx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
			(u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));

	rx_page_desc = rx_ring->rx_page_desc;
	/* RXF Page Physical address / Page Length */
	for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
		AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
				 (u32)((adapter->ring_dma &
				 AT_DMA_HI_ADDR_MASK) >> 32));
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			u32 page_phy_addr;
			u32 offset_phy_addr;

			page_phy_addr = rx_page_desc[i].rx_page[j].dma;
			offset_phy_addr =
				   rx_page_desc[i].rx_page[j].write_offset_dma;

			AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
					page_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
					offset_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
		}
	}
	/* Page Length */
	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
	/* Load all of base address above */
	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);

	return;
}

/*
 * Configure the TX queue: early-TX (jumbo) threshold, DMA burst sizes
 * clamped to the PCIe device-control maxima, then enable the TXQ.
 */
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	u32 dev_ctrl_data = 0;
	u32 max_pay_load = 0;
	u32 jumbo_thresh = 0;
	u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */

	/* configure TXQ param */
	if (hw->nic_type != athr_l2e_revB) {
		extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		if (hw->max_frame_size <= 1500) {
			jumbo_thresh = hw->max_frame_size + extra_size;
		} else if (hw->max_frame_size < 6*1024) {
			jumbo_thresh =
				(hw->max_frame_size + extra_size) * 2 / 3;
		} else {
			jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
		}
		AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
	}

	dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);

	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
			DEVICE_CTRL_MAX_PAYLOAD_MASK;

	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);

	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);

	if (hw->nic_type != athr_l2e_revB)
		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
			      atl1e_pay_load_size[hw->dmar_block]);
	/* enable TXQ */
	AT_WRITE_REGW(hw, REG_TXQ_CTRL,
			(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
			 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
			| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
	return;
}

/*
 * Configure the RX queue: jumbo lookahead, RXF flow-control pause
 * thresholds (high = 4/5 of SRAM, low = 1/5), RSS hash settings, then
 * enable the RXQ.
 */
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	u32 rxf_len  = 0;
	u32 rxf_low  = 0;
	u32 rxf_high = 0;
	u32 rxf_thresh_data = 0;
	u32 rxq_ctrl_data = 0;

	if (hw->nic_type != athr_l2e_revB) {
		AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
			      (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
			      RXQ_JMBOSZ_TH_SHIFT |
			      (1 & RXQ_JMBO_LKAH_MASK) <<
			      RXQ_JMBO_LKAH_SHIFT));

		rxf_len  = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
		rxf_high = rxf_len * 4 / 5;
		rxf_low  = rxf_len / 5;
		rxf_thresh_data = ((rxf_high  & RXQ_RXF_PAUSE_TH_HI_MASK)
				  << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
				  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
				  << RXQ_RXF_PAUSE_TH_LO_SHIFT);

		AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
	}

	/* RRS */
	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);

	if (hw->rrs_type & atl1e_rrs_ipv4)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;

	if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;

	if (hw->rrs_type & atl1e_rrs_ipv6)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;

	if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;

	if (hw->rrs_type != atl1e_rrs_disable)
		rxq_ctrl_data |=
			(RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);

	rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
			 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;

	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
	return;
}

/*
 * Program the DMA engine: read/write burst lengths (from the clamped
 * dmar/dmaw blocks), request ordering, and delay counters.
 */
static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dma_ctrl_data = 0;

	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;

	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
	return;
}

/*
 * Build and write MAC_CTRL from the current link speed/duplex, VLAN
 * stripping state, and the netdev promiscuous/allmulti flags.
 */
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
	u32 value;
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN |
		MAC_CTRL_RX_EN ;

	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
			  MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
			  MAC_CTRL_SPEED_SHIFT);
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	value |= (((u32)adapter->hw.preamble_len &
		  MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;

	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;

	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}

/*
 * atl1e_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 */
static int atl1e_configure(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;

	u32 intr_status_data = 0;

	/* clear interrupt status */
	AT_WRITE_REG(hw, REG_ISR, ~0);

	/* 1. set MAC Address */
	atl1e_hw_set_mac_addr(hw);

	/* 2. Init the Multicast HASH table done by set_muti */

	/* 3. Clear any WOL status */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* 4. Descripter Ring BaseMem/Length/Read ptr/Write ptr
	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
	 *    High 32bits memory */
	atl1e_configure_des_ring(adapter);

	/* 5. set Interrupt Moderator Timer */
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
			MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);

	/* 6. rx/tx threshold to trig interrupt */
	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);

	/* 7. set Interrupt Clear Timer */
	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);

	/* 8. set MTU */
	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
			VLAN_HLEN + ETH_FCS_LEN);

	/* 9. config TXQ early tx threshold */
	atl1e_configure_tx(adapter);

	/* 10. config RXQ */
	atl1e_configure_rx(adapter);

	/* 11. config  DMA Engine */
	atl1e_configure_dma(adapter);

	/* 12. smb timer to trig interrupt */
	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);

	intr_status_data = AT_READ_REG(hw, REG_ISR);
	if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
		netdev_err(adapter->netdev,
			   "atl1e_configure failed, PCIE phy link down\n");
		return -1;
	}

	AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
	return 0;
}

/*
 * atl1e_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw_stats  *hw_stats = &adapter->hw_stats;
	struct net_device_stats *net_stats = &netdev->stats;

	net_stats->rx_packets = hw_stats->rx_ok;
	net_stats->tx_packets = hw_stats->tx_ok;
	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_1_col +
				hw_stats->tx_2_col * 2 +
				hw_stats->tx_late_col + hw_stats->tx_abort_col;

	net_stats->rx_errors  = hw_stats->rx_frag + hw_stats->rx_fcs_err +
				hw_stats->rx_len_err + hw_stats->rx_sz_ov +
				hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_over_errors   = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;

	net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;

	net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
			       hw_stats->tx_underrun + hw_stats->tx_trunc;
	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	return net_stats;
}

/*
 * Accumulate the hardware statistics counters into the software copies.
 * Relies on struct atl1e_hw_stats members being laid out in the same
 * order as the RX/TX status register banks (4 bytes per counter).
 */
static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
{
	u16 hw_reg_addr = 0;
	unsigned long *stats_item = NULL;

	/* update rx status */
	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.rx_ok;
	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
	/* update tx status */
	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.tx_ok;
	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
}

/* Read (and thereby clear) the latched PHY interrupt status */
static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
{
	u16 phy_data;

	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
	spin_unlock(&adapter->mdio_lock);
}

/*
 * Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap DMA, free skbs, and wake the queue if it was flow-stopped.
 */
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
					&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);

	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}

		if (tx_buffer->skb) {
			dev_kfree_skb_irq(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}

		if (++next_to_clean == tx_ring->count)
			next_to_clean = 0;
	}

	atomic_set(&tx_ring->next_to_clean, next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
			netif_carrier_ok(adapter->netdev)) {
		netif_wake_queue(adapter->netdev);
	}

	return true;
}

/*
 * atl1e_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device
 structure
 * @pt_regs: CPU registers structure
 */
static irqreturn_t atl1e_intr(int irq, void *data)
{
	struct net_device *netdev  = data;
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	int max_ints = AT_MAX_INT_WORK;
	int handled = IRQ_NONE;
	u32 status;

	do {
		status = AT_READ_REG(hw, REG_ISR);
		if ((status & IMR_NORMAL_MASK) == 0 ||
				(status & ISR_DIS_INT) != 0) {
			if (max_ints != AT_MAX_INT_WORK)
				handled = IRQ_HANDLED;
			break;
		}
		/* link event */
		if (status & ISR_GPHY)
			atl1e_clear_phy_int(adapter);
		/* Ack ISR */
		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

		handled = IRQ_HANDLED;
		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			netdev_err(adapter->netdev,
				   "pcie phy linkdown %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				atl1e_irq_reset(adapter);
				schedule_work(&adapter->reset_task);
				break;
			}
		}

		/* check if DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			netdev_err(adapter->netdev,
				   "PCIE DMA RW error (status = 0x%x)\n",
				   status);
			atl1e_irq_reset(adapter);
			schedule_work(&adapter->reset_task);
			break;
		}

		if (status & ISR_SMB)
			atl1e_update_hw_stats(adapter);

		/* link event */
		if (status & (ISR_GPHY | ISR_MANUAL)) {
			netdev->stats.tx_carrier_errors++;
			atl1e_link_chg_event(adapter);
			break;
		}

		/* transmit event */
		if (status & ISR_TX_EVENT)
			atl1e_clean_tx_irq(adapter);

		if (status & ISR_RX_EVENT) {
			/*
			 * disable rx interrupts, without
			 * the synchronize_irq bit
			 */
			AT_WRITE_REG(hw, REG_IMR,
				     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
			AT_WRITE_FLUSH(hw);
			if (likely(napi_schedule_prep(
				   &adapter->napi)))
				__napi_schedule(&adapter->napi);
		}
	} while (--max_ints > 0);
	/* re-enable Interrupt*/
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);

	return handled;
}

/*
 * Translate the hardware RX checksum verdict (from the receive return
 * status) into skb->ip_summed.  Fragmented IPv4 packets fall back to
 * software checksumming.
 */
static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
{
	u8 *packet = (u8 *)(prrs + 1);
	struct iphdr *iph;
	u16 head_len = ETH_HLEN;
	u16 pkt_flags;
	u16 err_flags;

	skb_checksum_none_assert(skb);
	pkt_flags = prrs->pkt_flag;
	err_flags = prrs->err_flag;
	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
		((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
		if (pkt_flags & RRS_IS_IPV4) {
			if (pkt_flags & RRS_IS_802_3)
				head_len += 8;   /* SNAP header */
			iph = (struct iphdr *) (packet + head_len);
			if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
				goto hw_xsum;
		}
		if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}
	}

hw_xsum :
	return;
}

/* Return the RX page the hardware is currently filling for queue @que */
static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
					       u8 que)
{
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
	u8 rx_using = rx_page_desc[que].rx_using;

	return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
}

/*
 * Drain received frames from the current RX page up to the hardware
 * write offset: validate sequence numbers, copy each frame into a fresh
 * skb, and hand full pages back to the NIC.  Called from NAPI poll.
 */
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
		   int *work_done, int work_to_do)
{
	struct net_device *netdev  = adapter->netdev;
	struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
					 &adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
	struct sk_buff *skb = NULL;
	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
	u32 packet_size, write_offset;
	struct atl1e_recv_ret_status *prrs;

	write_offset = *(rx_page->write_offset_addr);
	if (likely(rx_page->read_offset < write_offset)) {
		do {
			if (*work_done >= work_to_do)
				break;
			(*work_done)++;
			/* get new packet's  rrs */
			prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
						 rx_page->read_offset);
			/* check sequence number */
			if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
				netdev_err(netdev,
					   "rx sequence number error (rx=%d) (expect=%d)\n",
					   prrs->seq_num,
					   rx_page_desc[que].rx_nxseq);
				rx_page_desc[que].rx_nxseq++;
				/* just for debug use */
				AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
					     (((u32)prrs->seq_num) << 16) |
					     rx_page_desc[que].rx_nxseq);
				goto fatal_err;
			}
			rx_page_desc[que].rx_nxseq++;

			/* error packet */
			if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
					RRS_ERR_TRUNC)) {
				/* hardware error, discard this packet*/
					netdev_err(netdev,
						   "rx packet desc error %x\n",
						   *((u32 *)prrs + 1));
					goto skip_pkt;
				}
			}

			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
					RRS_PKT_SIZE_MASK) - 4; /* CRC */
			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
			if (skb == NULL) {
				netdev_warn(netdev,
					    "Memory squeeze, deferring packet\n");
				goto skip_pkt;
			}
			/* frame data immediately follows the return status */
			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
			skb_put(skb, packet_size);
			skb->protocol = eth_type_trans(skb, netdev);
			atl1e_rx_checksum(adapter, skb, prrs);

			if (unlikely(adapter->vlgrp &&
				(prrs->pkt_flag & RRS_IS_VLAN_TAG))) {
				/* un-swizzle the hardware VLAN tag layout */
				u16 vlan_tag = (prrs->vtag >> 4) |
					       ((prrs->vtag & 7) << 13) |
					       ((prrs->vtag & 8) << 9);
				netdev_dbg(netdev,
					   "RXD VLAN TAG<RRD>=0x%04x\n",
					   prrs->vtag);
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
							 vlan_tag);
			} else {
				netif_receive_skb(skb);
			}

skip_pkt:
	/* skip current packet whether it's ok or not. */
			rx_page->read_offset +=
				(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
				RRS_PKT_SIZE_MASK) +
				sizeof(struct atl1e_recv_ret_status) + 31) &
					0xFFFFFFE0);

			if (rx_page->read_offset >= rx_ring->page_size) {
				/* mark this page clean */
				u16 reg_addr;
				u8  rx_using;

				rx_page->read_offset =
					*(rx_page->write_offset_addr) = 0;
				rx_using = rx_page_desc[que].rx_using;
				reg_addr =
					atl1e_rx_page_vld_regs[que][rx_using];
				AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
				rx_page_desc[que].rx_using ^= 1;
				rx_page = atl1e_get_rx_page(adapter, que);
			}
			write_offset = *(rx_page->write_offset_addr);
		} while (rx_page->read_offset < write_offset);
	}

	return;

fatal_err:
	if (!test_bit(__AT_DOWN, &adapter->flags))
		schedule_work(&adapter->reset_task);
}

/*
 * atl1e_clean - NAPI Rx polling callback
 * @adapter: board private structure
 */
static int atl1e_clean(struct napi_struct *napi, int budget)
{
	struct atl1e_adapter *adapter =
			container_of(napi, struct atl1e_adapter, napi);
	u32 imr_data;
	int work_done = 0;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(adapter->netdev))
		goto quit_polling;

	atl1e_clean_rx_irq(adapter, 0, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
quit_polling:
		napi_complete(napi);
		imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
		AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
		/* test debug */
		if (test_bit(__AT_DOWN, &adapter->flags)) {
			atomic_dec(&adapter->irq_sem);
			netdev_err(adapter->netdev,
				   "atl1e_clean is called when AT_DOWN\n");
		}
		/* reenable RX intr */
		/*atl1e_irq_enable(adapter); */

	}
	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
*/ static void atl1e_netpoll(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); atl1e_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); } #endif static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; u16 next_to_use = 0; u16 next_to_clean = 0; next_to_clean = atomic_read(&tx_ring->next_to_clean); next_to_use = tx_ring->next_to_use; return (u16)(next_to_clean > next_to_use) ? (next_to_clean - next_to_use - 1) : (tx_ring->count + next_to_clean - next_to_use - 1); } /* * get next usable tpd * Note: should call atl1e_tdp_avail to make sure * there is enough tpd to use */ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; u16 next_to_use = 0; next_to_use = tx_ring->next_to_use; if (++tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; } static struct atl1e_tx_buffer * atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; return &tx_ring->tx_buffer[tpd - tx_ring->desc]; } /* Calculate the transmit packet descript needed*/ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) { int i = 0; u16 tpd_req = 1; u16 fg_size = 0; u16 proto_hdr_len = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { fg_size = skb_shinfo(skb)->frags[i].size; tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); } if (skb_is_gso(skb)) { if (skb->protocol == htons(ETH_P_IP) || (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (proto_hdr_len < skb_headlen(skb)) { tpd_req += ((skb_headlen(skb) - proto_hdr_len + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); } } } return tpd_req; } static int 
atl1e_tso_csum(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { u8 hdr_len; u32 real_len; unsigned short offload_type; int err; if (skb_is_gso(skb)) { if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (unlikely(err)) return -1; } offload_type = skb_shinfo(skb)->gso_type; if (offload_type & SKB_GSO_TCPV4) { real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); if (real_len < skb->len) pskb_trim(skb, real_len); hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (unlikely(skb->len == hdr_len)) { /* only xsum need */ netdev_warn(adapter->netdev, "IPV4 tso with zero data??\n"); goto check_sum; } else { ip_hdr(skb)->check = 0; ip_hdr(skb)->tot_len = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic( ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); tpd->word3 |= (ip_hdr(skb)->ihl & TDP_V4_IPHL_MASK) << TPD_V4_IPHL_SHIFT; tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; tpd->word3 |= ((skb_shinfo(skb)->gso_size) & TPD_MSS_MASK) << TPD_MSS_SHIFT; tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; } return 0; } } check_sum: if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { u8 css, cso; cso = skb_checksum_start_offset(skb); if (unlikely(cso & 0x1)) { netdev_err(adapter->netdev, "payload offset should not ant event number\n"); return -1; } else { css = cso + skb->csum_offset; tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT; tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT; tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; } } return 0; } static void atl1e_tx_map(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; u16 buf_len = skb_headlen(skb); u16 map_len = 0; u16 mapped_len = 0; u16 hdr_len = 0; u16 nr_frags; u16 f; int segment; nr_frags = skb_shinfo(skb)->nr_frags; segment = (tpd->word3 >> 
TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; if (segment) { /* TSO */ map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); use_tpd = tpd; tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); tx_buffer->length = map_len; tx_buffer->dma = pci_map_single(adapter->pdev, skb->data, hdr_len, PCI_DMA_TODEVICE); ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } while (mapped_len < buf_len) { /* mapped_len == 0, means we should use the first tpd, which is given by caller */ if (mapped_len == 0) { use_tpd = tpd; } else { use_tpd = atl1e_get_tpd(adapter); memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); } tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); tx_buffer->skb = NULL; tx_buffer->length = map_len = ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : (buf_len - mapped_len); tx_buffer->dma = pci_map_single(adapter->pdev, skb->data + mapped_len, map_len, PCI_DMA_TODEVICE); ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } for (f = 0; f < nr_frags; f++) { struct skb_frag_struct *frag; u16 i; u16 seg_num; frag = &skb_shinfo(skb)->frags[f]; buf_len = frag->size; seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; for (i = 0; i < seg_num; i++) { use_tpd = atl1e_get_tpd(adapter); memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); BUG_ON(tx_buffer->skb); tx_buffer->skb = NULL; tx_buffer->length = (buf_len > MAX_TX_BUF_LEN) ? 
MAX_TX_BUF_LEN : buf_len; buf_len -= tx_buffer->length; tx_buffer->dma = pci_map_page(adapter->pdev, frag->page, frag->page_offset + (i * MAX_TX_BUF_LEN), tx_buffer->length, PCI_DMA_TODEVICE); ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } } if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) /* note this one is a tcp header */ tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; /* The last tpd */ use_tpd->word3 |= 1 << TPD_EOP_SHIFT; /* The last buffer info contain the skb address, so it will be free after unmap */ tx_buffer->skb = skb; } static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, struct atl1e_tpd_desc *tpd) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); } static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); unsigned long flags; u16 tpd_req = 1; struct atl1e_tpd_desc *tpd; if (test_bit(__AT_DOWN, &adapter->flags)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) return NETDEV_TX_LOCKED; if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } tpd = atl1e_get_tpd(adapter); if (unlikely(vlan_tx_tag_present(skb))) { u16 vlan_tag = vlan_tx_tag_get(skb); u16 atl1e_vlan_tag; tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << TPD_VLAN_SHIFT; } if (skb->protocol == htons(ETH_P_8021Q)) tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; if (skb_network_offset(skb) != ETH_HLEN) tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ /* do TSO and check sum */ if (atl1e_tso_csum(adapter, skb, tpd) != 0) { spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } atl1e_tx_map(adapter, skb, tpd); atl1e_tx_queue(adapter, tpd_req, tpd); netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } static void atl1e_free_irq(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); if (adapter->have_msi) pci_disable_msi(adapter->pdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int flags = 0; int err = 0; adapter->have_msi = true; 
err = pci_enable_msi(adapter->pdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate MSI interrupt Error: %d\n", err); adapter->have_msi = false; } else netdev->irq = pdev->irq; if (!adapter->have_msi) flags |= IRQF_SHARED; err = request_irq(adapter->pdev->irq, atl1e_intr, flags, netdev->name, netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); if (adapter->have_msi) pci_disable_msi(adapter->pdev); return err; } netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); return err; } int atl1e_up(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0; u32 val; /* hardware has been reset, we need to reload some things */ err = atl1e_init_hw(&adapter->hw); if (err) { err = -EIO; return err; } atl1e_init_ring_ptrs(adapter); atl1e_set_multi(netdev); atl1e_restore_vlan(adapter); if (atl1e_configure(adapter)) { err = -EIO; goto err_up; } clear_bit(__AT_DOWN, &adapter->flags); napi_enable(&adapter->napi); atl1e_irq_enable(adapter); val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT); err_up: return err; } void atl1e_down(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__AT_DOWN, &adapter->flags); netif_stop_queue(netdev); /* reset MAC to disable all RX/TX */ atl1e_reset_hw(&adapter->hw); msleep(1); napi_disable(&adapter->napi); atl1e_del_timer(adapter); atl1e_irq_disable(adapter); netif_carrier_off(netdev); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; atl1e_clean_tx_ring(adapter); atl1e_clean_rx_ring(adapter); } /* * atl1e_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). 
At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. */ static int atl1e_open(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); int err; /* disallow open during test */ if (test_bit(__AT_TESTING, &adapter->flags)) return -EBUSY; /* allocate rx/tx dma buffer & descriptors */ atl1e_init_ring_resources(adapter); err = atl1e_setup_ring_resources(adapter); if (unlikely(err)) return err; err = atl1e_request_irq(adapter); if (unlikely(err)) goto err_req_irq; err = atl1e_up(adapter); if (unlikely(err)) goto err_up; return 0; err_up: atl1e_free_irq(adapter); err_req_irq: atl1e_free_ring_resources(adapter); atl1e_reset_hw(&adapter->hw); return err; } /* * atl1e_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
*/ static int atl1e_close(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1e_down(adapter); atl1e_free_irq(adapter); atl1e_free_ring_resources(adapter); return 0; } static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 ctrl = 0; u32 mac_ctrl_data = 0; u32 wol_ctrl_data = 0; u16 mii_advertise_data = 0; u16 mii_bmsr_data = 0; u16 mii_intr_status_data = 0; u32 wufc = adapter->wol; u32 i; #ifdef CONFIG_PM int retval = 0; #endif if (netif_running(netdev)) { WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1e_down(adapter); } netif_device_detach(netdev); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif if (wufc) { /* get link status */ atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); mii_advertise_data = ADVERTISE_10HALF; if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || (atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_advertise_data) != 0) || (atl1e_phy_commit(hw)) != 0) { netdev_dbg(adapter->netdev, "set phy register failed\n"); goto wol_dis; } hw->phy_configured = false; /* re-init PHY when resume */ /* turn on magic packet wol */ if (wufc & AT_WUFC_MAG) wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; if (wufc & AT_WUFC_LNKC) { /* if orignal link status is link, just wait for retrive link */ if (mii_bmsr_data & BMSR_LSTATUS) { for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { msleep(100); atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); if (mii_bmsr_data & BMSR_LSTATUS) break; } if ((mii_bmsr_data & BMSR_LSTATUS) == 0) netdev_dbg(adapter->netdev, "Link may change when suspend\n"); } wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; /* only link up can wake up */ if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 
0x400) != 0) { netdev_dbg(adapter->netdev, "read write phy register failed\n"); goto wol_dis; } } /* clear phy interrupt */ atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); /* Config MAC Ctrl register */ mac_ctrl_data = MAC_CTRL_RX_EN; /* set to 10/100M halt duplex */ mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; mac_ctrl_data |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); if (adapter->vlgrp) mac_ctrl_data |= MAC_CTRL_RMV_VLAN; /* magic packet maybe Broadcast&multicast&Unicast frame */ if (wufc & AT_WUFC_MAG) mac_ctrl_data |= MAC_CTRL_BC_EN; netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", mac_ctrl_data); AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); /* pcie patch */ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); goto suspend_exit; } wol_dis: /* WOL disabled */ AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* pcie patch */ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); atl1e_force_ps(hw); hw->phy_configured = false; /* re-init PHY when resume */ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); suspend_exit: if (netif_running(netdev)) atl1e_free_irq(adapter); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } #ifdef CONFIG_PM static int atl1e_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { netdev_err(adapter->netdev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ pci_enable_wake(pdev, PCI_D3hot, 0); 
pci_enable_wake(pdev, PCI_D3cold, 0); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); if (netif_running(netdev)) { err = atl1e_request_irq(adapter); if (err) return err; } atl1e_reset_hw(&adapter->hw); if (netif_running(netdev)) atl1e_up(adapter); netif_device_attach(netdev); return 0; } #endif static void atl1e_shutdown(struct pci_dev *pdev) { atl1e_suspend(pdev, PMSG_SUSPEND); } static const struct net_device_ops atl1e_netdev_ops = { .ndo_open = atl1e_open, .ndo_stop = atl1e_close, .ndo_start_xmit = atl1e_xmit_frame, .ndo_get_stats = atl1e_get_stats, .ndo_set_multicast_list = atl1e_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = atl1e_set_mac_addr, .ndo_change_mtu = atl1e_change_mtu, .ndo_do_ioctl = atl1e_ioctl, .ndo_tx_timeout = atl1e_tx_timeout, .ndo_vlan_rx_register = atl1e_vlan_rx_register, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1e_netpoll, #endif }; static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) { SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); netdev->irq = pdev->irq; netdev->netdev_ops = &atl1e_netdev_ops; netdev->watchdog_timeo = AT_TX_WATCHDOG; atl1e_set_ethtool_ops(netdev); netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_HW_VLAN_TX; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_LLTX; return 0; } /* * atl1e_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1e_pci_tbl * * Returns 0 on success, negative on failure * * atl1e_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
*/ static int __devinit atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl1e_adapter *adapter = NULL; static int cards_found; int err = 0; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); return err; } /* * The atl1e chip can DMA to 64-bit addresses, but it uses a single * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used at a time. * * Supporting 64-bit DMA on this hardware is more trouble than it's * worth. It is far easier to limit to 32-bit DMA than update * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. */ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); goto err_dma; } err = pci_request_regions(pdev, atl1e_driver_name); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); if (netdev == NULL) { err = -ENOMEM; dev_err(&pdev->dev, "etherdev alloc failed\n"); goto err_alloc_etherdev; } err = atl1e_init_netdev(netdev, pdev); if (err) { netdev_err(netdev, "init netdevice failed\n"); goto err_init_netdev; } adapter = netdev_priv(netdev); adapter->bd_number = cards_found; adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.adapter = adapter; adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); if (!adapter->hw.hw_addr) { err = -EIO; netdev_err(netdev, "cannot map device registers\n"); goto err_ioremap; } netdev->base_addr = (unsigned long)adapter->hw.hw_addr; /* init mii data */ adapter->mii.dev = netdev; adapter->mii.mdio_read = atl1e_mdio_read; adapter->mii.mdio_write = atl1e_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); 
init_timer(&adapter->phy_config_timer); adapter->phy_config_timer.function = atl1e_phy_config; adapter->phy_config_timer.data = (unsigned long) adapter; /* get user settings */ atl1e_check_options(adapter); /* * Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl1e_driver_name * Enables bus-mastering on the device and calls * pcibios_set_master to do the needed arch specific settings */ atl1e_setup_pcicmd(pdev); /* setup the private structure */ err = atl1e_sw_init(adapter); if (err) { netdev_err(netdev, "net device private data init failed\n"); goto err_sw_init; } /* Init GPHY as early as possible due to power saving issue */ atl1e_phy_init(&adapter->hw); /* reset the controller to * put the device in a known good starting state */ err = atl1e_reset_hw(&adapter->hw); if (err) { err = -EIO; goto err_reset; } if (atl1e_read_mac_addr(&adapter->hw) != 0) { err = -EIO; netdev_err(netdev, "get mac address failed\n"); goto err_eeprom; } memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); goto err_register; } /* assume we have no link for now */ netif_stop_queue(netdev); netif_carrier_off(netdev); cards_found++; return 0; err_reset: err_register: err_sw_init: err_eeprom: iounmap(adapter->hw.hw_addr); err_init_netdev: err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /* * atl1e_remove - Device Removal Routine * @pdev: PCI device information struct * * atl1e_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. 
The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. */ static void __devexit atl1e_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); /* * flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ set_bit(__AT_DOWN, &adapter->flags); atl1e_del_timer(adapter); atl1e_cancel_work(adapter); unregister_netdev(netdev); atl1e_free_ring_resources(adapter); atl1e_force_ps(&adapter->hw); iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } /* * atl1e_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) atl1e_down(adapter); pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /* * atl1e_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the e1000_resume routine. 
*/ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); if (pci_enable_device(pdev)) { netdev_err(adapter->netdev, "Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); atl1e_reset_hw(&adapter->hw); return PCI_ERS_RESULT_RECOVERED; } /* * atl1e_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the atl1e_resume routine. */ static void atl1e_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (atl1e_up(adapter)) { netdev_err(adapter->netdev, "can't bring device back up after reset\n"); return; } } netif_device_attach(netdev); } static struct pci_error_handlers atl1e_err_handler = { .error_detected = atl1e_io_error_detected, .slot_reset = atl1e_io_slot_reset, .resume = atl1e_io_resume, }; static struct pci_driver atl1e_driver = { .name = atl1e_driver_name, .id_table = atl1e_pci_tbl, .probe = atl1e_probe, .remove = __devexit_p(atl1e_remove), /* Power Management Hooks */ #ifdef CONFIG_PM .suspend = atl1e_suspend, .resume = atl1e_resume, #endif .shutdown = atl1e_shutdown, .err_handler = &atl1e_err_handler }; /* * atl1e_init_module - Driver Registration Routine * * atl1e_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. */ static int __init atl1e_init_module(void) { return pci_register_driver(&atl1e_driver); } /* * atl1e_exit_module - Driver Exit Cleanup Routine * * atl1e_exit_module is called just before the driver is removed * from memory. 
*/ static void __exit atl1e_exit_module(void) { pci_unregister_driver(&atl1e_driver); } module_init(atl1e_init_module); module_exit(atl1e_exit_module);
gpl-2.0
revjunkie/N7100
arch/arm/mach-omap2/pm34xx.c
1978
26961
/* * OMAP3 Power Management Routines * * Copyright (C) 2006-2008 Nokia Corporation * Tony Lindgren <tony@atomide.com> * Jouni Hogander * * Copyright (C) 2007 Texas Instruments, Inc. * Rajendra Nayak <rnayak@ti.com> * * Copyright (C) 2005 Texas Instruments, Inc. * Richard Woodruff <r-woodruff2@ti.com> * * Based on pm.c for omap1 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/pm.h> #include <linux/suspend.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/list.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/console.h> #include <trace/events/power.h> #include <plat/sram.h> #include "clockdomain.h" #include "powerdomain.h" #include <plat/serial.h> #include <plat/sdrc.h> #include <plat/prcm.h> #include <plat/gpmc.h> #include <plat/dma.h> #include <asm/tlbflush.h> #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" #include "prm-regbits-34xx.h" #include "prm2xxx_3xxx.h" #include "pm.h" #include "sdrc.h" #include "control.h" #ifdef CONFIG_SUSPEND static suspend_state_t suspend_state = PM_SUSPEND_ON; static inline bool is_suspending(void) { return (suspend_state != PM_SUSPEND_ON); } #else static inline bool is_suspending(void) { return false; } #endif /* Scratchpad offsets */ #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 #define OMAP343X_TABLE_VALUE_OFFSET 0xc0 #define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8 /* pm34xx errata defined in pm.h */ u16 pm34xx_errata; struct power_state { struct powerdomain *pwrdm; u32 next_state; #ifdef CONFIG_SUSPEND u32 saved_state; #endif struct list_head node; }; static LIST_HEAD(pwrst_list); static void (*_omap_sram_idle)(u32 *addr, int save_state); static int (*_omap_save_secure_sram)(u32 *addr); static struct powerdomain *mpu_pwrdm, *neon_pwrdm; static struct powerdomain 
*core_pwrdm, *per_pwrdm; static struct powerdomain *cam_pwrdm; static inline void omap3_per_save_context(void) { omap_gpio_save_context(); } static inline void omap3_per_restore_context(void) { omap_gpio_restore_context(); } static void omap3_enable_io_chain(void) { int timeout = 0; if (omap_rev() >= OMAP3430_REV_ES3_1) { omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, PM_WKEN); /* Do a readback to assure write has been done */ omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) & OMAP3430_ST_IO_CHAIN_MASK)) { timeout++; if (timeout > 1000) { printk(KERN_ERR "Wake up daisy chain " "activation failed.\n"); return; } omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, WKUP_MOD, PM_WKEN); } } } static void omap3_disable_io_chain(void) { if (omap_rev() >= OMAP3430_REV_ES3_1) omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, PM_WKEN); } static void omap3_core_save_context(void) { omap3_ctrl_save_padconf(); /* * Force write last pad into memory, as this can fail in some * cases according to errata 1.157, 1.185 */ omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14), OMAP343X_CONTROL_MEM_WKUP + 0x2a0); /* Save the Interrupt controller context */ omap_intc_save_context(); /* Save the GPMC context */ omap3_gpmc_save_context(); /* Save the system control module context, padconf already save above*/ omap3_control_save_context(); omap_dma_global_context_save(); } static void omap3_core_restore_context(void) { /* Restore the control module context, padconf restored by h/w */ omap3_control_restore_context(); /* Restore the GPMC context */ omap3_gpmc_restore_context(); /* Restore the interrupt controller context */ omap_intc_restore_context(); omap_dma_global_context_restore(); } /* * FIXME: This function should be called before entering off-mode after * OMAP3 secure services have been accessed. 
Currently it is only called * once during boot sequence, but this works as we are not using secure * services. */ static void omap3_save_secure_ram_context(void) { u32 ret; int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { /* * MPU next state must be set to POWER_ON temporarily, * otherwise the WFI executed inside the ROM code * will hang the system. */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ret = _omap_save_secure_sram((u32 *) __pa(omap3_secure_ram_storage)); pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { printk(KERN_ERR "save_secure_sram() returns %08x\n", ret); while (1) ; } } } /* * PRCM Interrupt Handler Helper Function * * The purpose of this function is to clear any wake-up events latched * in the PRCM PM_WKST_x registers. It is possible that a wake-up event * may occur whilst attempting to clear a PM_WKST_x register and thus * set another bit in this register. A while loop is used to ensure * that any peripheral wake-up events occurring while attempting to * clear the PM_WKST_x are detected and cleared. */ static int prcm_clear_mod_irqs(s16 module, u8 regs) { u32 wkst, fclk, iclk, clken; u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1; u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1; u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1; u16 grpsel_off = (regs == 3) ? 
OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL; int c = 0; wkst = omap2_prm_read_mod_reg(module, wkst_off); wkst &= omap2_prm_read_mod_reg(module, grpsel_off); if (wkst) { iclk = omap2_cm_read_mod_reg(module, iclk_off); fclk = omap2_cm_read_mod_reg(module, fclk_off); while (wkst) { clken = wkst; omap2_cm_set_mod_reg_bits(clken, module, iclk_off); /* * For USBHOST, we don't know whether HOST1 or * HOST2 woke us up, so enable both f-clocks */ if (module == OMAP3430ES2_USBHOST_MOD) clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT; omap2_cm_set_mod_reg_bits(clken, module, fclk_off); omap2_prm_write_mod_reg(wkst, module, wkst_off); wkst = omap2_prm_read_mod_reg(module, wkst_off); c++; } omap2_cm_write_mod_reg(iclk, module, iclk_off); omap2_cm_write_mod_reg(fclk, module, fclk_off); } return c; } static int _prcm_int_handle_wakeup(void) { int c; c = prcm_clear_mod_irqs(WKUP_MOD, 1); c += prcm_clear_mod_irqs(CORE_MOD, 1); c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1); if (omap_rev() > OMAP3430_REV_ES1_0) { c += prcm_clear_mod_irqs(CORE_MOD, 3); c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1); } return c; } /* * PRCM Interrupt Handler * * The PRM_IRQSTATUS_MPU register indicates if there are any pending * interrupts from the PRCM for the MPU. These bits must be cleared in * order to clear the PRCM interrupt. The PRCM interrupt handler is * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU * register indicates that a wake-up event is pending for the MPU and * this bit can only be cleared if the all the wake-up events latched * in the various PM_WKST_x registers have been cleared. 
The interrupt * handler is implemented using a do-while loop so that if a wake-up * event occurred during the processing of the prcm interrupt handler * (setting a bit in the corresponding PM_WKST_x register and thus * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register) * this would be handled. */ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) { u32 irqenable_mpu, irqstatus_mpu; int c = 0; irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); irqstatus_mpu &= irqenable_mpu; do { if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK | OMAP3430_IO_ST_MASK)) { c = _prcm_int_handle_wakeup(); /* * Is the MPU PRCM interrupt handler racing with the * IVA2 PRCM interrupt handler ? */ WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup " "but no wakeup sources are marked\n"); } else { /* XXX we need to expand our PRCM interrupt handler */ WARN(1, "prcm: WARNING: PRCM interrupt received, but " "no code to handle it (%08x)\n", irqstatus_mpu); } omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); irqstatus_mpu &= irqenable_mpu; } while (irqstatus_mpu); return IRQ_HANDLED; } /* Function to restore the table entry that was modified for enabling MMU */ static void restore_table_entry(void) { void __iomem *scratchpad_address; u32 previous_value, control_reg_value; u32 *address; scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); /* Get address of entry that was modified */ address = (u32 *)__raw_readl(scratchpad_address + OMAP343X_TABLE_ADDRESS_OFFSET); /* Get the previous value which needs to be restored */ previous_value = __raw_readl(scratchpad_address + OMAP343X_TABLE_VALUE_OFFSET); address = __va(address); *address = previous_value; flush_tlb_all(); control_reg_value = __raw_readl(scratchpad_address + 
OMAP343X_CONTROL_REG_VALUE_OFFSET); /* This will enable caches and prediction */ set_cr(control_reg_value); } void omap_sram_idle(void) { /* Variable to tell what needs to be saved and restored * in omap_sram_idle*/ /* save_state = 0 => Nothing to save and restored */ /* save_state = 1 => Only L1 and logic lost */ /* save_state = 2 => Only L2 lost */ /* save_state = 3 => L1, L2 and logic lost */ int save_state = 0; int mpu_next_state = PWRDM_POWER_ON; int per_next_state = PWRDM_POWER_ON; int core_next_state = PWRDM_POWER_ON; int per_going_off; int core_prev_state, per_prev_state; u32 sdrc_pwr = 0; if (!_omap_sram_idle) return; pwrdm_clear_all_prev_pwrst(mpu_pwrdm); pwrdm_clear_all_prev_pwrst(neon_pwrdm); pwrdm_clear_all_prev_pwrst(core_pwrdm); pwrdm_clear_all_prev_pwrst(per_pwrdm); mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); switch (mpu_next_state) { case PWRDM_POWER_ON: case PWRDM_POWER_RET: /* No need to save context */ save_state = 0; break; case PWRDM_POWER_OFF: save_state = 3; break; default: /* Invalid state */ printk(KERN_ERR "Invalid mpu state in sram_idle\n"); return; } pwrdm_pre_transition(); /* NEON control */ if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON) pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state); /* Enable IO-PAD and IO-CHAIN wakeups */ per_next_state = pwrdm_read_next_pwrst(per_pwrdm); core_next_state = pwrdm_read_next_pwrst(core_pwrdm); if (omap3_has_io_wakeup() && (per_next_state < PWRDM_POWER_ON || core_next_state < PWRDM_POWER_ON)) { omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); omap3_enable_io_chain(); } /* Block console output in case it is on one of the OMAP UARTs */ if (!is_suspending()) if (per_next_state < PWRDM_POWER_ON || core_next_state < PWRDM_POWER_ON) if (!console_trylock()) goto console_still_active; /* PER */ if (per_next_state < PWRDM_POWER_ON) { per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 
1 : 0; omap_uart_prepare_idle(2); omap_uart_prepare_idle(3); omap2_gpio_prepare_for_idle(per_going_off); if (per_next_state == PWRDM_POWER_OFF) omap3_per_save_context(); } /* CORE */ if (core_next_state < PWRDM_POWER_ON) { omap_uart_prepare_idle(0); omap_uart_prepare_idle(1); if (core_next_state == PWRDM_POWER_OFF) { omap3_core_save_context(); omap3_cm_save_context(); } } omap3_intc_prepare_idle(); /* * On EMU/HS devices ROM code restores a SRDC value * from scratchpad which has automatic self refresh on timeout * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443. * Hence store/restore the SDRC_POWER register here. */ if (omap_rev() >= OMAP3430_REV_ES3_0 && omap_type() != OMAP2_DEVICE_TYPE_GP && core_next_state == PWRDM_POWER_OFF) sdrc_pwr = sdrc_read_reg(SDRC_POWER); /* * omap3_arm_context is the location where ARM registers * get saved. The restore path then reads from this * location and restores them back. */ _omap_sram_idle(omap3_arm_context, save_state); cpu_init(); /* Restore normal SDRC POWER settings */ if (omap_rev() >= OMAP3430_REV_ES3_0 && omap_type() != OMAP2_DEVICE_TYPE_GP && core_next_state == PWRDM_POWER_OFF) sdrc_write_reg(sdrc_pwr, SDRC_POWER); /* Restore table entry modified during MMU restoration */ if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF) restore_table_entry(); /* CORE */ if (core_next_state < PWRDM_POWER_ON) { core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); if (core_prev_state == PWRDM_POWER_OFF) { omap3_core_restore_context(); omap3_cm_restore_context(); omap3_sram_restore_context(); omap2_sms_restore_context(); } omap_uart_resume_idle(0); omap_uart_resume_idle(1); if (core_next_state == PWRDM_POWER_OFF) omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, OMAP3430_GR_MOD, OMAP3_PRM_VOLTCTRL_OFFSET); } omap3_intc_resume_idle(); /* PER */ if (per_next_state < PWRDM_POWER_ON) { per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm); omap2_gpio_resume_after_idle(); if (per_prev_state == PWRDM_POWER_OFF) 
omap3_per_restore_context(); omap_uart_resume_idle(2); omap_uart_resume_idle(3); } if (!is_suspending()) console_unlock(); console_still_active: /* Disable IO-PAD and IO-CHAIN wakeup */ if (omap3_has_io_wakeup() && (per_next_state < PWRDM_POWER_ON || core_next_state < PWRDM_POWER_ON)) { omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); omap3_disable_io_chain(); } pwrdm_post_transition(); clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]); } int omap3_can_sleep(void) { if (!sleep_while_idle) return 0; if (!omap_uart_can_sleep()) return 0; return 1; } static void omap3_pm_idle(void) { local_irq_disable(); local_fiq_disable(); if (!omap3_can_sleep()) goto out; if (omap_irq_pending() || need_resched()) goto out; trace_power_start(POWER_CSTATE, 1, smp_processor_id()); trace_cpu_idle(1, smp_processor_id()); omap_sram_idle(); trace_power_end(smp_processor_id()); trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); out: local_fiq_enable(); local_irq_enable(); } #ifdef CONFIG_SUSPEND static int omap3_pm_suspend(void) { struct power_state *pwrst; int state, ret = 0; if (wakeup_timer_seconds || wakeup_timer_milliseconds) omap2_pm_wakeup_on_timer(wakeup_timer_seconds, wakeup_timer_milliseconds); /* Read current next_pwrsts */ list_for_each_entry(pwrst, &pwrst_list, node) pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); /* Set ones wanted by suspend */ list_for_each_entry(pwrst, &pwrst_list, node) { if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state)) goto restore; if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm)) goto restore; } omap_uart_prepare_suspend(); omap3_intc_suspend(); omap_sram_idle(); restore: /* Restore next_pwrsts */ list_for_each_entry(pwrst, &pwrst_list, node) { state = pwrdm_read_prev_pwrst(pwrst->pwrdm); if (state > pwrst->next_state) { printk(KERN_INFO "Powerdomain (%s) didn't enter " "target state %d\n", pwrst->pwrdm->name, pwrst->next_state); ret = -1; } omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); } if (ret) 
printk(KERN_ERR "Could not enter target state in pm_suspend\n"); else printk(KERN_INFO "Successfully put all powerdomains " "to target state\n"); return ret; } static int omap3_pm_enter(suspend_state_t unused) { int ret = 0; switch (suspend_state) { case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: ret = omap3_pm_suspend(); break; default: ret = -EINVAL; } return ret; } /* Hooks to enable / disable UART interrupts during suspend */ static int omap3_pm_begin(suspend_state_t state) { disable_hlt(); suspend_state = state; omap_uart_enable_irqs(0); return 0; } static void omap3_pm_end(void) { suspend_state = PM_SUSPEND_ON; omap_uart_enable_irqs(1); enable_hlt(); return; } static const struct platform_suspend_ops omap_pm_ops = { .begin = omap3_pm_begin, .end = omap3_pm_end, .enter = omap3_pm_enter, .valid = suspend_valid_only_mem, }; #endif /* CONFIG_SUSPEND */ /** * omap3_iva_idle(): ensure IVA is in idle so it can be put into * retention * * In cases where IVA2 is activated by bootcode, it may prevent * full-chip retention or off-mode because it is not idle. This * function forces the IVA2 into idle state so it can go * into retention/off and thus allow full-chip retention/off. 
* **/ static void __init omap3_iva_idle(void) { /* ensure IVA2 clock is disabled */ omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN); /* if no clock activity, nothing else to do */ if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) & OMAP3430_CLKACTIVITY_IVA2_MASK)) return; /* Reset IVA2 */ omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); /* Enable IVA2 clock */ omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK, OMAP3430_IVA2_MOD, CM_FCLKEN); /* Set IVA2 boot mode to 'idle' */ omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE, OMAP343X_CONTROL_IVA2_BOOTMOD); /* Un-reset IVA2 */ omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); /* Disable IVA2 clock */ omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN); /* Reset IVA2 */ omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); } static void __init omap3_d2d_idle(void) { u16 mask, padconf; /* In a stand alone OMAP3430 where there is not a stacked * modem for the D2D Idle Ack and D2D MStandby must be pulled * high. S CONTROL_PADCONF_SAD2D_IDLEACK and * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull up. */ mask = (1 << 4) | (1 << 3); /* pull-up, enabled */ padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY); padconf |= mask; omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY); padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK); padconf |= mask; omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK); /* reset modem */ omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK | OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK, CORE_MOD, OMAP2_RM_RSTCTRL); omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL); } static void __init prcm_setup_regs(void) { u32 omap3630_en_uart4_mask = cpu_is_omap3630() ? OMAP3630_EN_UART4_MASK : 0; u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ? 
OMAP3630_GRPSEL_UART4_MASK : 0; /* XXX This should be handled by hwmod code or SCM init code */ omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG); /* * Enable control of expternal oscillator through * sys_clkreq. In the long run clock framework should * take care of this. */ omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK, 1 << OMAP_AUTOEXTCLKMODE_SHIFT, OMAP3430_GR_MOD, OMAP3_PRM_CLKSRC_CTRL_OFFSET); /* setup wakup source */ omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK | OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK, WKUP_MOD, PM_WKEN); /* No need to write EN_IO, that is always enabled */ omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK | OMAP3430_GRPSEL_GPT1_MASK | OMAP3430_GRPSEL_GPT12_MASK, WKUP_MOD, OMAP3430_PM_MPUGRPSEL); /* For some reason IO doesn't generate wakeup event even if * it is selected to mpu wakeup goup */ omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); /* Enable PM_WKEN to support DSS LPR */ omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK, OMAP3430_DSS_MOD, PM_WKEN); /* Enable wakeups in PER */ omap2_prm_write_mod_reg(omap3630_en_uart4_mask | OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK | OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK | OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK | OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK | OMAP3430_EN_MCBSP4_MASK, OMAP3430_PER_MOD, PM_WKEN); /* and allow them to wake up MPU */ omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask | OMAP3430_GRPSEL_GPIO2_MASK | OMAP3430_GRPSEL_GPIO3_MASK | OMAP3430_GRPSEL_GPIO4_MASK | OMAP3430_GRPSEL_GPIO5_MASK | OMAP3430_GRPSEL_GPIO6_MASK | OMAP3430_GRPSEL_UART3_MASK | OMAP3430_GRPSEL_MCBSP2_MASK | OMAP3430_GRPSEL_MCBSP3_MASK | OMAP3430_GRPSEL_MCBSP4_MASK, OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); /* Don't attach IVA interrupts */ omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); omap2_prm_write_mod_reg(0, CORE_MOD, 
OMAP3430_PM_IVAGRPSEL1); omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3); omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL); /* Clear any pending 'reset' flags */ omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST); omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST); /* Clear any pending PRCM interrupts */ omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); omap3_iva_idle(); omap3_d2d_idle(); } void omap3_pm_off_mode_enable(int enable) { struct power_state *pwrst; u32 state; if (enable) state = PWRDM_POWER_OFF; else state = PWRDM_POWER_RET; list_for_each_entry(pwrst, &pwrst_list, node) { if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && pwrst->pwrdm == core_pwrdm && state == PWRDM_POWER_OFF) { pwrst->next_state = PWRDM_POWER_RET; pr_warn("%s: Core OFF disabled due to errata i583\n", __func__); } else { pwrst->next_state = state; } omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); } } int omap3_pm_get_suspend_state(struct powerdomain *pwrdm) { struct power_state *pwrst; list_for_each_entry(pwrst, &pwrst_list, node) { if (pwrst->pwrdm == pwrdm) return pwrst->next_state; } return -EINVAL; } int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state) { struct power_state *pwrst; list_for_each_entry(pwrst, &pwrst_list, node) { if (pwrst->pwrdm == pwrdm) { pwrst->next_state = state; return 0; } } return -EINVAL; } static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) { struct power_state *pwrst; if (!pwrdm->pwrsts) return 0; pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC); if (!pwrst) return -ENOMEM; 
pwrst->pwrdm = pwrdm; pwrst->next_state = PWRDM_POWER_RET; list_add(&pwrst->node, &pwrst_list); if (pwrdm_has_hdwr_sar(pwrdm)) pwrdm_enable_hdwr_sar(pwrdm); return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); } /* * Enable hw supervised mode for all clockdomains if it's * supported. Initiate sleep transition for other clockdomains, if * they are not used */ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) { if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO) clkdm_allow_idle(clkdm); else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && atomic_read(&clkdm->usecount) == 0) clkdm_sleep(clkdm); return 0; } void omap_push_sram_idle(void) { _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, omap34xx_cpu_suspend_sz); if (omap_type() != OMAP2_DEVICE_TYPE_GP) _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, save_secure_ram_context_sz); } static void __init pm_errata_configure(void) { if (cpu_is_omap3630()) { pm34xx_errata |= PM_RTA_ERRATUM_i608; /* Enable the l2 cache toggling in sleep logic */ enable_omap3630_toggle_l2_on_restore(); if (omap_rev() < OMAP3630_REV_ES1_2) pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583; } } static int __init omap3_pm_init(void) { struct power_state *pwrst, *tmp; struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm; int ret; if (!cpu_is_omap34xx()) return -ENODEV; pm_errata_configure(); /* XXX prcm_setup_regs needs to be before enabling hw * supervised mode for powerdomains */ prcm_setup_regs(); ret = request_irq(INT_34XX_PRCM_MPU_IRQ, (irq_handler_t)prcm_interrupt_handler, IRQF_DISABLED, "prcm", NULL); if (ret) { printk(KERN_ERR "request_irq failed to register for 0x%x\n", INT_34XX_PRCM_MPU_IRQ); goto err1; } ret = pwrdm_for_each(pwrdms_setup, NULL); if (ret) { printk(KERN_ERR "Failed to setup powerdomains\n"); goto err2; } (void) clkdm_for_each(clkdms_setup, NULL); mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); if (mpu_pwrdm == NULL) { printk(KERN_ERR "Failed to get mpu_pwrdm\n"); goto err2; } 
neon_pwrdm = pwrdm_lookup("neon_pwrdm"); per_pwrdm = pwrdm_lookup("per_pwrdm"); core_pwrdm = pwrdm_lookup("core_pwrdm"); cam_pwrdm = pwrdm_lookup("cam_pwrdm"); neon_clkdm = clkdm_lookup("neon_clkdm"); mpu_clkdm = clkdm_lookup("mpu_clkdm"); per_clkdm = clkdm_lookup("per_clkdm"); core_clkdm = clkdm_lookup("core_clkdm"); omap_push_sram_idle(); #ifdef CONFIG_SUSPEND suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ pm_idle = omap3_pm_idle; omap3_idle_init(); /* * RTA is disabled during initialization as per erratum i608 * it is safer to disable RTA by the bootloader, but we would like * to be doubly sure here and prevent any mishaps. */ if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608)) omap3630_ctrl_disable_rta(); clkdm_add_wkdep(neon_clkdm, mpu_clkdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { omap3_secure_ram_storage = kmalloc(0x803F, GFP_KERNEL); if (!omap3_secure_ram_storage) printk(KERN_ERR "Memory allocation failed when" "allocating for secure sram context\n"); local_irq_disable(); local_fiq_disable(); omap_dma_global_context_save(); omap3_save_secure_ram_context(); omap_dma_global_context_restore(); local_irq_enable(); local_fiq_enable(); } omap3_save_scratchpad_contents(); err1: return ret; err2: free_irq(INT_34XX_PRCM_MPU_IRQ, NULL); list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) { list_del(&pwrst->node); kfree(pwrst); } return ret; } late_initcall(omap3_pm_init);
gpl-2.0
sayeed99/kernel-FlareM
kernel/padata.c
2234
27767
/* * padata.c - generic interface to process data streams in parallel * * See Documentation/padata.txt for an api documentation. * * Copyright (C) 2008, 2009 secunet Security Networks AG * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/export.h> #include <linux/cpumask.h> #include <linux/err.h> #include <linux/cpu.h> #include <linux/padata.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/rcupdate.h> #define MAX_OBJ_NUM 1000 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; target_cpu = cpumask_first(pd->cpumask.pcpu); for (cpu = 0; cpu < cpu_index; cpu++) target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); return target_cpu; } static int padata_cpu_hash(struct parallel_data *pd) { int cpu_index; /* * Hash the sequence numbers to the cpus by taking * seq_nr mod. number of cpus in use. 
*/ spin_lock(&pd->seq_lock); cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); pd->seq_nr++; spin_unlock(&pd->seq_lock); return padata_index_to_cpu(pd, cpu_index); } static void padata_parallel_worker(struct work_struct *parallel_work) { struct padata_parallel_queue *pqueue; struct parallel_data *pd; struct padata_instance *pinst; LIST_HEAD(local_list); local_bh_disable(); pqueue = container_of(parallel_work, struct padata_parallel_queue, work); pd = pqueue->pd; pinst = pd->pinst; spin_lock(&pqueue->parallel.lock); list_replace_init(&pqueue->parallel.list, &local_list); spin_unlock(&pqueue->parallel.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; padata = list_entry(local_list.next, struct padata_priv, list); list_del_init(&padata->list); padata->parallel(padata); } local_bh_enable(); } /** * padata_do_parallel - padata parallelization function * * @pinst: padata instance * @padata: object to be parallelized * @cb_cpu: cpu the serialization callback function will run on, * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). * * The parallelization callback function will run with BHs off. * Note: Every object which is parallelized by padata_do_parallel * must be seen by padata_do_serial. 
*/ int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu) { int target_cpu, err; struct padata_parallel_queue *queue; struct parallel_data *pd; rcu_read_lock_bh(); pd = rcu_dereference(pinst->pd); err = -EINVAL; if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) goto out; if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) goto out; err = -EBUSY; if ((pinst->flags & PADATA_RESET)) goto out; if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) goto out; err = 0; atomic_inc(&pd->refcnt); padata->pd = pd; padata->cb_cpu = cb_cpu; target_cpu = padata_cpu_hash(pd); queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); list_add_tail(&padata->list, &queue->parallel.list); spin_unlock(&queue->parallel.lock); queue_work_on(target_cpu, pinst->wq, &queue->work); out: rcu_read_unlock_bh(); return err; } EXPORT_SYMBOL(padata_do_parallel); /* * padata_get_next - Get the next object that needs serialization. * * Return values are: * * A pointer to the control struct of the next object that needs * serialization, if present in one of the percpu reorder queues. * * NULL, if all percpu reorder queues are empty. * * -EINPROGRESS, if the next object that needs serialization will * be parallel processed by another cpu and is not yet present in * the cpu's reorder queue. * * -ENODATA, if this cpu has to do the parallel processing for * the next object. */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { int cpu, num_cpus; unsigned int next_nr, next_index; struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; num_cpus = cpumask_weight(pd->cpumask.pcpu); /* * Calculate the percpu reorder queue and the sequence * number of the next object. 
*/ next_nr = pd->processed; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); next_queue = per_cpu_ptr(pd->pqueue, cpu); padata = NULL; reorder = &next_queue->reorder; if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); spin_unlock(&reorder->lock); pd->processed++; goto out; } if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); goto out; } padata = ERR_PTR(-EINPROGRESS); out: return padata; } static void padata_reorder(struct parallel_data *pd) { int cb_cpu; struct padata_priv *padata; struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; /* * We need to ensure that only one cpu can work on dequeueing of * the reorder queue the time. Calculating in which percpu reorder * queue the next object will arrive takes some time. A spinlock * would be highly contended. Also it is not clear in which order * the objects arrive to the reorder queues. So a cpu could wait to * get the lock just to notice that there is nothing to do at the * moment. Therefore we use a trylock and let the holder of the lock * care for all the objects enqueued during the holdtime of the lock. */ if (!spin_trylock_bh(&pd->lock)) return; while (1) { padata = padata_get_next(pd); /* * All reorder queues are empty, or the next object that needs * serialization is parallel processed by another cpu and is * still on it's way to the cpu's reorder queue, nothing to * do for now. */ if (!padata || PTR_ERR(padata) == -EINPROGRESS) break; /* * This cpu has to do the parallel processing of the next * object. It's waiting in the cpu's parallelization queue, * so exit immediately. 
*/ if (PTR_ERR(padata) == -ENODATA) { del_timer(&pd->timer); spin_unlock_bh(&pd->lock); return; } cb_cpu = padata->cb_cpu; squeue = per_cpu_ptr(pd->squeue, cb_cpu); spin_lock(&squeue->serial.lock); list_add_tail(&padata->list, &squeue->serial.list); spin_unlock(&squeue->serial.lock); queue_work_on(cb_cpu, pinst->wq, &squeue->work); } spin_unlock_bh(&pd->lock); /* * The next object that needs serialization might have arrived to * the reorder queues in the meantime, we will be called again * from the timer function if no one else cares for it. */ if (atomic_read(&pd->reorder_objects) && !(pinst->flags & PADATA_RESET)) mod_timer(&pd->timer, jiffies + HZ); else del_timer(&pd->timer); return; } static void padata_reorder_timer(unsigned long arg) { struct parallel_data *pd = (struct parallel_data *)arg; padata_reorder(pd); } static void padata_serial_worker(struct work_struct *serial_work) { struct padata_serial_queue *squeue; struct parallel_data *pd; LIST_HEAD(local_list); local_bh_disable(); squeue = container_of(serial_work, struct padata_serial_queue, work); pd = squeue->pd; spin_lock(&squeue->serial.lock); list_replace_init(&squeue->serial.list, &local_list); spin_unlock(&squeue->serial.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; padata = list_entry(local_list.next, struct padata_priv, list); list_del_init(&padata->list); padata->serial(padata); atomic_dec(&pd->refcnt); } local_bh_enable(); } /** * padata_do_serial - padata serialization function * * @padata: object to be serialized. * * padata_do_serial must be called for every parallelized object. * The serialization callback function will run with BHs off. 
*/ void padata_do_serial(struct padata_priv *padata) { int cpu; struct padata_parallel_queue *pqueue; struct parallel_data *pd; pd = padata->pd; cpu = get_cpu(); pqueue = per_cpu_ptr(pd->pqueue, cpu); spin_lock(&pqueue->reorder.lock); atomic_inc(&pd->reorder_objects); list_add_tail(&padata->list, &pqueue->reorder.list); spin_unlock(&pqueue->reorder.lock); put_cpu(); padata_reorder(pd); } EXPORT_SYMBOL(padata_do_serial); static int padata_setup_cpumasks(struct parallel_data *pd, const struct cpumask *pcpumask, const struct cpumask *cbcpumask) { if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) return -ENOMEM; cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { free_cpumask_var(pd->cpumask.cbcpu); return -ENOMEM; } cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); return 0; } static void __padata_list_init(struct padata_list *pd_list) { INIT_LIST_HEAD(&pd_list->list); spin_lock_init(&pd_list->lock); } /* Initialize all percpu queues used by serial workers */ static void padata_init_squeues(struct parallel_data *pd) { int cpu; struct padata_serial_queue *squeue; for_each_cpu(cpu, pd->cpumask.cbcpu) { squeue = per_cpu_ptr(pd->squeue, cpu); squeue->pd = pd; __padata_list_init(&squeue->serial); INIT_WORK(&squeue->work, padata_serial_worker); } } /* Initialize all percpu queues used by parallel workers */ static void padata_init_pqueues(struct parallel_data *pd) { int cpu_index, cpu; struct padata_parallel_queue *pqueue; cpu_index = 0; for_each_cpu(cpu, pd->cpumask.pcpu) { pqueue = per_cpu_ptr(pd->pqueue, cpu); pqueue->pd = pd; pqueue->cpu_index = cpu_index; cpu_index++; __padata_list_init(&pqueue->reorder); __padata_list_init(&pqueue->parallel); INIT_WORK(&pqueue->work, padata_parallel_worker); atomic_set(&pqueue->num_obj, 0); } } /* Allocate and initialize the internal cpumask dependend resources. 
*/ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, const struct cpumask *pcpumask, const struct cpumask *cbcpumask) { struct parallel_data *pd; pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); if (!pd) goto err; pd->pqueue = alloc_percpu(struct padata_parallel_queue); if (!pd->pqueue) goto err_free_pd; pd->squeue = alloc_percpu(struct padata_serial_queue); if (!pd->squeue) goto err_free_pqueue; if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) goto err_free_squeue; padata_init_pqueues(pd); padata_init_squeues(pd); setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); pd->seq_nr = 0; atomic_set(&pd->reorder_objects, 0); atomic_set(&pd->refcnt, 0); pd->pinst = pinst; spin_lock_init(&pd->lock); return pd; err_free_squeue: free_percpu(pd->squeue); err_free_pqueue: free_percpu(pd->pqueue); err_free_pd: kfree(pd); err: return NULL; } static void padata_free_pd(struct parallel_data *pd) { free_cpumask_var(pd->cpumask.pcpu); free_cpumask_var(pd->cpumask.cbcpu); free_percpu(pd->pqueue); free_percpu(pd->squeue); kfree(pd); } /* Flush all objects out of the padata queues. */ static void padata_flush_queues(struct parallel_data *pd) { int cpu; struct padata_parallel_queue *pqueue; struct padata_serial_queue *squeue; for_each_cpu(cpu, pd->cpumask.pcpu) { pqueue = per_cpu_ptr(pd->pqueue, cpu); flush_work(&pqueue->work); } del_timer_sync(&pd->timer); if (atomic_read(&pd->reorder_objects)) padata_reorder(pd); for_each_cpu(cpu, pd->cpumask.cbcpu) { squeue = per_cpu_ptr(pd->squeue, cpu); flush_work(&squeue->work); } BUG_ON(atomic_read(&pd->refcnt) != 0); } static void __padata_start(struct padata_instance *pinst) { pinst->flags |= PADATA_INIT; } static void __padata_stop(struct padata_instance *pinst) { if (!(pinst->flags & PADATA_INIT)) return; pinst->flags &= ~PADATA_INIT; synchronize_rcu(); get_online_cpus(); padata_flush_queues(pinst->pd); put_online_cpus(); } /* Replace the internal control structure with a new one. 
*/ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) { struct parallel_data *pd_old = pinst->pd; int notification_mask = 0; pinst->flags |= PADATA_RESET; rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) notification_mask |= PADATA_CPU_PARALLEL; if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) notification_mask |= PADATA_CPU_SERIAL; padata_flush_queues(pd_old); padata_free_pd(pd_old); if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, notification_mask, &pd_new->cpumask); pinst->flags &= ~PADATA_RESET; } /** * padata_register_cpumask_notifier - Registers a notifier that will be called * if either pcpu or cbcpu or both cpumasks change. * * @pinst: A poineter to padata instance * @nblock: A pointer to notifier block. */ int padata_register_cpumask_notifier(struct padata_instance *pinst, struct notifier_block *nblock) { return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, nblock); } EXPORT_SYMBOL(padata_register_cpumask_notifier); /** * padata_unregister_cpumask_notifier - Unregisters cpumask notifier * registered earlier using padata_register_cpumask_notifier * * @pinst: A pointer to data instance. * @nlock: A pointer to notifier block. */ int padata_unregister_cpumask_notifier(struct padata_instance *pinst, struct notifier_block *nblock) { return blocking_notifier_chain_unregister( &pinst->cpumask_change_notifier, nblock); } EXPORT_SYMBOL(padata_unregister_cpumask_notifier); /* If cpumask contains no active cpu, we mark the instance as invalid. 
*/ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) { if (!cpumask_intersects(cpumask, cpu_online_mask)) { pinst->flags |= PADATA_INVALID; return false; } pinst->flags &= ~PADATA_INVALID; return true; } static int __padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, cpumask_var_t cbcpumask) { int valid; struct parallel_data *pd; valid = padata_validate_cpumask(pinst, pcpumask); if (!valid) { __padata_stop(pinst); goto out_replace; } valid = padata_validate_cpumask(pinst, cbcpumask); if (!valid) __padata_stop(pinst); out_replace: pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) return -ENOMEM; cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); padata_replace(pinst, pd); if (valid) __padata_start(pinst); return 0; } /** * padata_set_cpumasks - Set both parallel and serial cpumasks. The first * one is used by parallel workers and the second one * by the wokers doing serialization. * * @pinst: padata instance * @pcpumask: the cpumask to use for parallel workers * @cbcpumask: the cpumsak to use for serial workers */ int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, cpumask_var_t cbcpumask) { int err; mutex_lock(&pinst->lock); get_online_cpus(); err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); put_online_cpus(); mutex_unlock(&pinst->lock); return err; } EXPORT_SYMBOL(padata_set_cpumasks); /** * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value * equivalent to @cpumask. * * @pinst: padata instance * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding * to parallel and serial cpumasks respectively. 
* @cpumask: the cpumask to use */ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask) { struct cpumask *serial_mask, *parallel_mask; int err = -EINVAL; mutex_lock(&pinst->lock); get_online_cpus(); switch (cpumask_type) { case PADATA_CPU_PARALLEL: serial_mask = pinst->cpumask.cbcpu; parallel_mask = cpumask; break; case PADATA_CPU_SERIAL: parallel_mask = pinst->cpumask.pcpu; serial_mask = cpumask; break; default: goto out; } err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: put_online_cpus(); mutex_unlock(&pinst->lock); return err; } EXPORT_SYMBOL(padata_set_cpumask); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { struct parallel_data *pd; if (cpumask_test_cpu(cpu, cpu_online_mask)) { pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; padata_replace(pinst, pd); if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_start(pinst); } return 0; } /** * padata_add_cpu - add a cpu to one or both(parallel and serial) * padata cpumasks. * * @pinst: padata instance * @cpu: cpu to add * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. 
* The @mask may be any combination of the following flags: * PADATA_CPU_SERIAL - serial cpumask * PADATA_CPU_PARALLEL - parallel cpumask */ int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) return -EINVAL; mutex_lock(&pinst->lock); get_online_cpus(); if (mask & PADATA_CPU_SERIAL) cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); if (mask & PADATA_CPU_PARALLEL) cpumask_set_cpu(cpu, pinst->cpumask.pcpu); err = __padata_add_cpu(pinst, cpu); put_online_cpus(); mutex_unlock(&pinst->lock); return err; } EXPORT_SYMBOL(padata_add_cpu); static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) { struct parallel_data *pd = NULL; if (cpumask_test_cpu(cpu, cpu_online_mask)) { if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_stop(pinst); pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; padata_replace(pinst, pd); cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); cpumask_clear_cpu(cpu, pd->cpumask.pcpu); } return 0; } /** * padata_remove_cpu - remove a cpu from the one or both(serial and parallel) * padata cpumasks. 
* * @pinst: padata instance * @cpu: cpu to remove * @mask: bitmask specifying from which cpumask @cpu should be removed * The @mask may be any combination of the following flags: * PADATA_CPU_SERIAL - serial cpumask * PADATA_CPU_PARALLEL - parallel cpumask */ int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) return -EINVAL; mutex_lock(&pinst->lock); get_online_cpus(); if (mask & PADATA_CPU_SERIAL) cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); if (mask & PADATA_CPU_PARALLEL) cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); err = __padata_remove_cpu(pinst, cpu); put_online_cpus(); mutex_unlock(&pinst->lock); return err; } EXPORT_SYMBOL(padata_remove_cpu); /** * padata_start - start the parallel processing * * @pinst: padata instance to start */ int padata_start(struct padata_instance *pinst) { int err = 0; mutex_lock(&pinst->lock); if (pinst->flags & PADATA_INVALID) err =-EINVAL; __padata_start(pinst); mutex_unlock(&pinst->lock); return err; } EXPORT_SYMBOL(padata_start); /** * padata_stop - stop the parallel processing * * @pinst: padata instance to stop */ void padata_stop(struct padata_instance *pinst) { mutex_lock(&pinst->lock); __padata_stop(pinst); mutex_unlock(&pinst->lock); } EXPORT_SYMBOL(padata_stop); #ifdef CONFIG_HOTPLUG_CPU static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) { return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); } static int padata_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int err; struct padata_instance *pinst; int cpu = (unsigned long)hcpu; pinst = container_of(nfb, struct padata_instance, cpu_notifier); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_add_cpu(pinst, cpu); mutex_unlock(&pinst->lock); if (err) return notifier_from_errno(err); break; case 
CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_remove_cpu(pinst, cpu); mutex_unlock(&pinst->lock); if (err) return notifier_from_errno(err); break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_remove_cpu(pinst, cpu); mutex_unlock(&pinst->lock); case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_add_cpu(pinst, cpu); mutex_unlock(&pinst->lock); } return NOTIFY_OK; } #endif static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&pinst->cpu_notifier); #endif padata_stop(pinst); padata_free_pd(pinst->pd); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); kfree(pinst); } #define kobj2pinst(_kobj) \ container_of(_kobj, struct padata_instance, kobj) #define attr2pentry(_attr) \ container_of(_attr, struct padata_sysfs_entry, attr) static void padata_sysfs_release(struct kobject *kobj) { struct padata_instance *pinst = kobj2pinst(kobj); __padata_free(pinst); } struct padata_sysfs_entry { struct attribute attr; ssize_t (*show)(struct padata_instance *, struct attribute *, char *); ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); }; static ssize_t show_cpumask(struct padata_instance *pinst, struct attribute *attr, char *buf) { struct cpumask *cpumask; ssize_t len; mutex_lock(&pinst->lock); if (!strcmp(attr->name, "serial_cpumask")) cpumask = pinst->cpumask.cbcpu; else cpumask = pinst->cpumask.pcpu; len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), nr_cpu_ids); if (PAGE_SIZE - len < 2) len = -EINVAL; else len += sprintf(buf + len, "\n"); mutex_unlock(&pinst->lock); return len; } static ssize_t store_cpumask(struct padata_instance *pinst, struct attribute *attr, const char *buf, size_t count) { cpumask_var_t 
new_cpumask; ssize_t ret; int mask_type; if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) return -ENOMEM; ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), nr_cpumask_bits); if (ret < 0) goto out; mask_type = !strcmp(attr->name, "serial_cpumask") ? PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; ret = padata_set_cpumask(pinst, mask_type, new_cpumask); if (!ret) ret = count; out: free_cpumask_var(new_cpumask); return ret; } #define PADATA_ATTR_RW(_name, _show_name, _store_name) \ static struct padata_sysfs_entry _name##_attr = \ __ATTR(_name, 0644, _show_name, _store_name) #define PADATA_ATTR_RO(_name, _show_name) \ static struct padata_sysfs_entry _name##_attr = \ __ATTR(_name, 0400, _show_name, NULL) PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); /* * Padata sysfs provides the following objects: * serial_cpumask [RW] - cpumask for serial workers * parallel_cpumask [RW] - cpumask for parallel workers */ static struct attribute *padata_default_attrs[] = { &serial_cpumask_attr.attr, &parallel_cpumask_attr.attr, NULL, }; static ssize_t padata_sysfs_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct padata_instance *pinst; struct padata_sysfs_entry *pentry; ssize_t ret = -EIO; pinst = kobj2pinst(kobj); pentry = attr2pentry(attr); if (pentry->show) ret = pentry->show(pinst, attr, buf); return ret; } static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct padata_instance *pinst; struct padata_sysfs_entry *pentry; ssize_t ret = -EIO; pinst = kobj2pinst(kobj); pentry = attr2pentry(attr); if (pentry->show) ret = pentry->store(pinst, attr, buf, count); return ret; } static const struct sysfs_ops padata_sysfs_ops = { .show = padata_sysfs_show, .store = padata_sysfs_store, }; static struct kobj_type padata_attr_type = { .sysfs_ops = &padata_sysfs_ops, .default_attrs = padata_default_attrs, .release = 
padata_sysfs_release, }; /** * padata_alloc_possible - Allocate and initialize padata instance. * Use the cpu_possible_mask for serial and * parallel workers. * * @wq: workqueue to use for the allocated padata instance */ struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) { return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); } EXPORT_SYMBOL(padata_alloc_possible); /** * padata_alloc - allocate and initialize a padata instance and specify * cpumasks for serial and parallel workers. * * @wq: workqueue to use for the allocated padata instance * @pcpumask: cpumask that will be used for padata parallelization * @cbcpumask: cpumask that will be used for padata serialization */ struct padata_instance *padata_alloc(struct workqueue_struct *wq, const struct cpumask *pcpumask, const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); if (!pinst) goto err; get_online_cpus(); if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) goto err_free_inst; if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { free_cpumask_var(pinst->cpumask.pcpu); goto err_free_inst; } if (!padata_validate_cpumask(pinst, pcpumask) || !padata_validate_cpumask(pinst, cbcpumask)) goto err_free_masks; pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) goto err_free_masks; rcu_assign_pointer(pinst->pd, pd); pinst->wq = wq; cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); pinst->flags = 0; #ifdef CONFIG_HOTPLUG_CPU pinst->cpu_notifier.notifier_call = padata_cpu_callback; pinst->cpu_notifier.priority = 0; register_hotcpu_notifier(&pinst->cpu_notifier); #endif put_online_cpus(); BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); return pinst; err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); 
free_cpumask_var(pinst->cpumask.cbcpu); err_free_inst: kfree(pinst); put_online_cpus(); err: return NULL; } EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance * * @padata_inst: padata instance to free */ void padata_free(struct padata_instance *pinst) { kobject_put(&pinst->kobj); } EXPORT_SYMBOL(padata_free);
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
arch/mips/sibyte/common/cfe.c
3002
8412
/* * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/bootmem.h> #include <linux/pm.h> #include <linux/smp.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/sibyte/board.h> #include <asm/smp-ops.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_error.h> /* Max ram addressable in 32-bit segments */ #ifdef CONFIG_64BIT #define MAX_RAM_SIZE (~0ULL) #else #ifdef CONFIG_HIGHMEM #ifdef CONFIG_64BIT_PHYS_ADDR #define MAX_RAM_SIZE (~0ULL) #else #define MAX_RAM_SIZE (0xffffffffULL) #endif #else #define MAX_RAM_SIZE (0x1fffffffULL) #endif #endif #define SIBYTE_MAX_MEM_REGIONS 8 phys_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS]; phys_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS]; unsigned int board_mem_region_count; int cfe_cons_handle; #ifdef CONFIG_BLK_DEV_INITRD extern unsigned long initrd_start, initrd_end; #endif static void __noreturn cfe_linux_exit(void *arg) { int warm = *(int *)arg; if (smp_processor_id()) { static int reboot_smp; /* Don't repeat the process from another CPU */ if (!reboot_smp) { /* Get CPU 0 to do the cfe_exit */ reboot_smp = 1; smp_call_function(cfe_linux_exit, arg, 0); } } 
else { printk("Passing control back to CFE...\n"); cfe_exit(warm, 0); printk("cfe_exit returned??\n"); } while (1); } static void __noreturn cfe_linux_restart(char *command) { static const int zero; cfe_linux_exit((void *)&zero); } static void __noreturn cfe_linux_halt(void) { static const int one = 1; cfe_linux_exit((void *)&one); } static __init void prom_meminit(void) { u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */ int mem_flags = 0; unsigned int idx; int rd_flag; #ifdef CONFIG_BLK_DEV_INITRD unsigned long initrd_pstart; unsigned long initrd_pend; initrd_pstart = CPHYSADDR(initrd_start); initrd_pend = CPHYSADDR(initrd_end); if (initrd_start && ((initrd_pstart > MAX_RAM_SIZE) || (initrd_pend > MAX_RAM_SIZE))) { panic("initrd out of addressable memory"); } #endif /* INITRD */ for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE; idx++) { rd_flag = 0; if (type == CFE_MI_AVAILABLE) { /* * See if this block contains (any portion of) the * ramdisk */ #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { if ((initrd_pstart > addr) && (initrd_pstart < (addr + size))) { add_memory_region(addr, initrd_pstart - addr, BOOT_MEM_RAM); rd_flag = 1; } if ((initrd_pend > addr) && (initrd_pend < (addr + size))) { add_memory_region(initrd_pend, (addr + size) - initrd_pend, BOOT_MEM_RAM); rd_flag = 1; } } #endif if (!rd_flag) { if (addr > MAX_RAM_SIZE) continue; if (addr+size > MAX_RAM_SIZE) size = MAX_RAM_SIZE - (addr+size) + 1; /* * memcpy/__copy_user prefetch, which * will cause a bus error for * KSEG/KUSEG addrs not backed by RAM. * Hence, reserve some padding for the * prefetch distance. */ if (size > 512) size -= 512; add_memory_region(addr, size, BOOT_MEM_RAM); } board_mem_region_addrs[board_mem_region_count] = addr; board_mem_region_sizes[board_mem_region_count] = size; board_mem_region_count++; if (board_mem_region_count == SIBYTE_MAX_MEM_REGIONS) { /* * Too many regions. 
Need to configure more */ while(1); } } } #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { add_memory_region(initrd_pstart, initrd_pend - initrd_pstart, BOOT_MEM_RESERVED); } #endif } #ifdef CONFIG_BLK_DEV_INITRD static int __init initrd_setup(char *str) { char rdarg[64]; int idx; char *tmp, *endptr; unsigned long initrd_size; /* Make a copy of the initrd argument so we can smash it up here */ for (idx = 0; idx < sizeof(rdarg)-1; idx++) { if (!str[idx] || (str[idx] == ' ')) break; rdarg[idx] = str[idx]; } rdarg[idx] = 0; str = rdarg; /* *Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>" * e.g. initrd=3abfd@80010000. This is set up by the loader. */ for (tmp = str; *tmp != '@'; tmp++) { if (!*tmp) { goto fail; } } *tmp = 0; tmp++; if (!*tmp) { goto fail; } initrd_size = simple_strtoul(str, &endptr, 16); if (*endptr) { *(tmp-1) = '@'; goto fail; } *(tmp-1) = '@'; initrd_start = simple_strtoul(tmp, &endptr, 16); if (*endptr) { goto fail; } initrd_end = initrd_start + initrd_size; printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start); return 1; fail: printk("Bad initrd argument. 
Disabling initrd\n"); initrd_start = 0; initrd_end = 0; return 1; } #endif extern struct plat_smp_ops sb_smp_ops; extern struct plat_smp_ops bcm1480_smp_ops; /* * prom_init is called just after the cpu type is determined, from setup_arch() */ void __init prom_init(void) { uint64_t cfe_ept, cfe_handle; unsigned int cfe_eptseal; int argc = fw_arg0; char **envp = (char **) fw_arg2; int *prom_vec = (int *) fw_arg3; _machine_restart = cfe_linux_restart; _machine_halt = cfe_linux_halt; pm_power_off = cfe_linux_halt; /* * Check if a loader was used; if NOT, the 4 arguments are * what CFE gives us (handle, 0, EPT and EPTSEAL) */ if (argc < 0) { cfe_handle = (uint64_t)(long)argc; cfe_ept = (long)envp; cfe_eptseal = (uint32_t)(unsigned long)prom_vec; } else { if ((int32_t)(long)prom_vec < 0) { /* * Old loader; all it gives us is the handle, * so use the "known" entrypoint and assume * the seal. */ cfe_handle = (uint64_t)(long)prom_vec; cfe_ept = (uint64_t)((int32_t)0x9fc00500); cfe_eptseal = CFE_EPTSEAL; } else { /* * Newer loaders bundle the handle/ept/eptseal * Note: prom_vec is in the loader's useg * which is still alive in the TLB. */ cfe_handle = (uint64_t)((int32_t *)prom_vec)[0]; cfe_ept = (uint64_t)((int32_t *)prom_vec)[2]; cfe_eptseal = (unsigned int)((uint32_t *)prom_vec)[3]; } } if (cfe_eptseal != CFE_EPTSEAL) { /* too early for panic to do any good */ printk("CFE's entrypoint seal doesn't match. Spinning."); while (1) ; } cfe_init(cfe_handle, cfe_ept); /* * Get the handle for (at least) prom_putchar, possibly for * boot console */ cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE); if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) { if (argc >= 0) { /* The loader should have set the command line */ /* too early for panic to do any good */ printk("LINUX_CMDLINE not defined in cfe."); while (1) ; } } #ifdef CONFIG_BLK_DEV_INITRD { char *ptr; /* Need to find out early whether we've got an initrd. 
So scan the list looking now */ for (ptr = arcs_cmdline; *ptr; ptr++) { while (*ptr == ' ') { ptr++; } if (!strncmp(ptr, "initrd=", 7)) { initrd_setup(ptr+7); break; } else { while (*ptr && (*ptr != ' ')) { ptr++; } } } } #endif /* CONFIG_BLK_DEV_INITRD */ /* Not sure this is needed, but it's the safe way. */ arcs_cmdline[COMMAND_LINE_SIZE-1] = 0; prom_meminit(); #if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) register_smp_ops(&sb_smp_ops); #endif #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) register_smp_ops(&bcm1480_smp_ops); #endif } void __init prom_free_prom_memory(void) { /* Not sure what I'm supposed to do here. Nothing, I think */ } void prom_putchar(char c) { int ret; while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0) ; }
gpl-2.0
xcore995/lge_h502_kernel
arch/arm/mach-mmp/flint.c
4026
2907
/* * linux/arch/arm/mach-mmp/flint.c * * Support for the Marvell Flint Development Platform. * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/smc91x.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/interrupt.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/addr-map.h> #include <mach/mfp-mmp2.h> #include <mach/mmp2.h> #include <mach/irqs.h> #include "common.h" #define FLINT_NR_IRQS (MMP_NR_IRQS + 48) static unsigned long flint_pin_config[] __initdata = { /* UART1 */ GPIO45_UART1_RXD, GPIO46_UART1_TXD, /* UART2 */ GPIO47_UART2_RXD, GPIO48_UART2_TXD, /* SMC */ GPIO151_SMC_SCLK, GPIO145_SMC_nCS0, GPIO146_SMC_nCS1, GPIO152_SMC_BE0, GPIO153_SMC_BE1, GPIO154_SMC_IRQ, GPIO113_SMC_RDY, /*Ethernet*/ GPIO155_GPIO, /* DFI */ GPIO168_DFI_D0, GPIO167_DFI_D1, GPIO166_DFI_D2, GPIO165_DFI_D3, GPIO107_DFI_D4, GPIO106_DFI_D5, GPIO105_DFI_D6, GPIO104_DFI_D7, GPIO111_DFI_D8, GPIO164_DFI_D9, GPIO163_DFI_D10, GPIO162_DFI_D11, GPIO161_DFI_D12, GPIO110_DFI_D13, GPIO109_DFI_D14, GPIO108_DFI_D15, GPIO143_ND_nCS0, GPIO144_ND_nCS1, GPIO147_ND_nWE, GPIO148_ND_nRE, GPIO150_ND_ALE, GPIO149_ND_CLE, GPIO112_ND_RDY0, GPIO160_ND_RDY1, }; static struct pxa_gpio_platform_data mmp2_gpio_pdata = { .irq_base = MMP_GPIO_TO_IRQ(0), }; static struct smc91x_platdata flint_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_resources[] = { [0] = { .start = SMC_CS1_PHYS_BASE + 0x300, .end = SMC_CS1_PHYS_BASE + 0xfffff, .flags = IORESOURCE_MEM, }, [1] = { .start = MMP_GPIO_TO_IRQ(155), .end = MMP_GPIO_TO_IRQ(155), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct platform_device smc91x_device = { 
.name = "smc91x", .id = 0, .dev = { .platform_data = &flint_smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static void __init flint_init(void) { mfp_config(ARRAY_AND_SIZE(flint_pin_config)); /* on-chip devices */ mmp2_add_uart(1); mmp2_add_uart(2); platform_device_add_data(&mmp2_device_gpio, &mmp2_gpio_pdata, sizeof(struct pxa_gpio_platform_data)); platform_device_register(&mmp2_device_gpio); /* off-chip devices */ platform_device_register(&smc91x_device); } MACHINE_START(FLINT, "Flint Development Platform") .map_io = mmp_map_io, .nr_irqs = FLINT_NR_IRQS, .init_irq = mmp2_init_irq, .init_time = mmp2_timer_init, .init_machine = flint_init, .restart = mmp_restart, MACHINE_END
gpl-2.0
GustavoRD78/78Kernel-6.0.1-23.5.B.0.368
fs/btrfs/file-item.c
4282
23286
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/bio.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ sizeof(struct btrfs_item) * 2) / \ size) - 1)) #define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE)) #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ sizeof(struct btrfs_ordered_sum)) / \ sizeof(struct btrfs_sector_sum) * \ (r)->sectorsize - (r)->sectorsize) int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, u64 disk_offset, u64 disk_num_bytes, u64 num_bytes, u64 offset, u64 ram_bytes, u8 compression, u8 encryption, u16 other_encoding) { int ret = 0; struct btrfs_file_extent_item *item; struct btrfs_key file_key; struct btrfs_path *path; struct extent_buffer *leaf; path = btrfs_alloc_path(); if (!path) return -ENOMEM; file_key.objectid = objectid; file_key.offset = pos; btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, root, path, &file_key, sizeof(*item)); if (ret < 0) goto out; BUG_ON(ret); /* Can't happen */ leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct 
btrfs_file_extent_item); btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset); btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes); btrfs_set_file_extent_offset(leaf, item, offset); btrfs_set_file_extent_num_bytes(leaf, item, num_bytes); btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes); btrfs_set_file_extent_generation(leaf, item, trans->transid); btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); btrfs_set_file_extent_compression(leaf, item, compression); btrfs_set_file_extent_encryption(leaf, item, encryption); btrfs_set_file_extent_other_encoding(leaf, item, other_encoding); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return ret; } struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, int cow) { int ret; struct btrfs_key file_key; struct btrfs_key found_key; struct btrfs_csum_item *item; struct extent_buffer *leaf; u64 csum_offset = 0; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int csums_in_item; file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.offset = bytenr; btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); if (ret < 0) goto fail; leaf = path->nodes[0]; if (ret > 0) { ret = 1; if (path->slots[0] == 0) goto fail; path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY) goto fail; csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item /= csum_size; if (csum_offset >= csums_in_item) { ret = -EFBIG; goto fail; } } item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * csum_size); return item; fail: if (ret > 0) ret = -ENOENT; return ERR_PTR(ret); } int 
btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, int mod) { int ret; struct btrfs_key file_key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; file_key.objectid = objectid; file_key.offset = offset; btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); return ret; } static int __btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 logical_offset, u32 *dst, int dio) { u32 sum; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; u64 offset = 0; u64 item_start_offset = 0; u64 item_last_offset = 0; u64 disk_bytenr; u32 diff; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int ret; struct btrfs_path *path; struct btrfs_csum_item *item = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (bio->bi_size > PAGE_CACHE_SIZE * 8) path->reada = 2; WARN_ON(bio->bi_vcnt <= 0); /* * the free space stuff is only read when it hasn't been * updated in the current transaction. So, we can safely * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. 
*/ if (btrfs_is_free_space_inode(root, inode)) { path->search_commit_root = 1; path->skip_locking = 1; } disk_bytenr = (u64)bio->bi_sector << 9; if (dio) offset = logical_offset; while (bio_index < bio->bi_vcnt) { if (!dio) offset = page_offset(bvec->bv_page) + bvec->bv_offset; ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) goto found; if (!item || disk_bytenr < item_start_offset || disk_bytenr >= item_last_offset) { struct btrfs_key found_key; u32 item_size; if (item) btrfs_release_path(path); item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, disk_bytenr, 0); if (IS_ERR(item)) { ret = PTR_ERR(item); if (ret == -ENOENT || ret == -EFBIG) ret = 0; sum = 0; if (BTRFS_I(inode)->root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) { set_extent_bits(io_tree, offset, offset + bvec->bv_len - 1, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found " "for inode %llu start %llu\n", (unsigned long long) btrfs_ino(inode), (unsigned long long)offset); } item = NULL; btrfs_release_path(path); goto found; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); item_start_offset = found_key.offset; item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); item_last_offset = item_start_offset + (item_size / csum_size) * root->sectorsize; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_csum_item); } /* * this byte range must be able to fit inside * a single leaf so it will also fit inside a u32 */ diff = disk_bytenr - item_start_offset; diff = diff / root->sectorsize; diff = diff * csum_size; read_extent_buffer(path->nodes[0], &sum, ((unsigned long)item) + diff, csum_size); found: if (dst) *dst++ = sum; else set_state_private(io_tree, offset, sum); disk_bytenr += bvec->bv_len; offset += bvec->bv_len; bio_index++; bvec++; } btrfs_free_path(path); return 0; } int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst) { return 
__btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0); } int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 offset, u32 *dst) { return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit) { struct btrfs_key key; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; struct btrfs_csum_item *item; LIST_HEAD(tmplist); unsigned long offset; int ret; size_t size; u64 csum_end; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (search_commit) { path->skip_locking = 1; path->reada = 2; path->search_commit_root = 1; } key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = start; key.type = BTRFS_EXTENT_CSUM_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto fail; if (ret > 0 && path->slots[0] > 0) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && key.type == BTRFS_EXTENT_CSUM_KEY) { offset = (start - key.offset) >> root->fs_info->sb->s_blocksize_bits; if (offset * csum_size < btrfs_item_size_nr(leaf, path->slots[0] - 1)) path->slots[0]--; } } while (start <= end) { leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto fail; if (ret > 0) break; leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || key.type != BTRFS_EXTENT_CSUM_KEY) break; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.offset > end) break; if (key.offset > start) start = key.offset; size = btrfs_item_size_nr(leaf, path->slots[0]); csum_end = key.offset + (size / csum_size) * root->sectorsize; if (csum_end <= start) { path->slots[0]++; 
continue; } csum_end = min(csum_end, end + 1); item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_csum_item); while (start < csum_end) { size = min_t(size_t, csum_end - start, MAX_ORDERED_SUM_BYTES(root)); sums = kzalloc(btrfs_ordered_sum_size(root, size), GFP_NOFS); if (!sums) { ret = -ENOMEM; goto fail; } sector_sum = sums->sums; sums->bytenr = start; sums->len = size; offset = (start - key.offset) >> root->fs_info->sb->s_blocksize_bits; offset *= csum_size; while (size > 0) { read_extent_buffer(path->nodes[0], &sector_sum->sum, ((unsigned long)item) + offset, csum_size); sector_sum->bytenr = start; size -= root->sectorsize; start += root->sectorsize; offset += csum_size; sector_sum++; } list_add_tail(&sums->list, &tmplist); } path->slots[0]++; } ret = 0; fail: while (ret < 0 && !list_empty(&tmplist)) { sums = list_entry(&tmplist, struct btrfs_ordered_sum, list); list_del(&sums->list); kfree(sums); } list_splice_tail(&tmplist, list); btrfs_free_path(path); return ret; } int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig) { struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; struct btrfs_ordered_extent *ordered; char *data; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; unsigned long total_bytes = 0; unsigned long this_sum_bytes = 0; u64 offset; u64 disk_bytenr; WARN_ON(bio->bi_vcnt <= 0); sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); if (!sums) return -ENOMEM; sector_sum = sums->sums; disk_bytenr = (u64)bio->bi_sector << 9; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); if (contig) offset = file_start; else offset = page_offset(bvec->bv_page) + bvec->bv_offset; ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ sums->bytenr = ordered->start; while (bio_index < bio->bi_vcnt) { if (!contig) offset = page_offset(bvec->bv_page) + bvec->bv_offset; if (!contig && (offset >= ordered->file_offset 
+ ordered->len || offset < ordered->file_offset)) { unsigned long bytes_left; sums->len = this_sum_bytes; this_sum_bytes = 0; btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); bytes_left = bio->bi_size - total_bytes; sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), GFP_NOFS); BUG_ON(!sums); /* -ENOMEM */ sector_sum = sums->sums; sums->len = bytes_left; ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ sums->bytenr = ordered->start; } data = kmap_atomic(bvec->bv_page); sector_sum->sum = ~(u32)0; sector_sum->sum = btrfs_csum_data(root, data + bvec->bv_offset, sector_sum->sum, bvec->bv_len); kunmap_atomic(data); btrfs_csum_final(sector_sum->sum, (char *)&sector_sum->sum); sector_sum->bytenr = disk_bytenr; sector_sum++; bio_index++; total_bytes += bvec->bv_len; this_sum_bytes += bvec->bv_len; disk_bytenr += bvec->bv_len; offset += bvec->bv_len; bvec++; } this_sum_bytes = 0; btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); return 0; } /* * helper function for csum removal, this expects the * key to describe the csum pointed to by the path, and it expects * the csum to overlap the range [bytenr, len] * * The csum should not be entirely contained in the range and the * range should not be entirely contained in the csum. * * This calls btrfs_truncate_item with the correct args based on the * overlap, and fixes up the key as required. 
*/ static noinline void truncate_one_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, u64 bytenr, u64 len) { struct extent_buffer *leaf; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u64 csum_end; u64 end_byte = bytenr + len; u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; leaf = path->nodes[0]; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end <<= root->fs_info->sb->s_blocksize_bits; csum_end += key->offset; if (key->offset < bytenr && csum_end <= end_byte) { /* * [ bytenr - len ] * [ ] * [csum ] * A simple truncate off the end of the item */ u32 new_size = (bytenr - key->offset) >> blocksize_bits; new_size *= csum_size; btrfs_truncate_item(trans, root, path, new_size, 1); } else if (key->offset >= bytenr && csum_end > end_byte && end_byte > key->offset) { /* * [ bytenr - len ] * [ ] * [csum ] * we need to truncate from the beginning of the csum */ u32 new_size = (csum_end - end_byte) >> blocksize_bits; new_size *= csum_size; btrfs_truncate_item(trans, root, path, new_size, 0); key->offset = end_byte; btrfs_set_item_key_safe(trans, root, path, key); } else { BUG(); } } /* * deletes the csum items from the csum tree for a given * range of bytes. 
*/ int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len) { struct btrfs_path *path; struct btrfs_key key; u64 end_byte = bytenr + len; u64 csum_end; struct extent_buffer *leaf; int ret; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int blocksize_bits = root->fs_info->sb->s_blocksize_bits; root = root->fs_info->csum_root; path = btrfs_alloc_path(); if (!path) return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = end_byte - 1; key.type = BTRFS_EXTENT_CSUM_KEY; path->leave_spinning = 1; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } else if (ret < 0) { break; } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || key.type != BTRFS_EXTENT_CSUM_KEY) { break; } if (key.offset >= end_byte) break; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end <<= blocksize_bits; csum_end += key.offset; /* this csum ends before we start, we're done */ if (csum_end <= bytenr) break; /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { ret = btrfs_del_item(trans, root, path); if (ret) goto out; if (key.offset == bytenr) break; } else if (key.offset < bytenr && csum_end > end_byte) { unsigned long offset; unsigned long shift_len; unsigned long item_offset; /* * [ bytenr - len ] * [csum ] * * Our bytes are in the middle of the csum, * we need to split this item and insert a new one. * * But we can't drop the path because the * csum could change, get removed, extended etc. * * The trick here is the max size of a csum item leaves * enough room in the tree block for a single * item header. So, we split the item in place, * adding a new header pointing to the existing * bytes. Then we loop around again and we have * a nicely formed csum item that we can neatly * truncate. 
*/ offset = (bytenr - key.offset) >> blocksize_bits; offset *= csum_size; shift_len = (len >> blocksize_bits) * csum_size; item_offset = btrfs_item_ptr_offset(leaf, path->slots[0]); memset_extent_buffer(leaf, 0, item_offset + offset, shift_len); key.offset = bytenr; /* * btrfs_split_item returns -EAGAIN when the * item changed size or key */ ret = btrfs_split_item(trans, root, path, &key, offset); if (ret && ret != -EAGAIN) { btrfs_abort_transaction(trans, root, ret); goto out; } key.offset = end_byte - 1; } else { truncate_one_csum(trans, root, path, &key, bytenr, len); if (key.offset < bytenr) break; } btrfs_release_path(path); } ret = 0; out: btrfs_free_path(path); return ret; } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_ordered_sum *sums) { u64 bytenr; int ret; struct btrfs_key file_key; struct btrfs_key found_key; u64 next_offset; u64 total_bytes = 0; int found_next; struct btrfs_path *path; struct btrfs_csum_item *item; struct btrfs_csum_item *item_end; struct extent_buffer *leaf = NULL; u64 csum_offset; struct btrfs_sector_sum *sector_sum; u32 nritems; u32 ins_size; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); path = btrfs_alloc_path(); if (!path) return -ENOMEM; sector_sum = sums->sums; again: next_offset = (u64)-1; found_next = 0; file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.offset = sector_sum->bytenr; bytenr = sector_sum->bytenr; btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1); if (!IS_ERR(item)) { leaf = path->nodes[0]; ret = 0; goto found; } ret = PTR_ERR(item); if (ret != -EFBIG && ret != -ENOENT) goto fail_unlock; if (ret == -EFBIG) { u32 item_size; /* we found one, but it isn't big enough yet */ leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); if ((item_size / csum_size) >= MAX_CSUM_ITEMS(root, csum_size)) { /* already at max size, make a new one */ goto insert; 
} } else { int slot = path->slots[0] + 1; /* we didn't find a csum item, insert one */ nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems - 1) { ret = btrfs_next_leaf(root, path); if (ret == 1) found_next = 1; if (ret != 0) goto insert; slot = 0; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || found_key.type != BTRFS_EXTENT_CSUM_KEY) { found_next = 1; goto insert; } next_offset = found_key.offset; found_next = 1; goto insert; } /* * at this point, we know the tree has an item, but it isn't big * enough yet to put our csum in. Grow it */ btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) goto fail_unlock; if (ret > 0) { if (path->slots[0] == 0) goto insert; path->slots[0]--; } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY || found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) { goto insert; } if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) / csum_size) { u32 diff = (csum_offset + 1) * csum_size; /* * is the item big enough already? 
we dropped our lock * before and need to recheck */ if (diff < btrfs_item_size_nr(leaf, path->slots[0])) goto csum; diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); if (diff != csum_size) goto insert; btrfs_extend_item(trans, root, path, diff); goto csum; } insert: btrfs_release_path(path); csum_offset = 0; if (found_next) { u64 tmp = total_bytes + root->sectorsize; u64 next_sector = sector_sum->bytenr; struct btrfs_sector_sum *next = sector_sum + 1; while (tmp < sums->len) { if (next_sector + root->sectorsize != next->bytenr) break; tmp += root->sectorsize; next_sector = next->bytenr; next++; } tmp = min(tmp, next_offset - file_key.offset); tmp >>= root->fs_info->sb->s_blocksize_bits; tmp = max((u64)1, tmp); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); ins_size = csum_size * tmp; } else { ins_size = csum_size; } path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, root, path, &file_key, ins_size); path->leave_spinning = 0; if (ret < 0) goto fail_unlock; if (ret != 0) { WARN_ON(1); goto fail_unlock; } csum: leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); ret = 0; item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * csum_size); found: item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + btrfs_item_size_nr(leaf, path->slots[0])); next_sector: write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size); total_bytes += root->sectorsize; sector_sum++; if (total_bytes < sums->len) { item = (struct btrfs_csum_item *)((char *)item + csum_size); if (item < item_end && bytenr + PAGE_CACHE_SIZE == sector_sum->bytenr) { bytenr = sector_sum->bytenr; goto next_sector; } } btrfs_mark_buffer_dirty(path->nodes[0]); if (total_bytes < sums->len) { btrfs_release_path(path); cond_resched(); goto again; } out: btrfs_free_path(path); return ret; fail_unlock: goto out; }
gpl-2.0
gamerlulea/linux-3.5-rc
arch/powerpc/kernel/fadump.c
4538
36275
/* * Firmware Assisted dump: A robust mechanism to get reliable kernel crash * dump with assistance from firmware. This approach does not use kexec, * instead firmware assists in booting the kdump kernel while preserving * memory contents. The most of the code implementation has been adapted * from phyp assisted dump implementation written by Linas Vepstas and * Manish Ahuja * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright 2011 IBM Corporation * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> */ #undef DEBUG #define pr_fmt(fmt) "fadump: " fmt #include <linux/string.h> #include <linux/memblock.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/crash_dump.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <asm/page.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/fadump.h> #include <asm/debug.h> #include <asm/setup.h> static struct fw_dump fw_dump; static struct fadump_mem_struct fdm; static const struct fadump_mem_struct *fdm_active; static DEFINE_MUTEX(fadump_mutex); struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES]; int crash_mem_ranges; /* Scan the Firmware Assisted dump configuration details. 
*/ int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) { __be32 *sections; int i, num_sections; unsigned long size; const int *token; if (depth != 1 || strcmp(uname, "rtas") != 0) return 0; /* * Check if Firmware Assisted dump is supported. if yes, check * if dump has been initiated on last reboot. */ token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL); if (!token) return 0; fw_dump.fadump_supported = 1; fw_dump.ibm_configure_kernel_dump = *token; /* * The 'ibm,kernel-dump' rtas node is present only if there is * dump data waiting for us. */ fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL); if (fdm_active) fw_dump.dump_active = 1; /* Get the sizes required to store dump data for the firmware provided * dump sections. * For each dump section type supported, a 32bit cell which defines * the ID of a supported section followed by two 32 bit cells which * gives teh size of the section in bytes. */ sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes", &size); if (!sections) return 0; num_sections = size / (3 * sizeof(u32)); for (i = 0; i < num_sections; i++, sections += 3) { u32 type = (u32)of_read_number(sections, 1); switch (type) { case FADUMP_CPU_STATE_DATA: fw_dump.cpu_state_data_size = of_read_ulong(&sections[1], 2); break; case FADUMP_HPTE_REGION: fw_dump.hpte_region_size = of_read_ulong(&sections[1], 2); break; } } return 1; } int is_fadump_active(void) { return fw_dump.dump_active; } /* Print firmware assisted dump configurations for debugging purpose. */ static void fadump_show_config(void) { pr_debug("Support for firmware-assisted dump (fadump): %s\n", (fw_dump.fadump_supported ? "present" : "no support")); if (!fw_dump.fadump_supported) return; pr_debug("Fadump enabled : %s\n", (fw_dump.fadump_enabled ? "yes" : "no")); pr_debug("Dump Active : %s\n", (fw_dump.dump_active ? 
"yes" : "no")); pr_debug("Dump section sizes:\n"); pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size); pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size); pr_debug("Boot memory size : %lx\n", fw_dump.boot_memory_size); } static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, unsigned long addr) { if (!fdm) return 0; memset(fdm, 0, sizeof(struct fadump_mem_struct)); addr = addr & PAGE_MASK; fdm->header.dump_format_version = 0x00000001; fdm->header.dump_num_sections = 3; fdm->header.dump_status_flag = 0; fdm->header.offset_first_dump_section = (u32)offsetof(struct fadump_mem_struct, cpu_state_data); /* * Fields for disk dump option. * We are not using disk dump option, hence set these fields to 0. */ fdm->header.dd_block_size = 0; fdm->header.dd_block_offset = 0; fdm->header.dd_num_blocks = 0; fdm->header.dd_offset_disk_path = 0; /* set 0 to disable an automatic dump-reboot. */ fdm->header.max_time_auto = 0; /* Kernel dump sections */ /* cpu state data section. 
*/ fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG; fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA; fdm->cpu_state_data.source_address = 0; fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size; fdm->cpu_state_data.destination_address = addr; addr += fw_dump.cpu_state_data_size; /* hpte region section */ fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG; fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION; fdm->hpte_region.source_address = 0; fdm->hpte_region.source_len = fw_dump.hpte_region_size; fdm->hpte_region.destination_address = addr; addr += fw_dump.hpte_region_size; /* RMA region section */ fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG; fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION; fdm->rmr_region.source_address = RMA_START; fdm->rmr_region.source_len = fw_dump.boot_memory_size; fdm->rmr_region.destination_address = addr; addr += fw_dump.boot_memory_size; return addr; } /** * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM * * Function to find the largest memory size we need to reserve during early * boot process. This will be the size of the memory that is required for a * kernel to boot successfully. * * This function has been taken from phyp-assisted dump feature implementation. * * returns larger of 256MB or 5% rounded down to multiples of 256MB. * * TODO: Come up with better approach to find out more accurate memory size * that is required for a kernel to boot successfully. * */ static inline unsigned long fadump_calculate_reserve_size(void) { unsigned long size; /* * Check if the size is specified through fadump_reserve_mem= cmdline * option. If yes, then use that. */ if (fw_dump.reserve_bootvar) return fw_dump.reserve_bootvar; /* divide by 20 to get 5% of value */ size = memblock_end_of_DRAM() / 20; /* round it down in multiples of 256 */ size = size & ~0x0FFFFFFFUL; /* Truncate to memory_limit. 
We don't want to over reserve the memory.*/ if (memory_limit && size > memory_limit) size = memory_limit; return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM); } /* * Calculate the total memory size required to be reserved for * firmware-assisted dump registration. */ static unsigned long get_fadump_area_size(void) { unsigned long size = 0; size += fw_dump.cpu_state_data_size; size += fw_dump.hpte_region_size; size += fw_dump.boot_memory_size; size += sizeof(struct fadump_crash_info_header); size += sizeof(struct elfhdr); /* ELF core header.*/ size += sizeof(struct elf_phdr); /* place holder for cpu notes */ /* Program headers for crash memory regions. */ size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2); size = PAGE_ALIGN(size); return size; } int __init fadump_reserve_mem(void) { unsigned long base, size, memory_boundary; if (!fw_dump.fadump_enabled) return 0; if (!fw_dump.fadump_supported) { printk(KERN_INFO "Firmware-assisted dump is not supported on" " this hardware\n"); fw_dump.fadump_enabled = 0; return 0; } /* * Initialize boot memory size * If dump is active then we have already calculated the size during * first kernel. */ if (fdm_active) fw_dump.boot_memory_size = fdm_active->rmr_region.source_len; else fw_dump.boot_memory_size = fadump_calculate_reserve_size(); /* * Calculate the memory boundary. * If memory_limit is less than actual memory boundary then reserve * the memory for fadump beyond the memory_limit and adjust the * memory_limit accordingly, so that the running kernel can run with * specified memory_limit. 
*/ if (memory_limit && memory_limit < memblock_end_of_DRAM()) { size = get_fadump_area_size(); if ((memory_limit + size) < memblock_end_of_DRAM()) memory_limit += size; else memory_limit = memblock_end_of_DRAM(); printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" " dump, now %#016llx\n", (unsigned long long)memory_limit); } if (memory_limit) memory_boundary = memory_limit; else memory_boundary = memblock_end_of_DRAM(); if (fw_dump.dump_active) { printk(KERN_INFO "Firmware-assisted dump is active.\n"); /* * If last boot has crashed then reserve all the memory * above boot_memory_size so that we don't touch it until * dump is written to disk by userspace tool. This memory * will be released for general use once the dump is saved. */ base = fw_dump.boot_memory_size; size = memory_boundary - base; memblock_reserve(base, size); printk(KERN_INFO "Reserved %ldMB of memory at %ldMB " "for saving crash dump\n", (unsigned long)(size >> 20), (unsigned long)(base >> 20)); fw_dump.fadumphdr_addr = fdm_active->rmr_region.destination_address + fdm_active->rmr_region.source_len; pr_debug("fadumphdr_addr = %p\n", (void *) fw_dump.fadumphdr_addr); } else { /* Reserve the memory at the top of memory. */ size = get_fadump_area_size(); base = memory_boundary - size; memblock_reserve(base, size); printk(KERN_INFO "Reserved %ldMB of memory at %ldMB " "for firmware-assisted dump\n", (unsigned long)(size >> 20), (unsigned long)(base >> 20)); } fw_dump.reserve_dump_area_start = base; fw_dump.reserve_dump_area_size = size; return 1; } /* Look for fadump= cmdline option. 
*/ static int __init early_fadump_param(char *p) { if (!p) return 1; if (strncmp(p, "on", 2) == 0) fw_dump.fadump_enabled = 1; else if (strncmp(p, "off", 3) == 0) fw_dump.fadump_enabled = 0; return 0; } early_param("fadump", early_fadump_param); /* Look for fadump_reserve_mem= cmdline option */ static int __init early_fadump_reserve_mem(char *p) { if (p) fw_dump.reserve_bootvar = memparse(p, &p); return 0; } early_param("fadump_reserve_mem", early_fadump_reserve_mem); static void register_fw_dump(struct fadump_mem_struct *fdm) { int rc; unsigned int wait_time; pr_debug("Registering for firmware-assisted kernel dump...\n"); /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, FADUMP_REGISTER, fdm, sizeof(struct fadump_mem_struct)); wait_time = rtas_busy_delay_time(rc); if (wait_time) mdelay(wait_time); } while (wait_time); switch (rc) { case -1: printk(KERN_ERR "Failed to register firmware-assisted kernel" " dump. Hardware Error(%d).\n", rc); break; case -3: printk(KERN_ERR "Failed to register firmware-assisted kernel" " dump. 
Parameter Error(%d).\n", rc); break; case -9: printk(KERN_ERR "firmware-assisted kernel dump is already " " registered."); fw_dump.dump_registered = 1; break; case 0: printk(KERN_INFO "firmware-assisted kernel dump registration" " is successful\n"); fw_dump.dump_registered = 1; break; } } void crash_fadump(struct pt_regs *regs, const char *str) { struct fadump_crash_info_header *fdh = NULL; if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr) return; fdh = __va(fw_dump.fadumphdr_addr); crashing_cpu = smp_processor_id(); fdh->crashing_cpu = crashing_cpu; crash_save_vmcoreinfo(); if (regs) fdh->regs = *regs; else ppc_save_regs(&fdh->regs); fdh->cpu_online_mask = *cpu_online_mask; /* Call ibm,os-term rtas call to trigger firmware assisted dump */ rtas_os_term((char *)str); } #define GPR_MASK 0xffffff0000000000 static inline int fadump_gpr_index(u64 id) { int i = -1; char str[3]; if ((id & GPR_MASK) == REG_ID("GPR")) { /* get the digits at the end */ id &= ~GPR_MASK; id >>= 24; str[2] = '\0'; str[1] = id & 0xff; str[0] = (id >> 8) & 0xff; sscanf(str, "%d", &i); if (i > 31) i = -1; } return i; } static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val) { int i; i = fadump_gpr_index(reg_id); if (i >= 0) regs->gpr[i] = (unsigned long)reg_val; else if (reg_id == REG_ID("NIA")) regs->nip = (unsigned long)reg_val; else if (reg_id == REG_ID("MSR")) regs->msr = (unsigned long)reg_val; else if (reg_id == REG_ID("CTR")) regs->ctr = (unsigned long)reg_val; else if (reg_id == REG_ID("LR")) regs->link = (unsigned long)reg_val; else if (reg_id == REG_ID("XER")) regs->xer = (unsigned long)reg_val; else if (reg_id == REG_ID("CR")) regs->ccr = (unsigned long)reg_val; else if (reg_id == REG_ID("DAR")) regs->dar = (unsigned long)reg_val; else if (reg_id == REG_ID("DSISR")) regs->dsisr = (unsigned long)reg_val; } static struct fadump_reg_entry* fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs) { memset(regs, 0, sizeof(struct 
pt_regs)); while (reg_entry->reg_id != REG_ID("CPUEND")) { fadump_set_regval(regs, reg_entry->reg_id, reg_entry->reg_value); reg_entry++; } reg_entry++; return reg_entry; } static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type, void *data, size_t data_len) { struct elf_note note; note.n_namesz = strlen(name) + 1; note.n_descsz = data_len; note.n_type = type; memcpy(buf, &note, sizeof(note)); buf += (sizeof(note) + 3)/4; memcpy(buf, name, note.n_namesz); buf += (note.n_namesz + 3)/4; memcpy(buf, data, note.n_descsz); buf += (note.n_descsz + 3)/4; return buf; } static void fadump_final_note(u32 *buf) { struct elf_note note; note.n_namesz = 0; note.n_descsz = 0; note.n_type = 0; memcpy(buf, &note, sizeof(note)); } static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) { struct elf_prstatus prstatus; memset(&prstatus, 0, sizeof(prstatus)); /* * FIXME: How do i get PID? Do I really need it? * prstatus.pr_pid = ???? */ elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, &prstatus, sizeof(prstatus)); return buf; } static void fadump_update_elfcore_header(char *bufp) { struct elfhdr *elf; struct elf_phdr *phdr; elf = (struct elfhdr *)bufp; bufp += sizeof(struct elfhdr); /* First note is a place holder for cpu notes info. 
*/ phdr = (struct elf_phdr *)bufp; if (phdr->p_type == PT_NOTE) { phdr->p_paddr = fw_dump.cpu_notes_buf; phdr->p_offset = phdr->p_paddr; phdr->p_filesz = fw_dump.cpu_notes_buf_size; phdr->p_memsz = fw_dump.cpu_notes_buf_size; } return; } static void *fadump_cpu_notes_buf_alloc(unsigned long size) { void *vaddr; struct page *page; unsigned long order, count, i; order = get_order(size); vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order); if (!vaddr) return NULL; count = 1 << order; page = virt_to_page(vaddr); for (i = 0; i < count; i++) SetPageReserved(page + i); return vaddr; } static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size) { struct page *page; unsigned long order, count, i; order = get_order(size); count = 1 << order; page = virt_to_page(vaddr); for (i = 0; i < count; i++) ClearPageReserved(page + i); __free_pages(page, order); } /* * Read CPU state dump data and convert it into ELF notes. * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be * used to access the data to allow for additional fields to be added without * affecting compatibility. Each list of registers for a CPU starts with * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes, * 8 Byte ASCII identifier and 8 Byte register value. The register entry * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part * of register value. For more details refer to PAPR document. * * Only for the crashing cpu we ignore the CPU dump data and get exact * state from fadump crash info structure populated by first kernel at the * time of crash. 
*/ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) { struct fadump_reg_save_area_header *reg_header; struct fadump_reg_entry *reg_entry; struct fadump_crash_info_header *fdh = NULL; void *vaddr; unsigned long addr; u32 num_cpus, *note_buf; struct pt_regs regs; int i, rc = 0, cpu = 0; if (!fdm->cpu_state_data.bytes_dumped) return -EINVAL; addr = fdm->cpu_state_data.destination_address; vaddr = __va(addr); reg_header = vaddr; if (reg_header->magic_number != REGSAVE_AREA_MAGIC) { printk(KERN_ERR "Unable to read register save area.\n"); return -ENOENT; } pr_debug("--------CPU State Data------------\n"); pr_debug("Magic Number: %llx\n", reg_header->magic_number); pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset); vaddr += reg_header->num_cpu_offset; num_cpus = *((u32 *)(vaddr)); pr_debug("NumCpus : %u\n", num_cpus); vaddr += sizeof(u32); reg_entry = (struct fadump_reg_entry *)vaddr; /* Allocate buffer to hold cpu crash notes. */ fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t); fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size); note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size); if (!note_buf) { printk(KERN_ERR "Failed to allocate 0x%lx bytes for " "cpu notes buffer\n", fw_dump.cpu_notes_buf_size); return -ENOMEM; } fw_dump.cpu_notes_buf = __pa(note_buf); pr_debug("Allocated buffer for cpu notes of size %ld at %p\n", (num_cpus * sizeof(note_buf_t)), note_buf); if (fw_dump.fadumphdr_addr) fdh = __va(fw_dump.fadumphdr_addr); for (i = 0; i < num_cpus; i++) { if (reg_entry->reg_id != REG_ID("CPUSTRT")) { printk(KERN_ERR "Unable to read CPU state data\n"); rc = -ENOENT; goto error_out; } /* Lower 4 bytes of reg_value contains logical cpu id */ cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK; if (!cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { SKIP_TO_NEXT_CPU(reg_entry); continue; } pr_debug("Reading register data for cpu %d...\n", cpu); if (fdh && fdh->crashing_cpu == cpu) { regs = fdh->regs; 
note_buf = fadump_regs_to_elf_notes(note_buf, &regs); SKIP_TO_NEXT_CPU(reg_entry); } else { reg_entry++; reg_entry = fadump_read_registers(reg_entry, &regs); note_buf = fadump_regs_to_elf_notes(note_buf, &regs); } } fadump_final_note(note_buf); pr_debug("Updating elfcore header (%llx) with cpu notes\n", fdh->elfcorehdr_addr); fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr)); return 0; error_out: fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf), fw_dump.cpu_notes_buf_size); fw_dump.cpu_notes_buf = 0; fw_dump.cpu_notes_buf_size = 0; return rc; } /* * Validate and process the dump data stored by firmware before exporting * it through '/proc/vmcore'. */ static int __init process_fadump(const struct fadump_mem_struct *fdm_active) { struct fadump_crash_info_header *fdh; int rc = 0; if (!fdm_active || !fw_dump.fadumphdr_addr) return -EINVAL; /* Check if the dump data is valid. */ if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) || (fdm_active->cpu_state_data.error_flags != 0) || (fdm_active->rmr_region.error_flags != 0)) { printk(KERN_ERR "Dump taken by platform is not valid\n"); return -EINVAL; } if ((fdm_active->rmr_region.bytes_dumped != fdm_active->rmr_region.source_len) || !fdm_active->cpu_state_data.bytes_dumped) { printk(KERN_ERR "Dump taken by platform is incomplete\n"); return -EINVAL; } /* Validate the fadump crash info header */ fdh = __va(fw_dump.fadumphdr_addr); if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { printk(KERN_ERR "Crash info header is not valid.\n"); return -EINVAL; } rc = fadump_build_cpu_notes(fdm_active); if (rc) return rc; /* * We are done validating dump info and elfcore header is now ready * to be exported. set elfcorehdr_addr so that vmcore module will * export the elfcore header through '/proc/vmcore'. 
*/ elfcorehdr_addr = fdh->elfcorehdr_addr; return 0; } static inline void fadump_add_crash_memory(unsigned long long base, unsigned long long end) { if (base == end) return; pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n", crash_mem_ranges, base, end - 1, (end - base)); crash_memory_ranges[crash_mem_ranges].base = base; crash_memory_ranges[crash_mem_ranges].size = end - base; crash_mem_ranges++; } static void fadump_exclude_reserved_area(unsigned long long start, unsigned long long end) { unsigned long long ra_start, ra_end; ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; if ((ra_start < end) && (ra_end > start)) { if ((start < ra_start) && (end > ra_end)) { fadump_add_crash_memory(start, ra_start); fadump_add_crash_memory(ra_end, end); } else if (start < ra_start) { fadump_add_crash_memory(start, ra_start); } else if (ra_end < end) { fadump_add_crash_memory(ra_end, end); } } else fadump_add_crash_memory(start, end); } static int fadump_init_elfcore_header(char *bufp) { struct elfhdr *elf; elf = (struct elfhdr *) bufp; bufp += sizeof(struct elfhdr); memcpy(elf->e_ident, ELFMAG, SELFMAG); elf->e_ident[EI_CLASS] = ELF_CLASS; elf->e_ident[EI_DATA] = ELF_DATA; elf->e_ident[EI_VERSION] = EV_CURRENT; elf->e_ident[EI_OSABI] = ELF_OSABI; memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); elf->e_type = ET_CORE; elf->e_machine = ELF_ARCH; elf->e_version = EV_CURRENT; elf->e_entry = 0; elf->e_phoff = sizeof(struct elfhdr); elf->e_shoff = 0; elf->e_flags = ELF_CORE_EFLAGS; elf->e_ehsize = sizeof(struct elfhdr); elf->e_phentsize = sizeof(struct elf_phdr); elf->e_phnum = 0; elf->e_shentsize = 0; elf->e_shnum = 0; elf->e_shstrndx = 0; return 0; } /* * Traverse through memblock structure and setup crash memory ranges. These * ranges will be used create PT_LOAD program headers in elfcore header. 
*/ static void fadump_setup_crash_memory_ranges(void) { struct memblock_region *reg; unsigned long long start, end; pr_debug("Setup crash memory ranges.\n"); crash_mem_ranges = 0; /* * add the first memory chunk (RMA_START through boot_memory_size) as * a separate memory chunk. The reason is, at the time crash firmware * will move the content of this memory chunk to different location * specified during fadump registration. We need to create a separate * program header for this chunk with the correct offset. */ fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size); for_each_memblock(memory, reg) { start = (unsigned long long)reg->base; end = start + (unsigned long long)reg->size; if (start == RMA_START && end >= fw_dump.boot_memory_size) start = fw_dump.boot_memory_size; /* add this range excluding the reserved dump area. */ fadump_exclude_reserved_area(start, end); } } /* * If the given physical address falls within the boot memory region then * return the relocated address that points to the dump region reserved * for saving initial boot memory contents. */ static inline unsigned long fadump_relocate(unsigned long paddr) { if (paddr > RMA_START && paddr < fw_dump.boot_memory_size) return fdm.rmr_region.destination_address + paddr; else return paddr; } static int fadump_create_elfcore_headers(char *bufp) { struct elfhdr *elf; struct elf_phdr *phdr; int i; fadump_init_elfcore_header(bufp); elf = (struct elfhdr *)bufp; bufp += sizeof(struct elfhdr); /* * setup ELF PT_NOTE, place holder for cpu notes info. The notes info * will be populated during second kernel boot after crash. Hence * this PT_NOTE will always be the first elf note. * * NOTE: Any new ELF note addition should be placed after this note. 
*/ phdr = (struct elf_phdr *)bufp; bufp += sizeof(struct elf_phdr); phdr->p_type = PT_NOTE; phdr->p_flags = 0; phdr->p_vaddr = 0; phdr->p_align = 0; phdr->p_offset = 0; phdr->p_paddr = 0; phdr->p_filesz = 0; phdr->p_memsz = 0; (elf->e_phnum)++; /* setup ELF PT_NOTE for vmcoreinfo */ phdr = (struct elf_phdr *)bufp; bufp += sizeof(struct elf_phdr); phdr->p_type = PT_NOTE; phdr->p_flags = 0; phdr->p_vaddr = 0; phdr->p_align = 0; phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note()); phdr->p_offset = phdr->p_paddr; phdr->p_memsz = vmcoreinfo_max_size; phdr->p_filesz = vmcoreinfo_max_size; /* Increment number of program headers. */ (elf->e_phnum)++; /* setup PT_LOAD sections. */ for (i = 0; i < crash_mem_ranges; i++) { unsigned long long mbase, msize; mbase = crash_memory_ranges[i].base; msize = crash_memory_ranges[i].size; if (!msize) continue; phdr = (struct elf_phdr *)bufp; bufp += sizeof(struct elf_phdr); phdr->p_type = PT_LOAD; phdr->p_flags = PF_R|PF_W|PF_X; phdr->p_offset = mbase; if (mbase == RMA_START) { /* * The entire RMA region will be moved by firmware * to the specified destination_address. Hence set * the correct offset. */ phdr->p_offset = fdm.rmr_region.destination_address; } phdr->p_paddr = mbase; phdr->p_vaddr = (unsigned long)__va(mbase); phdr->p_filesz = msize; phdr->p_memsz = msize; phdr->p_align = 0; /* Increment number of program headers. */ (elf->e_phnum)++; } return 0; } static unsigned long init_fadump_header(unsigned long addr) { struct fadump_crash_info_header *fdh; if (!addr) return 0; fw_dump.fadumphdr_addr = addr; fdh = __va(addr); addr += sizeof(struct fadump_crash_info_header); memset(fdh, 0, sizeof(struct fadump_crash_info_header)); fdh->magic_number = FADUMP_CRASH_INFO_MAGIC; fdh->elfcorehdr_addr = addr; /* We will set the crashing cpu id in crash_fadump() during crash. 
*/ fdh->crashing_cpu = CPU_UNKNOWN; return addr; } static void register_fadump(void) { unsigned long addr; void *vaddr; /* * If no memory is reserved then we can not register for firmware- * assisted dump. */ if (!fw_dump.reserve_dump_area_size) return; fadump_setup_crash_memory_ranges(); addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len; /* Initialize fadump crash info header. */ addr = init_fadump_header(addr); vaddr = __va(addr); pr_debug("Creating ELF core headers at %#016lx\n", addr); fadump_create_elfcore_headers(vaddr); /* register the future kernel dump with firmware. */ register_fw_dump(&fdm); } static int fadump_unregister_dump(struct fadump_mem_struct *fdm) { int rc = 0; unsigned int wait_time; pr_debug("Un-register firmware-assisted dump\n"); /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, FADUMP_UNREGISTER, fdm, sizeof(struct fadump_mem_struct)); wait_time = rtas_busy_delay_time(rc); if (wait_time) mdelay(wait_time); } while (wait_time); if (rc) { printk(KERN_ERR "Failed to un-register firmware-assisted dump." " unexpected error(%d).\n", rc); return rc; } fw_dump.dump_registered = 0; return 0; } static int fadump_invalidate_dump(struct fadump_mem_struct *fdm) { int rc = 0; unsigned int wait_time; pr_debug("Invalidating firmware-assisted dump registration\n"); /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL, FADUMP_INVALIDATE, fdm, sizeof(struct fadump_mem_struct)); wait_time = rtas_busy_delay_time(rc); if (wait_time) mdelay(wait_time); } while (wait_time); if (rc) { printk(KERN_ERR "Failed to invalidate firmware-assisted dump " "rgistration. unexpected error(%d).\n", rc); return rc; } fw_dump.dump_active = 0; fdm_active = NULL; return 0; } void fadump_cleanup(void) { /* Invalidate the registration only if dump is active. 
*/ if (fw_dump.dump_active) { init_fadump_mem_struct(&fdm, fdm_active->cpu_state_data.destination_address); fadump_invalidate_dump(&fdm); } } /* * Release the memory that was reserved in early boot to preserve the memory * contents. The released memory will be available for general use. */ static void fadump_release_memory(unsigned long begin, unsigned long end) { unsigned long addr; unsigned long ra_start, ra_end; ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; for (addr = begin; addr < end; addr += PAGE_SIZE) { /* * exclude the dump reserve area. Will reuse it for next * fadump registration. */ if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start)) continue; ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); free_page((unsigned long)__va(addr)); totalram_pages++; } } static void fadump_invalidate_release_mem(void) { unsigned long reserved_area_start, reserved_area_end; unsigned long destination_address; mutex_lock(&fadump_mutex); if (!fw_dump.dump_active) { mutex_unlock(&fadump_mutex); return; } destination_address = fdm_active->cpu_state_data.destination_address; fadump_cleanup(); mutex_unlock(&fadump_mutex); /* * Save the current reserved memory bounds we will require them * later for releasing the memory for general use. */ reserved_area_start = fw_dump.reserve_dump_area_start; reserved_area_end = reserved_area_start + fw_dump.reserve_dump_area_size; /* * Setup reserve_dump_area_start and its size so that we can * reuse this reserved memory for Re-registration. 
*/ fw_dump.reserve_dump_area_start = destination_address; fw_dump.reserve_dump_area_size = get_fadump_area_size(); fadump_release_memory(reserved_area_start, reserved_area_end); if (fw_dump.cpu_notes_buf) { fadump_cpu_notes_buf_free( (unsigned long)__va(fw_dump.cpu_notes_buf), fw_dump.cpu_notes_buf_size); fw_dump.cpu_notes_buf = 0; fw_dump.cpu_notes_buf_size = 0; } /* Initialize the kernel dump memory structure for FAD registration. */ init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); } static ssize_t fadump_release_memory_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (!fw_dump.dump_active) return -EPERM; if (buf[0] == '1') { /* * Take away the '/proc/vmcore'. We are releasing the dump * memory, hence it will not be valid anymore. */ vmcore_cleanup(); fadump_invalidate_release_mem(); } else return -EINVAL; return count; } static ssize_t fadump_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", fw_dump.fadump_enabled); } static ssize_t fadump_register_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", fw_dump.dump_registered); } static ssize_t fadump_register_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; if (!fw_dump.fadump_enabled || fdm_active) return -EPERM; mutex_lock(&fadump_mutex); switch (buf[0]) { case '0': if (fw_dump.dump_registered == 0) { ret = -EINVAL; goto unlock_out; } /* Un-register Firmware-assisted dump */ fadump_unregister_dump(&fdm); break; case '1': if (fw_dump.dump_registered == 1) { ret = -EINVAL; goto unlock_out; } /* Register Firmware-assisted dump */ register_fadump(); break; default: ret = -EINVAL; break; } unlock_out: mutex_unlock(&fadump_mutex); return ret < 0 ? 
ret : count; } static int fadump_region_show(struct seq_file *m, void *private) { const struct fadump_mem_struct *fdm_ptr; if (!fw_dump.fadump_enabled) return 0; mutex_lock(&fadump_mutex); if (fdm_active) fdm_ptr = fdm_active; else { mutex_unlock(&fadump_mutex); fdm_ptr = &fdm; } seq_printf(m, "CPU : [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", fdm_ptr->cpu_state_data.destination_address, fdm_ptr->cpu_state_data.destination_address + fdm_ptr->cpu_state_data.source_len - 1, fdm_ptr->cpu_state_data.source_len, fdm_ptr->cpu_state_data.bytes_dumped); seq_printf(m, "HPTE: [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", fdm_ptr->hpte_region.destination_address, fdm_ptr->hpte_region.destination_address + fdm_ptr->hpte_region.source_len - 1, fdm_ptr->hpte_region.source_len, fdm_ptr->hpte_region.bytes_dumped); seq_printf(m, "DUMP: [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", fdm_ptr->rmr_region.destination_address, fdm_ptr->rmr_region.destination_address + fdm_ptr->rmr_region.source_len - 1, fdm_ptr->rmr_region.source_len, fdm_ptr->rmr_region.bytes_dumped); if (!fdm_active || (fw_dump.reserve_dump_area_start == fdm_ptr->cpu_state_data.destination_address)) goto out; /* Dump is active. Show reserved memory region. 
*/ seq_printf(m, " : [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", (unsigned long long)fw_dump.reserve_dump_area_start, fdm_ptr->cpu_state_data.destination_address - 1, fdm_ptr->cpu_state_data.destination_address - fw_dump.reserve_dump_area_start, fdm_ptr->cpu_state_data.destination_address - fw_dump.reserve_dump_area_start); out: if (fdm_active) mutex_unlock(&fadump_mutex); return 0; } static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem, 0200, NULL, fadump_release_memory_store); static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled, 0444, fadump_enabled_show, NULL); static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered, 0644, fadump_register_show, fadump_register_store); static int fadump_region_open(struct inode *inode, struct file *file) { return single_open(file, fadump_region_show, inode->i_private); } static const struct file_operations fadump_region_fops = { .open = fadump_region_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void fadump_init_files(void) { struct dentry *debugfs_file; int rc = 0; rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr); if (rc) printk(KERN_ERR "fadump: unable to create sysfs file" " fadump_enabled (%d)\n", rc); rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr); if (rc) printk(KERN_ERR "fadump: unable to create sysfs file" " fadump_registered (%d)\n", rc); debugfs_file = debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL, &fadump_region_fops); if (!debugfs_file) printk(KERN_ERR "fadump: unable to create debugfs file" " fadump_region\n"); if (fw_dump.dump_active) { rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr); if (rc) printk(KERN_ERR "fadump: unable to create sysfs file" " fadump_release_mem (%d)\n", rc); } return; } /* * Prepare for firmware-assisted dump. 
*/ int __init setup_fadump(void) { if (!fw_dump.fadump_enabled) return 0; if (!fw_dump.fadump_supported) { printk(KERN_ERR "Firmware-assisted dump is not supported on" " this hardware\n"); return 0; } fadump_show_config(); /* * If dump data is available then see if it is valid and prepare for * saving it to the disk. */ if (fw_dump.dump_active) { /* * if dump process fails then invalidate the registration * and release memory before proceeding for re-registration. */ if (process_fadump(fdm_active) < 0) fadump_invalidate_release_mem(); } /* Initialize the kernel dump memory structure for FAD registration. */ else if (fw_dump.reserve_dump_area_size) init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); fadump_init_files(); return 1; } subsys_initcall(setup_fadump);
gpl-2.0
mgherzan/linux-arm
drivers/input/joystick/iforce/iforce-usb.c
4538
6681
/* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include "iforce.h" void iforce_usb_xmit(struct iforce *iforce) { int n, c; unsigned long flags; spin_lock_irqsave(&iforce->xmit_lock, flags); if (iforce->xmit.head == iforce->xmit.tail) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } ((char *)iforce->out->transfer_buffer)[0] = iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); n = iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); iforce->out->transfer_buffer_length = n + 1; iforce->out->dev = iforce->usbdev; /* Copy rest of data then */ c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); if (n < c) c=n; memcpy(iforce->out->transfer_buffer + 1, &iforce->xmit.buf[iforce->xmit.tail], c); if (n != c) { memcpy(iforce->out->transfer_buffer + 1 + c, &iforce->xmit.buf[0], n-c); } XMIT_INC(iforce->xmit.tail, n); if ( 
(n=usb_submit_urb(iforce->out, GFP_ATOMIC)) ) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); dev_warn(&iforce->intf->dev, "usb_submit_urb failed %d\n", n); } /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended. * As long as the urb completion handler is not called, the transmiting * is considered to be running */ spin_unlock_irqrestore(&iforce->xmit_lock, flags); } static void iforce_usb_irq(struct urb *urb) { struct iforce *iforce = urb->context; struct device *dev = &iforce->intf->dev; int status; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(dev, "%s - urb has status of: %d\n", __func__, urb->status); goto exit; } iforce_process_packet(iforce, (iforce->data[0] << 8) | (urb->actual_length - 1), iforce->data + 1); exit: status = usb_submit_urb (urb, GFP_ATOMIC); if (status) dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, status); } static void iforce_usb_out(struct urb *urb) { struct iforce *iforce = urb->context; if (urb->status) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); dev_dbg(&iforce->intf->dev, "urb->status %d, exiting\n", urb->status); return; } iforce_usb_xmit(iforce); wake_up(&iforce->wait); } static void iforce_usb_ctrl(struct urb *urb) { struct iforce *iforce = urb->context; if (urb->status) return; iforce->ecmd = 0xff00 | urb->actual_length; wake_up(&iforce->wait); } static int iforce_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_host_interface *interface; struct usb_endpoint_descriptor *epirq, *epout; struct iforce *iforce; int err = -ENOMEM; interface = intf->cur_altsetting; epirq = &interface->endpoint[0].desc; epout = &interface->endpoint[1].desc; if (!(iforce = kzalloc(sizeof(struct iforce) + 32, 
GFP_KERNEL))) goto fail; if (!(iforce->irq = usb_alloc_urb(0, GFP_KERNEL))) goto fail; if (!(iforce->out = usb_alloc_urb(0, GFP_KERNEL))) goto fail; if (!(iforce->ctrl = usb_alloc_urb(0, GFP_KERNEL))) goto fail; iforce->bus = IFORCE_USB; iforce->usbdev = dev; iforce->intf = intf; iforce->cr.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE; iforce->cr.wIndex = 0; iforce->cr.wLength = cpu_to_le16(16); usb_fill_int_urb(iforce->irq, dev, usb_rcvintpipe(dev, epirq->bEndpointAddress), iforce->data, 16, iforce_usb_irq, iforce, epirq->bInterval); usb_fill_int_urb(iforce->out, dev, usb_sndintpipe(dev, epout->bEndpointAddress), iforce + 1, 32, iforce_usb_out, iforce, epout->bInterval); usb_fill_control_urb(iforce->ctrl, dev, usb_rcvctrlpipe(dev, 0), (void*) &iforce->cr, iforce->edata, 16, iforce_usb_ctrl, iforce); err = iforce_init_device(iforce); if (err) goto fail; usb_set_intfdata(intf, iforce); return 0; fail: if (iforce) { usb_free_urb(iforce->irq); usb_free_urb(iforce->out); usb_free_urb(iforce->ctrl); kfree(iforce); } return err; } static void iforce_usb_disconnect(struct usb_interface *intf) { struct iforce *iforce = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); input_unregister_device(iforce->dev); usb_free_urb(iforce->irq); usb_free_urb(iforce->out); usb_free_urb(iforce->ctrl); kfree(iforce); } static struct usb_device_id iforce_usb_ids [] = { { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */ { USB_DEVICE(0x05ef, 0x020a) }, /* AVB Top Shot Pegasus */ { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ { USB_DEVICE(0x06f8, 0x0003) }, /* 
Guillemot Jet Leader Force Feedback */ { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, iforce_usb_ids); struct usb_driver iforce_usb_driver = { .name = "iforce", .probe = iforce_usb_probe, .disconnect = iforce_usb_disconnect, .id_table = iforce_usb_ids, };
gpl-2.0
Hellybean/android_kernel_samsung_msm8660-common
drivers/pci/hotplug/cpcihp_zt5550.c
5050
8619
/* * cpcihp_zt5550.c * * Intel/Ziatech ZT5550 CompactPCI Host Controller driver * * Copyright 2002 SOMA Networks, Inc. * Copyright 2001 Intel San Luis Obispo * Copyright 2000,2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <scottm@somanetworks.com> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/signal.h> /* IRQF_SHARED */ #include "cpci_hotplug.h" #include "cpcihp_zt5550.h" #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>" #define DRIVER_DESC "ZT5550 CompactPCI Hot Plug Driver" #define MY_NAME "cpcihp_zt5550" #define dbg(format, arg...) \ do { \ if(debug) \ printk (KERN_DEBUG "%s: " format "\n", \ MY_NAME , ## arg); \ } while(0) #define err(format, arg...) 
printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) /* local variables */ static int debug; static int poll; static struct cpci_hp_controller_ops zt5550_hpc_ops; static struct cpci_hp_controller zt5550_hpc; /* Primary cPCI bus bridge device */ static struct pci_dev *bus0_dev; static struct pci_bus *bus0; /* Host controller device */ static struct pci_dev *hc_dev; /* Host controller register addresses */ static void __iomem *hc_registers; static void __iomem *csr_hc_index; static void __iomem *csr_hc_data; static void __iomem *csr_int_status; static void __iomem *csr_int_mask; static int zt5550_hc_config(struct pci_dev *pdev) { int ret; /* Since we know that no boards exist with two HC chips, treat it as an error */ if(hc_dev) { err("too many host controller devices?"); return -EBUSY; } ret = pci_enable_device(pdev); if(ret) { err("cannot enable %s\n", pci_name(pdev)); return ret; } hc_dev = pdev; dbg("hc_dev = %p", hc_dev); dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1)); dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1)); if(!request_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1), MY_NAME)) { err("cannot reserve MMIO region"); ret = -ENOMEM; goto exit_disable_device; } hc_registers = ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); if(!hc_registers) { err("cannot remap MMIO region %llx @ %llx", (unsigned long long)pci_resource_len(hc_dev, 1), (unsigned long long)pci_resource_start(hc_dev, 1)); ret = -ENODEV; goto exit_release_region; } csr_hc_index = hc_registers + CSR_HCINDEX; csr_hc_data = hc_registers + CSR_HCDATA; csr_int_status = hc_registers + CSR_INTSTAT; csr_int_mask = hc_registers + CSR_INTMASK; /* * Disable host control, fault and serial interrupts */ dbg("disabling host control, 
fault and serial interrupts"); writeb((u8) HC_INT_MASK_REG, csr_hc_index); writeb((u8) ALL_INDEXED_INTS_MASK, csr_hc_data); dbg("disabled host control, fault and serial interrupts"); /* * Disable timer0, timer1 and ENUM interrupts */ dbg("disabling timer0, timer1 and ENUM interrupts"); writeb((u8) ALL_DIRECT_INTS_MASK, csr_int_mask); dbg("disabled timer0, timer1 and ENUM interrupts"); return 0; exit_release_region: release_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); exit_disable_device: pci_disable_device(hc_dev); return ret; } static int zt5550_hc_cleanup(void) { if(!hc_dev) return -ENODEV; iounmap(hc_registers); release_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); pci_disable_device(hc_dev); return 0; } static int zt5550_hc_query_enum(void) { u8 value; value = inb_p(ENUM_PORT); return ((value & ENUM_MASK) == ENUM_MASK); } static int zt5550_hc_check_irq(void *dev_id) { int ret; u8 reg; ret = 0; if(dev_id == zt5550_hpc.dev_id) { reg = readb(csr_int_status); if(reg) ret = 1; } return ret; } static int zt5550_hc_enable_irq(void) { u8 reg; if(hc_dev == NULL) { return -ENODEV; } reg = readb(csr_int_mask); reg = reg & ~ENUM_INT_MASK; writeb(reg, csr_int_mask); return 0; } static int zt5550_hc_disable_irq(void) { u8 reg; if(hc_dev == NULL) { return -ENODEV; } reg = readb(csr_int_mask); reg = reg | ENUM_INT_MASK; writeb(reg, csr_int_mask); return 0; } static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { int status; status = zt5550_hc_config(pdev); if(status != 0) { return status; } dbg("returned from zt5550_hc_config"); memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller)); zt5550_hpc_ops.query_enum = zt5550_hc_query_enum; zt5550_hpc.ops = &zt5550_hpc_ops; if(!poll) { zt5550_hpc.irq = hc_dev->irq; zt5550_hpc.irq_flags = IRQF_SHARED; zt5550_hpc.dev_id = hc_dev; zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq; zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq; 
zt5550_hpc_ops.check_irq = zt5550_hc_check_irq; } else { info("using ENUM# polling mode"); } status = cpci_hp_register_controller(&zt5550_hpc); if(status != 0) { err("could not register cPCI hotplug controller"); goto init_hc_error; } dbg("registered controller"); /* Look for first device matching cPCI bus's bridge vendor and device IDs */ if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21154, NULL))) { status = -ENODEV; goto init_register_error; } bus0 = bus0_dev->subordinate; pci_dev_put(bus0_dev); status = cpci_hp_register_bus(bus0, 0x0a, 0x0f); if(status != 0) { err("could not register cPCI hotplug bus"); goto init_register_error; } dbg("registered bus"); status = cpci_hp_start(); if(status != 0) { err("could not started cPCI hotplug system"); cpci_hp_unregister_bus(bus0); goto init_register_error; } dbg("started cpci hp system"); return 0; init_register_error: cpci_hp_unregister_controller(&zt5550_hpc); init_hc_error: err("status = %d", status); zt5550_hc_cleanup(); return status; } static void __devexit zt5550_hc_remove_one(struct pci_dev *pdev) { cpci_hp_stop(); cpci_hp_unregister_bus(bus0); cpci_hp_unregister_controller(&zt5550_hpc); zt5550_hc_cleanup(); } static struct pci_device_id zt5550_hc_pci_tbl[] = { { PCI_VENDOR_ID_ZIATECH, PCI_DEVICE_ID_ZIATECH_5550_HC, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl); static struct pci_driver zt5550_hc_driver = { .name = "zt5550_hc", .id_table = zt5550_hc_pci_tbl, .probe = zt5550_hc_init_one, .remove = __devexit_p(zt5550_hc_remove_one), }; static int __init zt5550_init(void) { struct resource* r; int rc; info(DRIVER_DESC " version: " DRIVER_VERSION); r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register"); if(!r) return -EBUSY; rc = pci_register_driver(&zt5550_hc_driver); if(rc < 0) release_region(ENUM_PORT, 1); return rc; } static void __exit zt5550_exit(void) { pci_unregister_driver(&zt5550_hc_driver); release_region(ENUM_PORT, 1); } 
module_init(zt5550_init); module_exit(zt5550_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(poll, bool, 0644); MODULE_PARM_DESC(poll, "#ENUM polling mode enabled or not");
gpl-2.0
curbthepain/NuK3rn3l_m7_sense_lollipop
arch/sh/drivers/pci/fixups-sdk7786.c
7610
1770
/* * SDK7786 FPGA PCIe mux handling * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #define pr_fmt(fmt) "PCI: " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <mach/fpga.h> /* * The SDK7786 FPGA supports mangling of most of the slots in some way or * another. Slots 3/4 are special in that only one can be supported at a * time, and both appear on port 3 to the PCI bus scan. Enabling slot 4 * (the horizontal edge connector) will disable slot 3 entirely. * * Misconfigurations can be detected through the FPGA via the slot * resistors to determine card presence. Hotplug remains unsupported. */ static unsigned int slot4en __devinitdata; char *__devinit pcibios_setup(char *str) { if (strcmp(str, "slot4en") == 0) { slot4en = 1; return NULL; } return str; } static int __init sdk7786_pci_init(void) { u16 data = fpga_read_reg(PCIECR); /* * Enable slot #4 if it's been specified on the command line. * * Optionally reroute if slot #4 has a card present while slot #3 * does not, regardless of command line value. * * Card presence is logically inverted. */ slot4en ?: (!(data & PCIECR_PRST4) && (data & PCIECR_PRST3)); if (slot4en) { pr_info("Activating PCIe slot#4 (disabling slot#3)\n"); data &= ~PCIECR_PCIEMUX1; fpga_write_reg(data, PCIECR); /* Warn about forced rerouting if slot#3 is occupied */ if ((data & PCIECR_PRST3) == 0) { pr_warning("Unreachable card detected in slot#3\n"); return -EBUSY; } } else pr_info("PCIe slot#4 disabled\n"); return 0; } postcore_initcall(sdk7786_pci_init);
gpl-2.0
Docker-J/Sail_GEE_L
net/irda/irnet/irnet_ppp.c
8378
33314
/* * IrNET protocol module : Synchronous PPP over an IrDA socket. * * Jean II - HPL `00 - <jt@hpl.hp.com> * * This file implement the PPP interface and /dev/irnet character device. * The PPP interface hook to the ppp_generic module, handle all our * relationship to the PPP code in the kernel (and by extension to pppd), * and exchange PPP frames with this module (send/receive). * The /dev/irnet device is used primarily for 2 functions : * 1) as a stub for pppd (the ppp daemon), so that we can appropriately * generate PPP sessions (we pretend we are a tty). * 2) as a control channel (write commands, read events) */ #include <linux/sched.h> #include <linux/slab.h> #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ /* Generic PPP callbacks (to call us) */ static const struct ppp_channel_ops irnet_ppp_ops = { .start_xmit = ppp_irnet_send, .ioctl = ppp_irnet_ioctl }; /************************* CONTROL CHANNEL *************************/ /* * When a pppd instance is not active on /dev/irnet, it acts as a control * channel. * Writing allow to set up the IrDA destination of the IrNET channel, * and any application may be read events happening in IrNET... */ /*------------------------------------------------------------------*/ /* * Write is used to send a command to configure a IrNET channel * before it is open by pppd. The syntax is : "command argument" * Currently there is only two defined commands : * o name : set the requested IrDA nickname of the IrNET peer. * o addr : set the requested IrDA address of the IrNET peer. * Note : the code is crude, but effective... */ static inline ssize_t irnet_ctrl_write(irnet_socket * ap, const char __user *buf, size_t count) { char command[IRNET_MAX_COMMAND]; char * start; /* Current command being processed */ char * next; /* Next command to process */ int length; /* Length of current command */ DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check for overflow... 
*/ DABORT(count >= IRNET_MAX_COMMAND, -ENOMEM, CTRL_ERROR, "Too much data !!!\n"); /* Get the data in the driver */ if(copy_from_user(command, buf, count)) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } /* Safe terminate the string */ command[count] = '\0'; DEBUG(CTRL_INFO, "Command line received is ``%s'' (%Zd).\n", command, count); /* Check every commands in the command line */ next = command; while(next != NULL) { /* Look at the next command */ start = next; /* Scrap whitespaces before the command */ start = skip_spaces(start); /* ',' is our command separator */ next = strchr(start, ','); if(next) { *next = '\0'; /* Terminate command */ length = next - start; /* Length */ next++; /* Skip the '\0' */ } else length = strlen(start); DEBUG(CTRL_INFO, "Found command ``%s'' (%d).\n", start, length); /* Check if we recognised one of the known command * We can't use "switch" with strings, so hack with "continue" */ /* First command : name -> Requested IrDA nickname */ if(!strncmp(start, "name", 4)) { /* Copy the name only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { /* Strip out trailing whitespaces */ while(isspace(start[length - 1])) length--; DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); /* Copy the name for later reuse */ memcpy(ap->rname, start + 5, length - 5); ap->rname[length - 5] = '\0'; } else ap->rname[0] = '\0'; DEBUG(CTRL_INFO, "Got rname = ``%s''\n", ap->rname); /* Restart the loop */ continue; } /* Second command : addr, daddr -> Requested IrDA destination address * Also process : saddr -> Requested IrDA source address */ if((!strncmp(start, "addr", 4)) || (!strncmp(start, "daddr", 5)) || (!strncmp(start, "saddr", 5))) { __u32 addr = DEV_ADDR_ANY; /* Copy the address only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { char * begp = start + 5; char * endp; /* Scrap whitespaces before the command */ begp = 
skip_spaces(begp); /* Convert argument to a number (last arg is the base) */ addr = simple_strtoul(begp, &endp, 16); /* Has it worked ? (endp should be start + length) */ DABORT(endp <= (start + 5), -EINVAL, CTRL_ERROR, "Invalid address.\n"); } /* Which type of address ? */ if(start[0] == 's') { /* Save it */ ap->rsaddr = addr; DEBUG(CTRL_INFO, "Got rsaddr = %08x\n", ap->rsaddr); } else { /* Save it */ ap->rdaddr = addr; DEBUG(CTRL_INFO, "Got rdaddr = %08x\n", ap->rdaddr); } /* Restart the loop */ continue; } /* Other possible command : connect N (number of retries) */ /* No command matched -> Failed... */ DABORT(1, -EINVAL, CTRL_ERROR, "Not a recognised IrNET command.\n"); } /* Success : we have parsed all commands successfully */ return count; } #ifdef INITIAL_DISCOVERY /*------------------------------------------------------------------*/ /* * Function irnet_get_discovery_log (self) * * Query the content on the discovery log if not done * * This function query the current content of the discovery log * at the startup of the event channel and save it in the internal struct. */ static void irnet_get_discovery_log(irnet_socket * ap) { __u16 mask = irlmp_service_to_hint(S_LAN); /* Ask IrLMP for the current discovery log */ ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if(ap->discoveries == NULL) ap->disco_number = -1; DEBUG(CTRL_INFO, "Got the log (0x%p), size is %d\n", ap->discoveries, ap->disco_number); } /*------------------------------------------------------------------*/ /* * Function irnet_read_discovery_log (self, event) * * Read the content on the discovery log * * This function dump the current content of the discovery log * at the startup of the event channel. * Return 1 if wrote an event on the control channel... 
* * State of the ap->disco_XXX variables : * Socket creation : discoveries = NULL ; disco_index = 0 ; disco_number = 0 * While reading : discoveries = ptr ; disco_index = X ; disco_number = Y * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1 */ static inline int irnet_read_discovery_log(irnet_socket * ap, char * event) { int done_event = 0; DENTER(CTRL_TRACE, "(ap=0x%p, event=0x%p)\n", ap, event); /* Test if we have some work to do or we have already finished */ if(ap->disco_number == -1) { DEBUG(CTRL_INFO, "Already done\n"); return 0; } /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Check if we have more item to dump */ if(ap->disco_index < ap->disco_number) { /* Write an event */ sprintf(event, "Found %08x (%s) behind %08x {hints %02X-%02X}\n", ap->discoveries[ap->disco_index].daddr, ap->discoveries[ap->disco_index].info, ap->discoveries[ap->disco_index].saddr, ap->discoveries[ap->disco_index].hints[0], ap->discoveries[ap->disco_index].hints[1]); DEBUG(CTRL_INFO, "Writing discovery %d : %s\n", ap->disco_index, ap->discoveries[ap->disco_index].info); /* We have an event */ done_event = 1; /* Next discovery */ ap->disco_index++; } /* Check if we have done the last item */ if(ap->disco_index >= ap->disco_number) { /* No more items : remove the log and signal termination */ DEBUG(CTRL_INFO, "Cleaning up log (0x%p)\n", ap->discoveries); if(ap->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(ap->discoveries); ap->discoveries = NULL; } ap->disco_number = -1; } return done_event; } #endif /* INITIAL_DISCOVERY */ /*------------------------------------------------------------------*/ /* * Read is used to get IrNET events */ static inline ssize_t irnet_ctrl_read(irnet_socket * ap, struct file * file, char __user * buf, size_t count) { DECLARE_WAITQUEUE(wait, current); char event[64]; /* Max event is 61 char */ ssize_t ret = 0; 
DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check if we can write an event out in one go */ DABORT(count < sizeof(event), -EOVERFLOW, CTRL_ERROR, "Buffer to small.\n"); #ifdef INITIAL_DISCOVERY /* Check if we have read the log */ if(irnet_read_discovery_log(ap, event)) { /* We have an event !!! Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return strlen(event); } #endif /* INITIAL_DISCOVERY */ /* Put ourselves on the wait queue to be woken up */ add_wait_queue(&irnet_events.rwait, &wait); current->state = TASK_INTERRUPTIBLE; for(;;) { /* If there is unread events */ ret = 0; if(ap->event_index != irnet_events.index) break; ret = -EAGAIN; if(file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if(signal_pending(current)) break; /* Yield and wait to be woken up */ schedule(); } current->state = TASK_RUNNING; remove_wait_queue(&irnet_events.rwait, &wait); /* Did we got it ? */ if(ret != 0) { /* No, return the error code */ DEXIT(CTRL_TRACE, " - ret %Zd\n", ret); return ret; } /* Which event is it ? 
*/ switch(irnet_events.log[ap->event_index].event) { case IRNET_DISCOVER: sprintf(event, "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_EXPIRE: sprintf(event, "Expired %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_CONNECT_TO: sprintf(event, "Connected to %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_CONNECT_FROM: sprintf(event, "Connection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_REQUEST_FROM: sprintf(event, "Request from %08x (%s) behind %08x\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr); break; case IRNET_NOANSWER_FROM: sprintf(event, "No-answer from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_BLOCKED_LINK: sprintf(event, "Blocked link with %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_FROM: sprintf(event, "Disconnection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_TO: sprintf(event, 
"Disconnected to %08x (%s)\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name); break; default: sprintf(event, "Bug\n"); } /* Increment our event index */ ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS; DEBUG(CTRL_INFO, "Event is :%s", event); /* Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return strlen(event); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet. * Just check if there are new events... */ static inline unsigned int irnet_ctrl_poll(irnet_socket * ap, struct file * file, poll_table * wait) { unsigned int mask; DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); poll_wait(file, &irnet_events.rwait, wait); mask = POLLOUT | POLLWRNORM; /* If there is unread events */ if(ap->event_index != irnet_events.index) mask |= POLLIN | POLLRDNORM; #ifdef INITIAL_DISCOVERY if(ap->disco_number != -1) { /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Recheck */ if(ap->disco_number != -1) mask |= POLLIN | POLLRDNORM; } #endif /* INITIAL_DISCOVERY */ DEXIT(CTRL_TRACE, " - mask=0x%X\n", mask); return mask; } /*********************** FILESYSTEM CALLBACKS ***********************/ /* * Implement the usual open, read, write functions that will be called * by the file system when some action is performed on /dev/irnet. * Most of those actions will in fact be performed by "pppd" or * the control channel, we just act as a redirector... */ /*------------------------------------------------------------------*/ /* * Open : when somebody open /dev/irnet * We basically create a new instance of irnet and initialise it. 
*/ static int dev_irnet_open(struct inode * inode, struct file * file) { struct irnet_socket * ap; int err; DENTER(FS_TRACE, "(file=0x%p)\n", file); #ifdef SECURE_DEVIRNET /* This could (should?) be enforced by the permissions on /dev/irnet. */ if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ /* Allocate a private structure for this IrNET instance */ ap = kzalloc(sizeof(*ap), GFP_KERNEL); DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); /* initialize the irnet structure */ ap->file = file; /* PPP channel setup */ ap->ppp_open = 0; ap->chan.private = ap; ap->chan.ops = &irnet_ppp_ops; ap->chan.mtu = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->chan.hdrlen = 2 + TTP_MAX_HEADER; /* for A/C + Max IrDA hdr */ /* PPP parameters */ ap->mru = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->xaccm[0] = ~0U; ap->xaccm[3] = 0x60000000U; ap->raccm = ~0U; /* Setup the IrDA part... */ err = irda_irnet_create(ap); if(err) { DERROR(FS_ERROR, "Can't setup IrDA link...\n"); kfree(ap); return err; } /* For the control channel */ ap->event_index = irnet_events.index; /* Cancel all past events */ mutex_init(&ap->lock); /* Put our stuff where we will be able to find it later */ file->private_data = ap; DEXIT(FS_TRACE, " - ap=0x%p\n", ap); return 0; } /*------------------------------------------------------------------*/ /* * Close : when somebody close /dev/irnet * Destroy the instance of /dev/irnet */ static int dev_irnet_close(struct inode * inode, struct file * file) { irnet_socket * ap = file->private_data; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); DABORT(ap == NULL, 0, FS_ERROR, "ap is NULL !!!\n"); /* Detach ourselves */ file->private_data = NULL; /* Close IrDA stuff */ irda_irnet_destroy(ap); /* Disconnect from the generic PPP layer if not already done */ if(ap->ppp_open) { DERROR(FS_ERROR, "Channel still registered - deregistering !\n"); ap->ppp_open = 0; ppp_unregister_channel(&ap->chan); } kfree(ap); DEXIT(FS_TRACE, 
"\n"); return 0; } /*------------------------------------------------------------------*/ /* * Write does nothing. * (we receive packet from ppp_generic through ppp_irnet_send()) */ static ssize_t dev_irnet_write(struct file * file, const char __user *buf, size_t count, loff_t * ppos) { irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_write(ap, buf, count); } /*------------------------------------------------------------------*/ /* * Read doesn't do much either. * (pppd poll us, but ultimately reads through /dev/ppp) */ static ssize_t dev_irnet_read(struct file * file, char __user * buf, size_t count, loff_t * ppos) { irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_read(ap, file, buf, count); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet */ static unsigned int dev_irnet_poll(struct file * file, poll_table * wait) { irnet_socket * ap = file->private_data; unsigned int mask; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); mask = POLLOUT | POLLWRNORM; DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(!ap->ppp_open) mask |= irnet_ctrl_poll(ap, file, wait); DEXIT(FS_TRACE, " - mask=0x%X\n", mask); return mask; } /*------------------------------------------------------------------*/ /* * IOCtl : Called when someone does some ioctls on /dev/irnet * This is the way pppd configure us and control us while the PPP * instance is active. 
*/ static long dev_irnet_ioctl( struct file * file, unsigned int cmd, unsigned long arg) { irnet_socket * ap = file->private_data; int err; int val; void __user *argp = (void __user *)arg; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p, cmd=0x%X)\n", file, ap, cmd); /* Basic checks... */ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); #ifdef SECURE_DEVIRNET if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ err = -EFAULT; switch(cmd) { /* Set discipline (should be N_SYNC_PPP or N_TTY) */ case TIOCSETD: if(get_user(val, (int __user *)argp)) break; if((val == N_SYNC_PPP) || (val == N_PPP)) { DEBUG(FS_INFO, "Entering PPP discipline.\n"); /* PPP channel setup (ap->chan in configured in dev_irnet_open())*/ if (mutex_lock_interruptible(&ap->lock)) return -EINTR; err = ppp_register_channel(&ap->chan); if(err == 0) { /* Our ppp side is active */ ap->ppp_open = 1; DEBUG(FS_INFO, "Trying to establish a connection.\n"); /* Setup the IrDA link now - may fail... */ irda_irnet_connect(ap); } else DERROR(FS_ERROR, "Can't setup PPP channel...\n"); mutex_unlock(&ap->lock); } else { /* In theory, should be N_TTY */ DEBUG(FS_INFO, "Exiting PPP discipline.\n"); /* Disconnect from the generic PPP layer */ if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open) { ap->ppp_open = 0; ppp_unregister_channel(&ap->chan); } else DERROR(FS_ERROR, "Channel not registered !\n"); err = 0; mutex_unlock(&ap->lock); } break; /* Query PPP channel and unit number */ case PPPIOCGCHAN: if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; mutex_unlock(&ap->lock); break; case PPPIOCGUNIT: if (mutex_lock_interruptible(&ap->lock)) return -EINTR; if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) err = 0; mutex_unlock(&ap->lock); break; /* All these ioctls can be passed both directly and from ppp_generic, * so we just deal with them in one place... 
*/ case PPPIOCGFLAGS: case PPPIOCSFLAGS: case PPPIOCGASYNCMAP: case PPPIOCSASYNCMAP: case PPPIOCGRASYNCMAP: case PPPIOCSRASYNCMAP: case PPPIOCGXASYNCMAP: case PPPIOCSXASYNCMAP: case PPPIOCGMRU: case PPPIOCSMRU: DEBUG(FS_INFO, "Standard PPP ioctl.\n"); if(!capable(CAP_NET_ADMIN)) err = -EPERM; else { if (mutex_lock_interruptible(&ap->lock)) return -EINTR; err = ppp_irnet_ioctl(&ap->chan, cmd, arg); mutex_unlock(&ap->lock); } break; /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ /* Get termios */ case TCGETS: DEBUG(FS_INFO, "Get termios.\n"); if (mutex_lock_interruptible(&ap->lock)) return -EINTR; #ifndef TCGETS2 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) err = 0; #else if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) err = 0; #endif mutex_unlock(&ap->lock); break; /* Set termios */ case TCSETSF: DEBUG(FS_INFO, "Set termios.\n"); if (mutex_lock_interruptible(&ap->lock)) return -EINTR; #ifndef TCGETS2 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) err = 0; #else if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) err = 0; #endif mutex_unlock(&ap->lock); break; /* Set DTR/RTS */ case TIOCMBIS: case TIOCMBIC: /* Set exclusive/non-exclusive mode */ case TIOCEXCL: case TIOCNXCL: DEBUG(FS_INFO, "TTY compatibility.\n"); err = 0; break; case TCGETA: DEBUG(FS_INFO, "TCGETA\n"); break; case TCFLSH: DEBUG(FS_INFO, "TCFLSH\n"); /* Note : this will flush buffers in PPP, so it *must* be done * We should also worry that we don't accept junk here and that * we get rid of our own buffers */ #ifdef FLUSH_TO_PPP if (mutex_lock_interruptible(&ap->lock)) return -EINTR; ppp_output_wakeup(&ap->chan); mutex_unlock(&ap->lock); #endif /* FLUSH_TO_PPP */ err = 0; break; case FIONREAD: DEBUG(FS_INFO, "FIONREAD\n"); val = 0; if(put_user(val, (int __user *)argp)) break; err = 0; break; default: DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", 
cmd); err = -ENOTTY; } DEXIT(FS_TRACE, " - err = 0x%X\n", err); return err; } /************************** PPP CALLBACKS **************************/ /* * This are the functions that the generic PPP driver in the kernel * will call to communicate to us. */ /*------------------------------------------------------------------*/ /* * Prepare the ppp frame for transmission over the IrDA socket. * We make sure that the header space is enough, and we change ppp header * according to flags passed by pppd. * This is not a callback, but just a helper function used in ppp_irnet_send() */ static inline struct sk_buff * irnet_prepare_skb(irnet_socket * ap, struct sk_buff * skb) { unsigned char * data; int proto; /* PPP protocol */ int islcp; /* Protocol == LCP */ int needaddr; /* Need PPP address */ DENTER(PPP_TRACE, "(ap=0x%p, skb=0x%p)\n", ap, skb); /* Extract PPP protocol from the frame */ data = skb->data; proto = (data[0] << 8) + data[1]; /* LCP packets with codes between 1 (configure-request) * and 7 (code-reject) must be sent as though no options * have been negotiated. */ islcp = (proto == PPP_LCP) && (1 <= data[2]) && (data[2] <= 7); /* compress protocol field if option enabled */ if((data[0] == 0) && (ap->flags & SC_COMP_PROT) && (!islcp)) skb_pull(skb,1); /* Check if we need address/control fields */ needaddr = 2*((ap->flags & SC_COMP_AC) == 0 || islcp); /* Is the skb headroom large enough to contain all IrDA-headers? */ if((skb_headroom(skb) < (ap->max_header_size + needaddr)) || (skb_shared(skb))) { struct sk_buff * new_skb; DEBUG(PPP_INFO, "Reallocating skb\n"); /* Create a new skb */ new_skb = skb_realloc_headroom(skb, ap->max_header_size + needaddr); /* We have to free the original skb anyway */ dev_kfree_skb(skb); /* Did the realloc succeed ? 
*/ DABORT(new_skb == NULL, NULL, PPP_ERROR, "Could not realloc skb\n"); /* Use the new skb instead */ skb = new_skb; } /* prepend address/control fields if necessary */ if(needaddr) { skb_push(skb, 2); skb->data[0] = PPP_ALLSTATIONS; skb->data[1] = PPP_UI; } DEXIT(PPP_TRACE, "\n"); return skb; } /*------------------------------------------------------------------*/ /* * Send a packet to the peer over the IrTTP connection. * Returns 1 iff the packet was accepted. * Returns 0 iff packet was not consumed. * If the packet was not accepted, we will call ppp_output_wakeup * at some later time to reactivate flow control in ppp_generic. */ static int ppp_irnet_send(struct ppp_channel * chan, struct sk_buff * skb) { irnet_socket * self = (struct irnet_socket *) chan->private; int ret; DENTER(PPP_TRACE, "(channel=0x%p, ap/self=0x%p)\n", chan, self); /* Check if things are somewhat valid... */ DASSERT(self != NULL, 0, PPP_ERROR, "Self is NULL !!!\n"); /* Check if we are connected */ if(!(test_bit(0, &self->ttp_open))) { #ifdef CONNECT_IN_SEND /* Let's try to connect one more time... */ /* Note : we won't be connected after this call, but we should be * ready for next packet... */ /* If we are already connecting, this will fail */ irda_irnet_connect(self); #endif /* CONNECT_IN_SEND */ DEBUG(PPP_INFO, "IrTTP not ready ! (%ld-%ld)\n", self->ttp_open, self->ttp_connect); /* Note : we can either drop the packet or block the packet. * * Blocking the packet allow us a better connection time, * because by calling ppp_output_wakeup() we can have * ppp_generic resending the LCP request immediately to us, * rather than waiting for one of pppd periodic transmission of * LCP request. * * On the other hand, if we block all packet, all those periodic * transmissions of pppd accumulate in ppp_generic, creating a * backlog of LCP request. When we eventually connect later on, * we have to transmit all this backlog before we can connect * proper (if we don't timeout before). 
* * The current strategy is as follow : * While we are attempting to connect, we block packets to get * a better connection time. * If we fail to connect, we drain the queue and start dropping packets */ #ifdef BLOCK_WHEN_CONNECT /* If we are attempting to connect */ if(test_bit(0, &self->ttp_connect)) { /* Blocking packet, ppp_generic will retry later */ return 0; } #endif /* BLOCK_WHEN_CONNECT */ /* Dropping packet, pppd will retry later */ dev_kfree_skb(skb); return 1; } /* Check if the queue can accept any packet, otherwise block */ if(self->tx_flow != FLOW_START) DRETURN(0, PPP_INFO, "IrTTP queue full (%d skbs)...\n", skb_queue_len(&self->tsap->tx_queue)); /* Prepare ppp frame for transmission */ skb = irnet_prepare_skb(self, skb); DABORT(skb == NULL, 1, PPP_ERROR, "Prepare skb for Tx failed.\n"); /* Send the packet to IrTTP */ ret = irttp_data_request(self->tsap, skb); if(ret < 0) { /* * > IrTTPs tx queue is full, so we just have to * > drop the frame! You might think that we should * > just return -1 and don't deallocate the frame, * > but that is dangerous since it's possible that * > we have replaced the original skb with a new * > one with larger headroom, and that would really * > confuse do_dev_queue_xmit() in dev.c! I have * > tried :-) DB * Correction : we verify the flow control above (self->tx_flow), * so we come here only if IrTTP doesn't like the packet (empty, * too large, IrTTP not connected). In those rare cases, it's ok * to drop it, we don't want to see it here again... * Jean II */ DERROR(PPP_ERROR, "IrTTP doesn't like this packet !!! (0x%X)\n", ret); /* irttp_data_request already free the packet */ } DEXIT(PPP_TRACE, "\n"); return 1; /* Packet has been consumed */ } /*------------------------------------------------------------------*/ /* * Take care of the ioctls that ppp_generic doesn't want to deal with... * Note : we are also called from dev_irnet_ioctl(). 
*/ static int ppp_irnet_ioctl(struct ppp_channel * chan, unsigned int cmd, unsigned long arg) { irnet_socket * ap = (struct irnet_socket *) chan->private; int err; int val; u32 accm[8]; void __user *argp = (void __user *)arg; DENTER(PPP_TRACE, "(channel=0x%p, ap=0x%p, cmd=0x%X)\n", chan, ap, cmd); /* Basic checks... */ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); err = -EFAULT; switch(cmd) { /* PPP flags */ case PPPIOCGFLAGS: val = ap->flags | ap->rbits; if(put_user(val, (int __user *) argp)) break; err = 0; break; case PPPIOCSFLAGS: if(get_user(val, (int __user *) argp)) break; ap->flags = val & ~SC_RCV_BITS; ap->rbits = val & SC_RCV_BITS; err = 0; break; /* Async map stuff - all dummy to please pppd */ case PPPIOCGASYNCMAP: if(put_user(ap->xaccm[0], (u32 __user *) argp)) break; err = 0; break; case PPPIOCSASYNCMAP: if(get_user(ap->xaccm[0], (u32 __user *) argp)) break; err = 0; break; case PPPIOCGRASYNCMAP: if(put_user(ap->raccm, (u32 __user *) argp)) break; err = 0; break; case PPPIOCSRASYNCMAP: if(get_user(ap->raccm, (u32 __user *) argp)) break; err = 0; break; case PPPIOCGXASYNCMAP: if(copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) break; err = 0; break; case PPPIOCSXASYNCMAP: if(copy_from_user(accm, argp, sizeof(accm))) break; accm[2] &= ~0x40000000U; /* can't escape 0x5e */ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */ memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); err = 0; break; /* Max PPP frame size */ case PPPIOCGMRU: if(put_user(ap->mru, (int __user *) argp)) break; err = 0; break; case PPPIOCSMRU: if(get_user(val, (int __user *) argp)) break; if(val < PPP_MRU) val = PPP_MRU; ap->mru = val; err = 0; break; default: DEBUG(PPP_INFO, "Unsupported ioctl (0x%X)\n", cmd); err = -ENOIOCTLCMD; } DEXIT(PPP_TRACE, " - err = 0x%X\n", err); return err; } /************************** INITIALISATION **************************/ /* * Module initialisation and all that jazz... 
*/ /*------------------------------------------------------------------*/ /* * Hook our device callbacks in the filesystem, to connect our code * to /dev/irnet */ static inline int __init ppp_irnet_init(void) { int err = 0; DENTER(MODULE_TRACE, "()\n"); /* Allocate ourselves as a minor in the misc range */ err = misc_register(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Cleanup at exit... */ static inline void __exit ppp_irnet_cleanup(void) { DENTER(MODULE_TRACE, "()\n"); /* De-allocate /dev/irnet minor in misc range */ misc_deregister(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Module main entry point */ static int __init irnet_init(void) { int err; /* Initialise both parts... */ err = irda_irnet_init(); if(!err) err = ppp_irnet_init(); return err; } /*------------------------------------------------------------------*/ /* * Module exit */ static void __exit irnet_cleanup(void) { irda_irnet_cleanup(); ppp_irnet_cleanup(); } /*------------------------------------------------------------------*/ /* * Module magic */ module_init(irnet_init); module_exit(irnet_cleanup); MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>"); MODULE_DESCRIPTION("IrNET : Synchronous PPP over IrDA"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(10, 187);
gpl-2.0
invisiblek/android_kernel_lge_v4xx
drivers/net/irda/toim3232-sir.c
8890
12466
/********************************************************************* * * Filename: toim3232-sir.c * Version: 1.0 * Description: Implementation of dongles based on the Vishay/Temic * TOIM3232 SIR Endec chipset. Currently only the * IRWave IR320ST-2 is tested, although it should work * with any TOIM3232 or TOIM4232 chipset based RS232 * dongle with minimal modification. * Based heavily on the Tekram driver (tekram.c), * with thanks to Dag Brattli and Martin Diehl. * Status: Experimental. * Author: David Basden <davidb-irda@rcpt.to> * Created at: Thu Feb 09 23:47:32 2006 * * Copyright (c) 2006 David Basden. * Copyright (c) 1998-1999 Dag Brattli, * Copyright (c) 2002 Martin Diehl, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * This driver has currently only been tested on the IRWave IR320ST-2 * * PROTOCOL: * * The protocol for talking to the TOIM3232 is quite easy, and is * designed to interface with RS232 with only level convertors. The * BR/~D line on the chip is brought high to signal 'command mode', * where a command byte is sent to select the baudrate of the RS232 * interface and the pulse length of the IRDA output. When BR/~D * is brought low, the dongle then changes to the selected baudrate, * and the RS232 interface is used for data until BR/~D is brought * high again. The initial speed for the TOIMx323 after RESET is * 9600 baud. The baudrate for command-mode is the last selected * baud-rate, or 9600 after a RESET. 
* * The dongle I have (below) adds some extra hardware on the front end, * but this is mostly directed towards pariasitic power from the RS232 * line rather than changing very much about how to communicate with * the TOIM3232. * * The protocol to talk to the TOIM4232 chipset seems to be almost * identical to the TOIM3232 (and the 4232 datasheet is more detailed) * so this code will probably work on that as well, although I haven't * tested it on that hardware. * * Target dongle variations that might be common: * * DTR and RTS function: * The data sheet for the 4232 has a sample implementation that hooks the * DTR and RTS lines to the RESET and BaudRate/~Data lines of the * chip (through line-converters). Given both DTR and RTS would have to * be held low in normal operation, and the TOIMx232 requires +5V to * signal ground, most dongle designers would almost certainly choose * an implementation that kept at least one of DTR or RTS high in * normal operation to provide power to the dongle, but will likely * vary between designs. * * User specified command bits: * There are two user-controllable output lines from the TOIMx232 that * can be set low or high by setting the appropriate bits in the * high-nibble of the command byte (when setting speed and pulse length). * These might be used to switch on and off added hardware or extra * dongle features. * * * Target hardware: IRWave IR320ST-2 * * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver. * It uses a hex inverter and some discrete components to buffer and * line convert the RS232 down to 5V. * * The dongle is powered through a voltage regulator, fed by a large * capacitor. To switch the dongle on, DTR is brought high to charge * the capacitor and drive the voltage regulator. DTR isn't associated * with any control lines on the TOIM3232. 
Parisitic power is also taken * from the RTS, TD and RD lines when brought high, but through resistors. * When DTR is low, the circuit might lose power even with RTS high. * * RTS is inverted and attached to the BR/~D input pin. When RTS * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode. * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command * mode'. * * For some unknown reason, the RESET line isn't actually connected * to anything. This means to reset the dongle to get it to a known * state (9600 baud) you must drop DTR and RTS low, wait for the power * capacitor to discharge, and then bring DTR (and RTS for data mode) * high again, and wait for the capacitor to charge, the power supply * to stabilise, and the oscillator clock to stabilise. * * Fortunately, if the current baudrate is known, the chipset can * easily change speed by entering command mode without having to * reset the dongle first. * * Major Components: * * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings * to IRDA pulse timings * - 3.6864MHz crystal to drive TOIM3232 clock oscillator * - DM74lS04M Inverting Hex line buffer for RS232 input buffering * and level conversion * - PJ2951AC 150mA voltage regulator * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver * */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <net/irda/irda.h> #include "sir-dev.h" static int toim3232delay = 150; /* default is 150 ms */ module_param(toim3232delay, int, 0); MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); #if 0 static int toim3232flipdtr = 0; /* default is DTR high to reset */ module_param(toim3232flipdtr, int, 0); MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); static int toim3232fliprts = 0; /* default is RTS high for baud change */ module_param(toim3232fliptrs, int, 0); MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); #endif 
static int toim3232_open(struct sir_dev *); static int toim3232_close(struct sir_dev *); static int toim3232_change_speed(struct sir_dev *, unsigned); static int toim3232_reset(struct sir_dev *); #define TOIM3232_115200 0x00 #define TOIM3232_57600 0x01 #define TOIM3232_38400 0x02 #define TOIM3232_19200 0x03 #define TOIM3232_9600 0x06 #define TOIM3232_2400 0x0A #define TOIM3232_PW 0x10 /* Pulse select bit */ static struct dongle_driver toim3232 = { .owner = THIS_MODULE, .driver_name = "Vishay TOIM3232", .type = IRDA_TOIM3232_DONGLE, .open = toim3232_open, .close = toim3232_close, .reset = toim3232_reset, .set_speed = toim3232_change_speed, }; static int __init toim3232_sir_init(void) { if (toim3232delay < 1 || toim3232delay > 500) toim3232delay = 200; IRDA_DEBUG(1, "%s - using %d ms delay\n", toim3232.driver_name, toim3232delay); return irda_register_dongle(&toim3232); } static void __exit toim3232_sir_cleanup(void) { irda_unregister_dongle(&toim3232); } static int toim3232_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Pull the lines high to start with. * * For the IR320ST-2, we need to charge the main supply capacitor to * switch the device on. We keep DTR high throughout to do this. * When RTS, TD and RD are high, they will also trickle-charge the * cap. RTS is high for data transmission, and low for baud rate select. * -- DGB */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* The TOI3232 supports many speeds between 1200bps and 115000bps. * We really only care about those supported by the IRDA spec, but * 38400 seems to be implemented in many places */ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; /* From the tekram driver. 
Not sure what a reasonable value is -- DGB */ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int toim3232_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function toim3232change_speed (dev, state, speed) * * Set the speed for the TOIM3232 based dongle. Warning, this * function must be called with a process context! * * Algorithm * 1. keep DTR high but clear RTS to bring into baud programming mode * 2. wait at least 7us to enter programming mode * 3. send control word to set baud rate and timing * 4. wait at least 1us * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver) * 6. should take effect immediately (although probably worth waiting) */ #define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) { unsigned state = dev->fsm.substate; unsigned delay = 0; u8 byte; static int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__); switch(state) { case SIRDEV_STATE_DONGLE_SPEED: /* Figure out what we are going to send as a control byte */ switch (speed) { case 2400: byte = TOIM3232_PW|TOIM3232_2400; break; default: speed = 9600; ret = -EINVAL; /* fall thru */ case 9600: byte = TOIM3232_PW|TOIM3232_9600; break; case 19200: byte = TOIM3232_PW|TOIM3232_19200; break; case 38400: byte = TOIM3232_PW|TOIM3232_38400; break; case 57600: byte = TOIM3232_PW|TOIM3232_57600; break; case 115200: byte = TOIM3232_115200; break; } /* Set DTR, Clear RTS: Go into baud programming mode */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Wait at least 7us */ udelay(14); /* Write control byte */ sirdev_raw_write(dev, &byte, 1); dev->speed = speed; state = TOIM3232_STATE_WAIT_SPEED; delay = toim3232delay; break; case TOIM3232_STATE_WAIT_SPEED: /* Have transmitted control byte * Wait for 'at least 1us' */ 
udelay(14); /* Set DTR, Set RTS: Go into normal data mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait (TODO: check this is needed) */ udelay(50); break; default: printk(KERN_ERR "%s - undefined state %d\n", __func__, state); ret = -EINVAL; break; } dev->fsm.substate = state; return (delay > 0) ? delay : ret; } /* * Function toim3232reset (driver) * * This function resets the toim3232 dongle. Warning, this function * must be called with a process context!! * * What we should do is: * 0. Pull RESET high * 1. Wait for at least 7us * 2. Pull RESET low * 3. Wait for at least 7us * 4. Pull BR/~D high * 5. Wait for at least 7us * 6. Send control byte to set baud rate * 7. Wait at least 1us after stop bit * 8. Pull BR/~D low * 9. Should then be in data mode * * Because the IR320ST-2 doesn't have the RESET line connected for some reason, * we'll have to do something else. * * The default speed after a RESET is 9600, so lets try just bringing it up in * data mode after switching it off, waiting for the supply capacitor to * discharge, and then switch it back on. This isn't actually pulling RESET * high, but it seems to have the same effect. * * This behaviour will probably work on dongles that have the RESET line connected, * but if not, add a flag for the IR320ST-2, and implment the above-listed proper * behaviour. * * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we * need to have pull RTS low */ static int toim3232_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Switch off both DTR and RTS to switch off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); /* Should sleep a while. 
This might be evil doing it this way.*/ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(50)); /* Set DTR, Set RTS (data mode) */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait at least 10 ms for power to stabilize again */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); /* Speed should now be 9600 */ dev->speed = 9600; return 0; } MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>"); MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */ module_init(toim3232_sir_init); module_exit(toim3232_sir_cleanup);
gpl-2.0
k2wl/android_kernel_samsung_delos3geur
arch/tile/lib/strlen_32.c
9914
1158
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #undef strlen size_t strlen(const char *s) { /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint32_t *p = (const uint32_t *)(s_int & -4); /* Read the first word, but force bytes before the string to be nonzero. * This expression works because we know shift counts are taken mod 32. */ uint32_t v = *p | ((1 << (s_int << 3)) - 1); uint32_t bits; while ((bits = __insn_seqb(v, 0)) == 0) v = *++p; return ((const char *)p) + (__insn_ctz(bits) >> 3) - s; } EXPORT_SYMBOL(strlen);
gpl-2.0
Elnya/aosproject
drivers/scsi/scsi_ioctl.c
11962
9361
/*
 * Changes:
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 08/23/2000
 * - get rid of some verify_areas and use __copy*user and __get/put_user
 * for the ones that remain
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"

/* Retry/timeout policy shared by all internally-issued commands below. */
#define NORMAL_RETRIES 5
#define IOCTL_NORMAL_TIMEOUT (10 * HZ)

#define MAX_BUF PAGE_SIZE

/**
 * ioctl_probe  --  return host identification
 * @host:	host to identify
 * @buffer:	userspace buffer for identification
 *
 * Return an identifying string at @buffer, if @buffer is non-NULL, filling
 * to the length stored at * (int *) @buffer.
 */
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
	unsigned int len, slen;
	const char *string;

	if (buffer) {
		/* First word of the user buffer is the caller's buffer size. */
		if (get_user(len, (unsigned int __user *) buffer))
			return -EFAULT;

		/* Prefer the host template's info() string; fall back to name. */
		if (host->hostt->info)
			string = host->hostt->info(host);
		else
			string = host->hostt->name;
		if (string) {
			slen = strlen(string);
			if (len > slen)
				len = slen + 1;
			if (copy_to_user(buffer, string, len))
				return -EFAULT;
		}
	}
	/* Historical convention: 1 == success for this probe ioctl. */
	return 1;
}

/*
 * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
 * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
 *
 * dev is the SCSI device struct ptr, *(int *) arg is the length of the
 * input data, if any, not including the command string & counts,
 * *((int *)arg + 1) is the output buffer size in bytes.
 *
 * *(char *) ((int *) arg)[2] the actual command byte.
 *
 * Note that if more than MAX_BUF bytes are requested to be transferred,
 * the ioctl will fail with error EINVAL.
 *
 * This size *does not* include the initial lengths that were passed.
 *
 * The SCSI command is read from the memory location immediately after the
 * length words, and the input data is right after the command.  The SCSI
 * routines know the command size based on the opcode decode.
 *
 * The output area is then filled in starting from the command byte.
 */

/*
 * Issue a SCSI command with no data transfer and decode any sense data
 * returned, updating device state (lockable/changed) as a side effect.
 */
static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
				  int timeout, int retries)
{
	int result;
	struct scsi_sense_hdr sshdr;

	SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
				  &sshdr, timeout, retries, NULL);

	SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));

	if ((driver_byte(result) & DRIVER_SENSE) &&
	    (scsi_sense_valid(&sshdr))) {
		switch (sshdr.sense_key) {
		case ILLEGAL_REQUEST:
			/* Device rejected PREVENT/ALLOW: remember it can't
			 * lock its door so we stop asking. */
			if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
				sdev->lockable = 0;
			else
				printk(KERN_INFO "ioctl_internal_command: "
				       "ILLEGAL REQUEST asc=0x%x ascq=0x%x\n",
				       sshdr.asc, sshdr.ascq);
			break;
		case NOT_READY:	/* This happens if there is no disc in drive */
			if (sdev->removable)
				break;
			/* fall through - non-removable NOT_READY is an error */
		case UNIT_ATTENTION:
			if (sdev->removable) {
				sdev->changed = 1;
				result = 0;	/* This is no longer considered an error */
				break;
			}
			/* fall through */
		default:	/* Fall through for non-removable media */
			sdev_printk(KERN_INFO, sdev,
				    "ioctl_internal_command return code = %x\n",
				    result);
			scsi_print_sense_hdr(" ", &sshdr);
			break;
		}
	}

	SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n"));
	return result;
}

/*
 * Issue PREVENT/ALLOW MEDIUM REMOVAL and track the resulting lock state
 * in sdev->locked.  No-op (success) for non-removable or non-lockable
 * devices.
 */
int scsi_set_medium_removal(struct scsi_device *sdev, char state)
{
	char scsi_cmd[MAX_COMMAND_SIZE];
	int ret;

	if (!sdev->removable || !sdev->lockable)
		return 0;

	scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = state;	/* SCSI_REMOVAL_PREVENT or _ALLOW */
	scsi_cmd[5] = 0;

	ret = ioctl_internal_command(sdev, scsi_cmd,
			IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
	if (ret == 0)
		sdev->locked = (state == SCSI_REMOVAL_PREVENT);
	return ret;
}
EXPORT_SYMBOL(scsi_set_medium_removal);

/*
 * The scsi_ioctl_get_pci() function places into arg the value
 * pci_dev::slot_name (8 characters) for the PCI device (if any).
 * Returns: 0 on success
 *          -ENXIO if there isn't a PCI device pointer
 *                 (could be because the SCSI driver hasn't been
 *                  updated yet, or because it isn't a SCSI
 *                  device)
 *          any copy_to_user() error on failure there
 */
static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
{
	struct device *dev = scsi_get_device(sdev->host);
	const char *name;

	if (!dev)
		return -ENXIO;

	name = dev_name(dev);

	/* compatibility with old ioctl which only returned
	 * 20 characters */
	return copy_to_user(arg, name, min(strlen(name), (size_t)20))
		? -EFAULT: 0;
}


/**
 * scsi_ioctl - Dispatch ioctl to scsi device
 * @sdev: scsi device receiving ioctl
 * @cmd: which ioctl is it
 * @arg: data associated with ioctl
 *
 * Description: The scsi_ioctl() function differs from most ioctls in that it
 * does not take a major/minor number as the dev field.  Rather, it takes
 * a pointer to a &struct scsi_device.
 */
int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	char scsi_cmd[MAX_COMMAND_SIZE];

	/* No idea how this happens.... */
	if (!sdev)
		return -ENXIO;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	if (!scsi_block_when_processing_errors(sdev))
		return -ENODEV;

	/* Check for deprecated ioctls ... all the ioctls which don't
	 * follow the new unique numbering scheme are deprecated */
	switch (cmd) {
	case SCSI_IOCTL_SEND_COMMAND:
	case SCSI_IOCTL_TEST_UNIT_READY:
	case SCSI_IOCTL_BENCHMARK_COMMAND:
	case SCSI_IOCTL_SYNC:
	case SCSI_IOCTL_START_UNIT:
	case SCSI_IOCTL_STOP_UNIT:
		printk(KERN_WARNING "program %s is using a deprecated SCSI "
		       "ioctl, please convert it to SG_IO\n", current->comm);
		break;
	default:
		break;
	}

	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
		/* Pack host/channel/id/lun into one 32-bit word for userspace. */
		if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun)))
			return -EFAULT;

		__put_user((sdev->id & 0xff)
			 + ((sdev->lun & 0xff) << 8)
			 + ((sdev->channel & 0xff) << 16)
			 + ((sdev->host->host_no & 0xff) << 24),
			 &((struct scsi_idlun __user *)arg)->dev_id);
		__put_user(sdev->host->unique_id,
			 &((struct scsi_idlun __user *)arg)->host_unique_id);
		return 0;
	case SCSI_IOCTL_GET_BUS_NUMBER:
		return put_user(sdev->host->host_no, (int __user *)arg);
	case SCSI_IOCTL_PROBE_HOST:
		return ioctl_probe(sdev->host, arg);
	case SCSI_IOCTL_SEND_COMMAND:
		/* Raw command pass-through: privileged only. */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
	case SCSI_IOCTL_DOORLOCK:
		return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	case SCSI_IOCTL_DOORUNLOCK:
		return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	case SCSI_IOCTL_TEST_UNIT_READY:
		return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
					    NORMAL_RETRIES, NULL);
	case SCSI_IOCTL_START_UNIT:
		/* START STOP UNIT with start bit set (byte 4 == 1). */
		scsi_cmd[0] = START_STOP;
		scsi_cmd[1] = 0;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = 1;
		return ioctl_internal_command(sdev, scsi_cmd,
				     START_STOP_TIMEOUT, NORMAL_RETRIES);
	case SCSI_IOCTL_STOP_UNIT:
		/* START STOP UNIT with start bit clear (byte 4 == 0). */
		scsi_cmd[0] = START_STOP;
		scsi_cmd[1] = 0;
		scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
		scsi_cmd[4] = 0;
		return ioctl_internal_command(sdev, scsi_cmd,
				     START_STOP_TIMEOUT, NORMAL_RETRIES);
        case SCSI_IOCTL_GET_PCI:
                return scsi_ioctl_get_pci(sdev, arg);
	default:
		/* Unknown ioctls are offered to the low-level driver, if any. */
		if (sdev->host->hostt->ioctl)
			return sdev->host->hostt->ioctl(sdev, cmd, arg);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_ioctl);

/**
 * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
 * @sdev: scsi device receiving ioctl
 * @cmd: Must be SC_SCSI_RESET
 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
 * @ndelay: file mode O_NDELAY flag
 */
int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
			    void __user *arg, int ndelay)
{
	int val, result;

	/* The first set of iocts may be executed even if we're doing
	 * error processing, as long as the device was opened
	 * non-blocking */
	if (ndelay) {
		if (scsi_host_in_recovery(sdev->host))
			return -ENODEV;
	} else if (!scsi_block_when_processing_errors(sdev))
		return -ENODEV;

	switch (cmd) {
	case SG_SCSI_RESET:
		result = get_user(val, (int __user *)arg);
		if (result)
			return result;
		if (val == SG_SCSI_RESET_NOTHING)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_TARGET:
			val = SCSI_TRY_RESET_TARGET;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		/* Capability check done after arg validation on purpose:
		 * invalid requests get -EINVAL even for the unprivileged. */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdev, val) ==
			SUCCESS) ? 0 : -EIO;
	}
	return -ENODEV;
}
EXPORT_SYMBOL(scsi_nonblockable_ioctl);
gpl-2.0
rktaiwala/motog_falcon_sk_kernel
arch/sh/boards/mach-sdk7786/irq.c
13242
1090
/*
 * SDK7786 FPGA IRQ Controller Support.
 *
 * Copyright (C) 2010  Matt Fleming
 * Copyright (C) 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/irq.h>
#include <mach/fpga.h>
#include <mach/irq.h>

/* Bit positions of the interrupt sources in the FPGA status/mask registers. */
enum {
	ATA_IRQ_BIT		= 1,
	SPI_BUSY_BIT		= 2,
	LIRQ5_BIT		= 3,
	LIRQ6_BIT		= 4,
	LIRQ7_BIT		= 5,
	LIRQ8_BIT		= 6,
	KEY_IRQ_BIT		= 7,
	PEN_IRQ_BIT		= 8,
	ETH_IRQ_BIT		= 9,
	RTC_ALARM_BIT		= 10,
	CRYSTAL_FAIL_BIT	= 12,
	ETH_PME_BIT		= 14,
};

/*
 * Board IRQ setup: program the FPGA interrupt controller and route the
 * IRL lines.  Only the ethernet interrupt is unmasked here.
 */
void __init sdk7786_init_irq(void)
{
	unsigned int tmp;

	/* Enable priority encoding for all IRLs */
	fpga_write_reg(fpga_read_reg(INTMSR) | 0x0303, INTMSR);

	/* Clear FPGA interrupt status registers */
	fpga_write_reg(0x0000, INTASR);
	fpga_write_reg(0x0000, INTBSR);

	/* Unmask FPGA interrupts (read-modify-write: clear ETH mask bit only) */
	tmp = fpga_read_reg(INTAMR);
	tmp &= ~(1 << ETH_IRQ_BIT);
	fpga_write_reg(tmp, INTAMR);

	plat_irq_setup_pins(IRQ_MODE_IRL7654_MASK);
	plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK);
}
gpl-2.0
jacobrivers123/kernel-nk1-negalite-lt02ltespr
fs/fscache/proc.c
14522
1933
/* FS-Cache statistics viewing interface * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL OPERATION #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" /* * initialise the /proc/fs/fscache/ directory */ int __init fscache_proc_init(void) { _enter(""); if (!proc_mkdir("fs/fscache", NULL)) goto error_dir; #ifdef CONFIG_FSCACHE_STATS if (!proc_create("fs/fscache/stats", S_IFREG | 0444, NULL, &fscache_stats_fops)) goto error_stats; #endif #ifdef CONFIG_FSCACHE_HISTOGRAM if (!proc_create("fs/fscache/histogram", S_IFREG | 0444, NULL, &fscache_histogram_fops)) goto error_histogram; #endif #ifdef CONFIG_FSCACHE_OBJECT_LIST if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL, &fscache_objlist_fops)) goto error_objects; #endif _leave(" = 0"); return 0; #ifdef CONFIG_FSCACHE_OBJECT_LIST error_objects: #endif #ifdef CONFIG_FSCACHE_HISTOGRAM remove_proc_entry("fs/fscache/histogram", NULL); error_histogram: #endif #ifdef CONFIG_FSCACHE_STATS remove_proc_entry("fs/fscache/stats", NULL); error_stats: #endif remove_proc_entry("fs/fscache", NULL); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * clean up the /proc/fs/fscache/ directory */ void fscache_proc_cleanup(void) { #ifdef CONFIG_FSCACHE_OBJECT_LIST remove_proc_entry("fs/fscache/objects", NULL); #endif #ifdef CONFIG_FSCACHE_HISTOGRAM remove_proc_entry("fs/fscache/histogram", NULL); #endif #ifdef CONFIG_FSCACHE_STATS remove_proc_entry("fs/fscache/stats", NULL); #endif remove_proc_entry("fs/fscache", NULL); }
gpl-2.0
thornbirdblue/hero-2.6.29-5f74b252
fs/udf/misc.c
187
7765
/*
 * misc.c
 *
 * PURPOSE
 *	Miscellaneous routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  04/19/99 blf  partial support for reading/writing specific EA's
 */

#include "udfdecl.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/crc-itu-t.h>

#include "udf_i.h"
#include "udf_sb.h"

/* getblk wrapper that applies the fixed->variable block remapping when the
 * VARCONV mount flag is set. */
struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
{
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
		return sb_getblk(sb, udf_fixed_to_variable(block));
	else
		return sb_getblk(sb, block);
}

/* bread wrapper, same VARCONV remapping as udf_tgetblk. */
struct buffer_head *udf_tread(struct super_block *sb, int block)
{
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
		return sb_bread(sb, udf_fixed_to_variable(block));
	else
		return sb_bread(sb, block);
}

/*
 * Reserve space for a new extended attribute of @size bytes inside the
 * in-inode EA area, creating the EA header descriptor on first use and
 * shifting the existing imp-use/app-use attribute regions so the new
 * attribute lands in the region selected by @type (<2048 => before imp
 * and app regions, <65536 => imp-use region, else app-use region).
 * Returns a pointer to the reserved space, or NULL if it doesn't fit or
 * the existing EA header fails validation.  @loc bit 0 requests in-inode
 * placement; bit 1 (EA block placement) is not implemented.
 */
struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
					   uint32_t type, uint8_t loc)
{
	uint8_t *ea = NULL, *ad = NULL;
	int offset;
	uint16_t crclen;
	struct udf_inode_info *iinfo = UDF_I(inode);

	ea = iinfo->i_ext.i_data;
	if (iinfo->i_lenEAttr) {
		ad = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
	} else {
		/* No EAs yet: the header descriptor must be created too. */
		ad = ea;
		size += sizeof(struct extendedAttrHeaderDesc);
	}

	/* Free bytes between the end of the allocation descriptors and the
	 * end of the in-inode data area. */
	offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
		iinfo->i_lenAlloc;

	/* TODO - Check for FreeEASpace */

	if (loc & 0x01 && offset >= size) {
		struct extendedAttrHeaderDesc *eahd;
		eahd = (struct extendedAttrHeaderDesc *)ea;

		/* Push the allocation descriptors out of the way first. */
		if (iinfo->i_lenAlloc)
			memmove(&ad[size], ad, iinfo->i_lenAlloc);

		if (iinfo->i_lenEAttr) {
			/* check checksum/crc */
			if (eahd->descTag.tagIdent !=
					cpu_to_le16(TAG_IDENT_EAHD) ||
			    le32_to_cpu(eahd->descTag.tagLocation) !=
					iinfo->i_location.logicalBlockNum)
				return NULL;
		} else {
			/* First EA: build a fresh header descriptor. */
			struct udf_sb_info *sbi = UDF_SB(inode->i_sb);

			size -= sizeof(struct extendedAttrHeaderDesc);
			iinfo->i_lenEAttr +=
				sizeof(struct extendedAttrHeaderDesc);
			eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
			if (sbi->s_udfrev >= 0x0200)
				eahd->descTag.descVersion = cpu_to_le16(3);
			else
				eahd->descTag.descVersion = cpu_to_le16(2);
			eahd->descTag.tagSerialNum =
					cpu_to_le16(sbi->s_serial_number);
			eahd->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
			/* 0xFFFFFFFF == region not present yet. */
			eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
			eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
		}

		offset = iinfo->i_lenEAttr;
		if (type < 2048) {
			/* Insert before both imp-use and app-use regions:
			 * shift each region that exists up by @size. */
			if (le32_to_cpu(eahd->appAttrLocation) <
					iinfo->i_lenEAttr) {
				uint32_t aal =
					le32_to_cpu(eahd->appAttrLocation);
				memmove(&ea[offset - aal + size],
					&ea[aal], offset - aal);
				offset -= aal;
				eahd->appAttrLocation =
						cpu_to_le32(aal + size);
			}
			if (le32_to_cpu(eahd->impAttrLocation) <
					iinfo->i_lenEAttr) {
				uint32_t ial =
					le32_to_cpu(eahd->impAttrLocation);
				memmove(&ea[offset - ial + size],
					&ea[ial], offset - ial);
				offset -= ial;
				eahd->impAttrLocation =
						cpu_to_le32(ial + size);
			}
		} else if (type < 65536) {
			/* Imp-use attribute: only the app-use region (if any)
			 * needs to move. */
			if (le32_to_cpu(eahd->appAttrLocation) <
					iinfo->i_lenEAttr) {
				uint32_t aal =
					le32_to_cpu(eahd->appAttrLocation);
				memmove(&ea[offset - aal + size],
					&ea[aal], offset - aal);
				offset -= aal;
				eahd->appAttrLocation =
						cpu_to_le32(aal + size);
			}
		}
		/* rewrite CRC + checksum of eahd */
		crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
		eahd->descTag.descCRCLength = cpu_to_le16(crclen);
		eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
						sizeof(tag), crclen));
		eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
		iinfo->i_lenEAttr += size;
		return (struct genericFormat *)&ea[offset];
	}
	/* NOTE(review): EA-block placement (loc bit 1) is intentionally a
	 * no-op here - never implemented. */
	if (loc & 0x02)
		;
	return NULL;
}

/*
 * Look up an extended attribute by (type, subtype) in the in-inode EA
 * area.  Returns a pointer into i_data, or NULL if absent or the EA
 * header descriptor fails validation.
 */
struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
					   uint8_t subtype)
{
	struct genericFormat *gaf;
	uint8_t *ea = NULL;
	uint32_t offset;
	struct udf_inode_info *iinfo = UDF_I(inode);

	ea = iinfo->i_ext.i_data;

	if (iinfo->i_lenEAttr) {
		struct extendedAttrHeaderDesc *eahd;
		eahd = (struct extendedAttrHeaderDesc *)ea;

		/* check checksum/crc */
		if (eahd->descTag.tagIdent !=
				cpu_to_le16(TAG_IDENT_EAHD) ||
		    le32_to_cpu(eahd->descTag.tagLocation) !=
				iinfo->i_location.logicalBlockNum)
			return NULL;

		/* Start scanning at the region matching the type class
		 * (same classification as udf_add_extendedattr). */
		if (type < 2048)
			offset = sizeof(struct extendedAttrHeaderDesc);
		else if (type < 65536)
			offset = le32_to_cpu(eahd->impAttrLocation);
		else
			offset = le32_to_cpu(eahd->appAttrLocation);

		while (offset < iinfo->i_lenEAttr) {
			gaf = (struct genericFormat *)&ea[offset];
			if (le32_to_cpu(gaf->attrType) == type &&
					gaf->attrSubtype == subtype)
				return gaf;
			else
				offset += le32_to_cpu(gaf->attrLength);
		}
	}

	return NULL;
}

/*
 * udf_read_tagged
 *
 * PURPOSE
 *	Read the first block of a tagged descriptor.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 * Validates location, tag checksum, descriptor version and CRC; stores
 * the tag identifier in *ident.  Returns the buffer_head on success,
 * NULL on any validation failure (buffer released).
 */
struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
				    uint32_t location, uint16_t *ident)
{
	tag *tag_p;
	struct buffer_head *bh = NULL;

	/* Read the block */
	if (block == 0xFFFFFFFF)
		return NULL;

	bh = udf_tread(sb, block);
	if (!bh) {
		udf_debug("block=%d, location=%d: read failed\n",
			  block, location);
		return NULL;
	}

	tag_p = (tag *)(bh->b_data);

	*ident = le16_to_cpu(tag_p->tagIdent);

	if (location != le32_to_cpu(tag_p->tagLocation)) {
		udf_debug("location mismatch block %u, tag %u != %u\n",
			  block, le32_to_cpu(tag_p->tagLocation), location);
		goto error_out;
	}

	/* Verify the tag checksum */
	if (udf_tag_checksum(tag_p) != tag_p->tagChecksum) {
		printk(KERN_ERR "udf: tag checksum failed block %d\n", block);
		goto error_out;
	}

	/* Verify the tag version */
	if (tag_p->descVersion != cpu_to_le16(0x0002U) &&
	    tag_p->descVersion != cpu_to_le16(0x0003U)) {
		udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n",
			  le16_to_cpu(tag_p->descVersion), block);
		goto error_out;
	}

	/* Verify the descriptor CRC
	 * NOTE(review): the first clause ACCEPTS a descriptor whose claimed
	 * CRC length exceeds the block (skipping the CRC check entirely)
	 * rather than rejecting it - presumably to tolerate broken media;
	 * later kernels tightened this. */
	if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
	    le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
					bh->b_data + sizeof(tag),
					le16_to_cpu(tag_p->descCRCLength)))
		return bh;

	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
		  le16_to_cpu(tag_p->descCRC),
		  le16_to_cpu(tag_p->descCRCLength));

error_out:
	brelse(bh);
	return NULL;
}

/* As udf_read_tagged, but translates a logical (partition-relative)
 * address to a physical block first. */
struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
				     uint32_t offset, uint16_t *ident)
{
	return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
			       loc.logicalBlockNum + offset, ident);
}

/* Recompute the CRC (over everything after the tag) and the tag checksum
 * for a descriptor of @length bytes at @data. */
void udf_update_tag(char *data, int length)
{
	tag *tptr = (tag *)data;
	length -= sizeof(tag);

	tptr->descCRCLength = cpu_to_le16(length);
	tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(tag), length));
	tptr->tagChecksum = udf_tag_checksum(tptr);
}

/* Fill in a fresh descriptor tag and then compute its CRC/checksum. */
void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
		 uint32_t loc, int length)
{
	tag *tptr = (tag *)data;
	tptr->tagIdent = cpu_to_le16(ident);
	tptr->descVersion = cpu_to_le16(version);
	tptr->tagSerialNum = cpu_to_le16(snum);
	tptr->tagLocation = cpu_to_le32(loc);
	udf_update_tag(data, length);
}

/* Sum of all tag bytes except the checksum byte itself (offset 4). */
u8 udf_tag_checksum(const tag *t)
{
	u8 *data = (u8 *)t;
	u8 checksum = 0;
	int i;
	for (i = 0; i < sizeof(tag); ++i)
		if (i != 4) /* position of checksum */
			checksum += data[i];
	return checksum;
}
gpl-2.0
kbc-developers/android_kernel_htc_m7wlj
fs/fuse/inode.c
187
27919
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/suspend.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

/* Slab cache for struct fuse_inode (created elsewhere in this file). */
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, struct kernel_param *kp);

/* Writable sysfs knobs; set_global_limit validates/propagates writes. */
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

#define FUSE_DEFAULT_MAX_BACKGROUND 12

#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)

/* Parsed mount options; *_present flags record which were given. */
struct fuse_mount_data {
	int fd;
	unsigned rootmode;
	unsigned user_id;
	unsigned group_id;
	unsigned fd_present:1;
	unsigned rootmode_present:1;
	unsigned user_id_present:1;
	unsigned group_id_present:1;
	unsigned flags;
	unsigned max_read;
	unsigned blksize;
};

/* Allocate a zeroed forget-link; NULL on allocation failure. */
struct fuse_forget_link *fuse_alloc_forget(void)
{
	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
}

/*
 * super_operations::alloc_inode.  The forget link is pre-allocated here
 * so that eviction can always queue a FORGET without allocating.
 */
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct inode *inode;
	struct fuse_inode *fi;

	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
	if (!inode)
		return NULL;

	fi = get_fuse_inode(inode);
	fi->i_time = 0;
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->writectr = 0;
	fi->orig_ino = 0;
	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	INIT_LIST_HEAD(&fi->writepages);
	init_waitqueue_head(&fi->page_waitq);
	fi->forget = fuse_alloc_forget();
	if (!fi->forget) {
		kmem_cache_free(fuse_inode_cachep, inode);
		return NULL;
	}

	return inode;
}

/* RCU callback: final free of the inode after the grace period. */
static void fuse_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(fuse_inode_cachep, inode);
}

static void fuse_destroy_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	BUG_ON(!list_empty(&fi->write_files));
	BUG_ON(!list_empty(&fi->queued_writes));
	kfree(fi->forget);
	call_rcu(&inode->i_rcu, fuse_i_callback);
}

/*
 * super_operations::evict_inode.  Queues the pre-allocated FORGET for
 * the userspace server, but only while the sb is still active (on
 * unmount the connection handles forgets differently).
 */
static void fuse_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	if (inode->i_sb->s_flags & MS_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
		fi->forget = NULL;
	}
}

/* Remount: only reject mandatory locking; everything else is a no-op. */
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
	if (*flags & MS_MANDLOCK)
		return -EINVAL;

	return 0;
}

/*
 * Fold a 64-bit server inode number into ino_t on 32-bit kernels by
 * XORing the high half down; identity on 64-bit.
 */
static ino_t fuse_squash_ino(u64 ino64)
{
	ino_t ino = (ino_t) ino64;
	if (sizeof(ino_t) < sizeof(u64))
		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
	return ino;
}

/*
 * Copy server-supplied attributes into the VFS inode (no size handling;
 * see fuse_change_attributes for that).  Caller must hold fc->lock per
 * the locking in fuse_change_attributes below.
 */
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	fi->attr_version = ++fc->attr_version;
	fi->i_time = attr_valid;

	inode->i_ino     = fuse_squash_ino(attr->ino);
	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid     = attr->uid;
	inode->i_gid     = attr->gid;
	inode->i_blocks  = attr->blocks;
	inode->i_atime.tv_sec   = attr->atime;
	inode->i_atime.tv_nsec  = attr->atimensec;
	inode->i_mtime.tv_sec   = attr->mtime;
	inode->i_mtime.tv_nsec  = attr->mtimensec;
	inode->i_ctime.tv_sec   = attr->ctime;
	inode->i_ctime.tv_nsec  = attr->ctimensec;

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;
}

/*
 * Update attributes including i_size, discarding the update if a newer
 * attr_version is already installed.  On a size change of a regular
 * file the page cache is truncated/invalidated.
 * NOTE(review): the lock_system_sleep()/unlock_system_sleep() bracket
 * around the truncate is a vendor addition (suspend interaction) not in
 * mainline - verify before removing.
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t oldsize;

	spin_lock(&fc->lock);
	if (attr_version != 0 && fi->attr_version > attr_version) {
		spin_unlock(&fc->lock);
		return;
	}

	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);

	if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
		lock_system_sleep();
		truncate_pagecache(inode, oldsize, attr->size);
		invalidate_inode_pages2(inode->i_mapping);
		unlock_system_sleep();
	}
}

/* Initialise per-type inode ops for a freshly created fuse inode. */
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();
}

/* iget5 comparison callback: match on server nodeid. */
int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	u64 nodeid = *(u64 *) _nodeidp;
	if (get_node_id(inode) == nodeid)
		return 1;
	else
		return 0;
}

static int
fuse_inode_set(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; get_fuse_inode(inode)->nodeid = nodeid; return 0; } struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int generation, struct fuse_attr *attr, u64 attr_valid, u64 attr_version) { struct inode *inode; struct fuse_inode *fi; struct fuse_conn *fc = get_fuse_conn_super(sb); retry: inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid); if (!inode) return NULL; if ((inode->i_state & I_NEW)) { inode->i_flags |= S_NOATIME|S_NOCMTIME; inode->i_generation = generation; inode->i_data.backing_dev_info = &fc->bdi; fuse_init_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { make_bad_inode(inode); iput(inode); goto retry; } fi = get_fuse_inode(inode); spin_lock(&fc->lock); fi->nlookup++; spin_unlock(&fc->lock); fuse_change_attributes(inode, attr, attr_valid, attr_version); return inode; } int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) return -ENOENT; fuse_invalidate_attr(inode); if (offset >= 0) { pg_start = offset >> PAGE_CACHE_SHIFT; if (len <= 0) pg_end = -1; else pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } iput(inode); return 0; } static void fuse_umount_begin(struct super_block *sb) { fuse_abort_conn(get_fuse_conn_super(sb)); } static void fuse_send_destroy(struct fuse_conn *fc) { struct fuse_req *req = fc->destroy_req; if (req && fc->conn_init) { fc->destroy_req = NULL; req->in.h.opcode = FUSE_DESTROY; req->force = 1; fuse_request_send(fc, req); fuse_put_request(fc, req); } } static void fuse_bdi_destroy(struct fuse_conn *fc) { if (fc->bdi_initialized) bdi_destroy(&fc->bdi); } void fuse_conn_kill(struct fuse_conn *fc) { spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; 
spin_unlock(&fc->lock); kill_fasync(&fc->fasync, SIGIO, POLL_IN); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); mutex_lock(&fuse_mutex); list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); fuse_conn_kill(fc); fuse_conn_put(fc); } static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; stbuf->f_files = attr->files; stbuf->f_ffree = attr->ffree; stbuf->f_namelen = attr->namelen; } static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); struct fuse_req *req; struct fuse_statfs_out outarg; int err; if (!fuse_allow_task(fc, current)) { buf->f_type = FUSE_SUPER_MAGIC; return 0; } req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); memset(&outarg, 0, sizeof(outarg)); req->in.numargs = 0; req->in.h.opcode = FUSE_STATFS; req->in.h.nodeid = get_node_id(dentry->d_inode); req->out.numargs = 1; req->out.args[0].size = fc->minor < 4 ? 
FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; if (!err) convert_fuse_statfs(buf, &outarg.st); fuse_put_request(fc, req); return err; } enum { OPT_FD, OPT_ROOTMODE, OPT_USER_ID, OPT_GROUP_ID, OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, OPT_BLKSIZE, OPT_ERR }; static const match_table_t tokens = { {OPT_FD, "fd=%u"}, {OPT_ROOTMODE, "rootmode=%o"}, {OPT_USER_ID, "user_id=%u"}, {OPT_GROUP_ID, "group_id=%u"}, {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, {OPT_BLKSIZE, "blksize=%u"}, {OPT_ERR, NULL} }; static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; d->blksize = FUSE_DEFAULT_BLKSIZE; while ((p = strsep(&opt, ",")) != NULL) { int token; int value; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case OPT_FD: if (match_int(&args[0], &value)) return 0; d->fd = value; d->fd_present = 1; break; case OPT_ROOTMODE: if (match_octal(&args[0], &value)) return 0; if (!fuse_valid_type(value)) return 0; d->rootmode = value; d->rootmode_present = 1; break; case OPT_USER_ID: if (match_int(&args[0], &value)) return 0; d->user_id = value; d->user_id_present = 1; break; case OPT_GROUP_ID: if (match_int(&args[0], &value)) return 0; d->group_id = value; d->group_id_present = 1; break; case OPT_DEFAULT_PERMISSIONS: d->flags |= FUSE_DEFAULT_PERMISSIONS; break; case OPT_ALLOW_OTHER: d->flags |= FUSE_ALLOW_OTHER; break; case OPT_MAX_READ: if (match_int(&args[0], &value)) return 0; d->max_read = value; break; case OPT_BLKSIZE: if (!is_bdev || match_int(&args[0], &value)) return 0; d->blksize = value; break; default: return 0; } } if (!d->fd_present || !d->rootmode_present || !d->user_id_present || !d->group_id_present) return 0; return 1; } static int fuse_show_options(struct 
seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); seq_printf(m, ",user_id=%u", fc->user_id); seq_printf(m, ",group_id=%u", fc->group_id); if (fc->flags & FUSE_DEFAULT_PERMISSIONS) seq_puts(m, ",default_permissions"); if (fc->flags & FUSE_ALLOW_OTHER) seq_puts(m, ",allow_other"); if (fc->max_read != ~0) seq_printf(m, ",max_read=%u", fc->max_read); if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) seq_printf(m, ",blksize=%lu", sb->s_blocksize); return 0; } void fuse_conn_init(struct fuse_conn *fc) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); mutex_init(&fc->inst_mutex); init_rwsem(&fc->killsb); atomic_set(&fc->count, 1); init_waitqueue_head(&fc->waitq); init_waitqueue_head(&fc->blocked_waitq); init_waitqueue_head(&fc->reserved_req_waitq); INIT_LIST_HEAD(&fc->pending); INIT_LIST_HEAD(&fc->processing); INIT_LIST_HEAD(&fc->io); INIT_LIST_HEAD(&fc->interrupts); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); fc->forget_list_tail = &fc->forget_list_head; atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; fc->blocked = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); } EXPORT_SYMBOL_GPL(fuse_conn_init); void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { if (fc->destroy_req) fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); fc->release(fc); } } EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { atomic_inc(&fc->count); return fc; } EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) { struct fuse_attr attr; memset(&attr, 0, sizeof(attr)); attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; return fuse_iget(sb, 1, 0, &attr, 0, 0); } 
struct fuse_inode_handle { u64 nodeid; u32 generation; }; static struct dentry *fuse_get_dentry(struct super_block *sb, struct fuse_inode_handle *handle) { struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *inode; struct dentry *entry; int err = -ESTALE; if (handle->nodeid == 0) goto out_err; inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid); if (!inode) { struct fuse_entry_out outarg; struct qstr name; if (!fc->export_support) goto out_err; name.len = 1; name.name = "."; err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg, &inode); if (err && err != -ENOENT) goto out_err; if (err || !inode) { err = -ESTALE; goto out_err; } err = -EIO; if (get_node_id(inode) != handle->nodeid) goto out_iput; } err = -ESTALE; if (inode->i_generation != handle->generation) goto out_iput; entry = d_obtain_alias(inode); if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); return entry; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; bool encode_parent = connectable && !S_ISDIR(inode->i_mode); int len = encode_parent ? 6 : 3; u64 nodeid; u32 generation; if (*max_len < len) { *max_len = len; return 255; } nodeid = get_fuse_inode(inode)->nodeid; generation = inode->i_generation; fh[0] = (u32)(nodeid >> 32); fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; if (encode_parent) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; spin_unlock(&dentry->d_lock); fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); fh[5] = generation; } *max_len = len; return encode_parent ? 
0x82 : 0x81; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle handle; if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3) return NULL; handle.nodeid = (u64) fid->raw[0] << 32; handle.nodeid |= (u64) fid->raw[1]; handle.generation = fid->raw[2]; return fuse_get_dentry(sb, &handle); } static struct dentry *fuse_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle parent; if (fh_type != 0x82 || fh_len < 6) return NULL; parent.nodeid = (u64) fid->raw[3] << 32; parent.nodeid |= (u64) fid->raw[4]; parent.generation = fid->raw[5]; return fuse_get_dentry(sb, &parent); } static struct dentry *fuse_get_parent(struct dentry *child) { struct inode *child_inode = child->d_inode; struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; struct qstr name; int err; if (!fc->export_support) return ERR_PTR(-ESTALE); name.len = 2; name.name = ".."; err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), &name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); return ERR_PTR(err); } parent = d_obtain_alias(inode); if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); return parent; } static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, .encode_fh = fuse_encode_fh, .get_parent = fuse_get_parent, }; static const struct super_operations fuse_super_operations = { .alloc_inode = fuse_alloc_inode, .destroy_inode = fuse_destroy_inode, .evict_inode = fuse_evict_inode, .drop_inode = generic_delete_inode, .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, .show_options = fuse_show_options, }; static void sanitize_global_limit(unsigned *limit) { if (*limit == 0) 
*limit = ((num_physpages << PAGE_SHIFT) >> 13) / sizeof(struct fuse_req); if (*limit >= 1 << 16) *limit = (1 << 16) - 1; } static int set_global_limit(const char *val, struct kernel_param *kp) { int rv; rv = param_set_uint(val, kp); if (rv) return rv; sanitize_global_limit((unsigned *)kp->arg); return 0; } static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) { int cap_sys_admin = capable(CAP_SYS_ADMIN); if (arg->minor < 13) return; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); if (arg->max_background) { fc->max_background = arg->max_background; if (!cap_sys_admin && fc->max_background > max_user_bgreq) fc->max_background = max_user_bgreq; } if (arg->congestion_threshold) { fc->congestion_threshold = arg->congestion_threshold; if (!cap_sys_admin && fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } } static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_out *arg = &req->misc.init_out; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) fc->conn_error = 1; else { unsigned long ra_pages; process_init_limits(fc, arg); if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; if (arg->minor >= 17) { if (!(arg->flags & FUSE_FLOCK_LOCKS)) fc->no_flock = 1; } else { if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_flock = 1; } if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { if (arg->flags & FUSE_EXPORT_SUPPORT) fc->export_support = 1; } if (arg->flags & FUSE_BIG_WRITES) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 
4096 : arg->max_write; fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_in *arg = &req->misc.init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_FLOCK_LOCKS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); req->in.args[0].value = arg; req->out.numargs = 1; req->out.argvar = 1; req->out.args[0].size = sizeof(struct fuse_init_out); req->out.args[0].value = &req->misc.init_out; req->end = process_init_reply; fuse_request_send_background(fc, req); } static void fuse_free_conn(struct fuse_conn *fc) { kfree(fc); } static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) { int err; fc->bdi.name = "fuse"; fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; err = bdi_init(&fc->bdi); if (err) return err; fc->bdi_initialized = 1; if (sb->s_bdev) { err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", MAJOR(fc->dev), MINOR(fc->dev)); } else { err = bdi_register_dev(&fc->bdi, fc->dev); } if (err) return err; bdi_set_max_ratio(&fc->bdi, 1); return 0; } static int fuse_fill_super(struct super_block *sb, void *data, int silent) { struct fuse_conn *fc; struct inode *root; struct fuse_mount_data d; struct file *file; struct dentry *root_dentry; struct fuse_req *init_req; int err; int is_bdev = sb->s_bdev != NULL; err = -EINVAL; if (sb->s_flags & MS_MANDLOCK) goto err; sb->s_flags &= ~MS_NOSEC; if (!parse_fuse_opt((char *) data, &d, is_bdev)) goto err; if (is_bdev) { #ifdef CONFIG_BLOCK err = -EINVAL; if (!sb_set_blocksize(sb, d.blksize)) goto err; #endif } else { sb->s_blocksize = 
PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = 1; sb->s_export_op = &fuse_export_operations; file = fget(d.fd); err = -EINVAL; if (!file) goto err; if (file->f_op != &fuse_dev_operations) goto err_fput; fc = kmalloc(sizeof(*fc), GFP_KERNEL); err = -ENOMEM; if (!fc) goto err_fput; fuse_conn_init(fc); fc->dev = sb->s_dev; fc->sb = sb; err = fuse_bdi_init(fc, sb); if (err) goto err_put_conn; sb->s_bdi = &fc->bdi; if (sb->s_flags & MS_POSIXACL) fc->dont_mask = 1; sb->s_flags |= MS_POSIXACL; fc->release = fuse_free_conn; fc->flags = d.flags; fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); sb->s_fs_info = fc; err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); root_dentry = d_make_root(root); if (!root_dentry) goto err_put_conn; sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(); if (!init_req) goto err_put_root; if (is_bdev) { fc->destroy_req = fuse_request_alloc(); if (!fc->destroy_req) goto err_free_init_req; } mutex_lock(&fuse_mutex); err = -EINVAL; if (file->private_data) goto err_unlock; err = fuse_ctl_add_conn(fc); if (err) goto err_unlock; list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; fc->connected = 1; file->private_data = fuse_conn_get(fc); mutex_unlock(&fuse_mutex); fput(file); fuse_send_init(fc, init_req); return 0; err_unlock: mutex_unlock(&fuse_mutex); err_free_init_req: fuse_request_free(init_req); err_put_root: dput(root_dentry); err_put_conn: fuse_bdi_destroy(fc); fuse_conn_put(fc); err_fput: fput(file); err: return err; } static struct dentry *fuse_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); } static void fuse_kill_sb_anon(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { 
down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_anon_super(sb); } static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", .fs_flags = FS_HAS_SUBTYPE, .mount = fuse_mount, .kill_sb = fuse_kill_sb_anon, }; #ifdef CONFIG_BLOCK static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super); } static void fuse_kill_sb_blk(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } kill_block_super(sb); } static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", .mount = fuse_mount_blk, .kill_sb = fuse_kill_sb_blk, .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE, }; static inline int register_fuseblk(void) { return register_filesystem(&fuseblk_fs_type); } static inline void unregister_fuseblk(void) { unregister_filesystem(&fuseblk_fs_type); } #else static inline int register_fuseblk(void) { return 0; } static inline void unregister_fuseblk(void) { } #endif static void fuse_inode_init_once(void *foo) { struct inode *inode = foo; inode_init_once(inode); } static int __init fuse_fs_init(void) { int err; fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), 0, SLAB_HWCACHE_ALIGN, fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out; err = register_fuseblk(); if (err) goto out2; err = register_filesystem(&fuse_fs_type); if (err) goto out3; return 0; out3: unregister_fuseblk(); out2: kmem_cache_destroy(fuse_inode_cachep); out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } static struct kobject *fuse_kobj; static struct kobject *connections_kobj; static int fuse_sysfs_init(void) { int err; fuse_kobj = kobject_create_and_add("fuse", 
fs_kobj); if (!fuse_kobj) { err = -ENOMEM; goto out_err; } connections_kobj = kobject_create_and_add("connections", fuse_kobj); if (!connections_kobj) { err = -ENOMEM; goto out_fuse_unregister; } return 0; out_fuse_unregister: kobject_put(fuse_kobj); out_err: return err; } static void fuse_sysfs_cleanup(void) { kobject_put(connections_kobj); kobject_put(fuse_kobj); } static int __init fuse_init(void) { int res; printk(KERN_INFO "fuse init (API version %i.%i)\n", FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); INIT_LIST_HEAD(&fuse_conn_list); res = fuse_fs_init(); if (res) goto err; res = fuse_dev_init(); if (res) goto err_fs_cleanup; res = fuse_sysfs_init(); if (res) goto err_dev_cleanup; res = fuse_ctl_init(); if (res) goto err_sysfs_cleanup; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); return 0; err_sysfs_cleanup: fuse_sysfs_cleanup(); err_dev_cleanup: fuse_dev_cleanup(); err_fs_cleanup: fuse_fs_cleanup(); err: return res; } static void __exit fuse_exit(void) { printk(KERN_DEBUG "fuse exit\n"); fuse_ctl_cleanup(); fuse_sysfs_cleanup(); fuse_fs_cleanup(); fuse_dev_cleanup(); } module_init(fuse_init); module_exit(fuse_exit);
gpl-2.0
v1ron/kernel-wm8505
drivers/acpi/container.c
187
7095
/* * acpi_container.c - ACPI Generic Container Driver * ($Revision: ) * * Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com) * Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com) * Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com) * Copyright (C) 2004 Intel Corp. * Copyright (C) 2004 FUJITSU LIMITED * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/container.h> #define ACPI_CONTAINER_DEVICE_NAME "ACPI container device" #define ACPI_CONTAINER_CLASS "container" #define INSTALL_NOTIFY_HANDLER 1 #define UNINSTALL_NOTIFY_HANDLER 2 #define _COMPONENT ACPI_CONTAINER_COMPONENT ACPI_MODULE_NAME("container"); MODULE_AUTHOR("Anil S Keshavamurthy"); MODULE_DESCRIPTION("ACPI container driver"); MODULE_LICENSE("GPL"); static int acpi_container_add(struct acpi_device *device); static int acpi_container_remove(struct acpi_device *device, int type); static const struct acpi_device_id container_device_ids[] = { {"ACPI0004", 0}, {"PNP0A05", 0}, {"PNP0A06", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, container_device_ids); static struct acpi_driver acpi_container_driver = { .name = "container", .class = ACPI_CONTAINER_CLASS, .ids = container_device_ids, .ops = { .add = acpi_container_add, .remove = acpi_container_remove, }, }; /*******************************************************************/ static int is_device_present(acpi_handle handle) { acpi_handle temp; acpi_status status; unsigned long long sta; status = acpi_get_handle(handle, "_STA", &temp); if (ACPI_FAILURE(status)) return 1; /* _STA not found, assume device present */ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return 0; /* Firmware error */ return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT); } /*******************************************************************/ static int acpi_container_add(struct acpi_device *device) { struct acpi_container *container; if (!device) { printk(KERN_ERR PREFIX "device is NULL\n"); return -EINVAL; } container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL); if (!container) return -ENOMEM; 
container->handle = device->handle; strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS); device->driver_data = container; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n", acpi_device_name(device), acpi_device_bid(device))); return 0; } static int acpi_container_remove(struct acpi_device *device, int type) { acpi_status status = AE_OK; struct acpi_container *pc = NULL; pc = acpi_driver_data(device); kfree(pc); return status; } static int container_device_add(struct acpi_device **device, acpi_handle handle) { acpi_handle phandle; struct acpi_device *pdev; int result; if (acpi_get_parent(handle, &phandle)) { return -ENODEV; } if (acpi_bus_get_device(phandle, &pdev)) { return -ENODEV; } if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_DEVICE)) { return -ENODEV; } result = acpi_bus_start(*device); return result; } static void container_notify_cb(acpi_handle handle, u32 type, void *context) { struct acpi_device *device = NULL; int result; int present; acpi_status status; present = is_device_present(handle); switch (type) { case ACPI_NOTIFY_BUS_CHECK: /* Fall through */ case ACPI_NOTIFY_DEVICE_CHECK: printk(KERN_WARNING "Container driver received %s event\n", (type == ACPI_NOTIFY_BUS_CHECK) ? 
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); status = acpi_bus_get_device(handle, &device); if (present) { if (ACPI_FAILURE(status) || !device) { result = container_device_add(&device, handle); if (!result) kobject_uevent(&device->dev.kobj, KOBJ_ONLINE); else printk(KERN_WARNING "Failed to add container\n"); } } else { if (ACPI_SUCCESS(status)) { /* device exist and this is a remove request */ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); } } break; case ACPI_NOTIFY_EJECT_REQUEST: if (!acpi_bus_get_device(handle, &device) && device) { kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); } break; default: break; } return; } static acpi_status container_walk_namespace_cb(acpi_handle handle, u32 lvl, void *context, void **rv) { char *hid = NULL; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_device_info *info; acpi_status status; int *action = context; status = acpi_get_object_info(handle, &buffer); if (ACPI_FAILURE(status) || !buffer.pointer) { return AE_OK; } info = buffer.pointer; if (info->valid & ACPI_VALID_HID) hid = info->hardware_id.value; if (hid == NULL) { goto end; } if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") && strcmp(hid, "PNP0A06")) { goto end; } switch (*action) { case INSTALL_NOTIFY_HANDLER: acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, container_notify_cb, NULL); break; case UNINSTALL_NOTIFY_HANDLER: acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, container_notify_cb); break; default: break; } end: kfree(buffer.pointer); return AE_OK; } static int __init acpi_container_init(void) { int result = 0; int action = INSTALL_NOTIFY_HANDLER; result = acpi_bus_register_driver(&acpi_container_driver); if (result < 0) { return (result); } /* register notify handler to every container device */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, container_walk_namespace_cb, &action, NULL); return (0); } static void __exit acpi_container_exit(void) { int action = 
UNINSTALL_NOTIFY_HANDLER; acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, container_walk_namespace_cb, &action, NULL); acpi_bus_unregister_driver(&acpi_container_driver); return; } module_init(acpi_container_init); module_exit(acpi_container_exit);
gpl-2.0
judacis/Galaxy-S2-Kernel
sound/pci/oxygen/xonar_pcm179x.c
955
29161
/* * card driver for models with PCM1796 DACs (Xonar D2/D2X/HDAV1.3/ST/STX) * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ /* * Xonar D2/D2X * ------------ * * CMI8788: * * SPI 0 -> 1st PCM1796 (front) * SPI 1 -> 2nd PCM1796 (surround) * SPI 2 -> 3rd PCM1796 (center/LFE) * SPI 4 -> 4th PCM1796 (back) * * GPIO 2 -> M0 of CS5381 * GPIO 3 -> M1 of CS5381 * GPIO 5 <- external power present (D2X only) * GPIO 7 -> ALT * GPIO 8 -> enable output to speakers * * CM9780: * * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input */ /* * Xonar HDAV1.3 (Deluxe) * ---------------------- * * CMI8788: * * I²C <-> PCM1796 (front) * * GPI 0 <- external power present * * GPIO 0 -> enable output to speakers * GPIO 2 -> M0 of CS5381 * GPIO 3 -> M1 of CS5381 * GPIO 8 -> route input jack to line-in (0) or mic-in (1) * * TXD -> HDMI controller * RXD <- HDMI controller * * PCM1796 front: AD1,0 <- 0,0 * * CM9780: * * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input * * no daughterboard * ---------------- * * GPIO 4 <- 1 * * H6 daughterboard * ---------------- * * GPIO 4 <- 0 * GPIO 5 <- 0 * * I²C <-> PCM1796 (surround) * <-> PCM1796 (center/LFE) * <-> PCM1796 (back) * * PCM1796 surround: AD1,0 <- 0,1 * PCM1796 center/LFE: AD1,0 <- 1,0 * PCM1796 back: AD1,0 <- 1,1 * * unknown daughterboard * --------------------- * * GPIO 4 <- 0 * GPIO 5 <- 1 * * I²C <-> CS4362A (surround, center/LFE, back) * * CS4362A: AD0 <- 0 */ /* * Xonar Essence ST 
(Deluxe)/STX * ----------------------------- * * CMI8788: * * I²C <-> PCM1792A * <-> CS2000 (ST only) * * ADC1 MCLK -> REF_CLK of CS2000 (ST only) * * GPI 0 <- external power present (STX only) * * GPIO 0 -> enable output to speakers * GPIO 1 -> route HP to front panel (0) or rear jack (1) * GPIO 2 -> M0 of CS5381 * GPIO 3 -> M1 of CS5381 * GPIO 7 -> route output to speaker jacks (0) or HP (1) * GPIO 8 -> route input jack to line-in (0) or mic-in (1) * * PCM1792A: * * AD1,0 <- 0,0 * SCK <- CLK_OUT of CS2000 (ST only) * * CS2000: * * AD0 <- 0 * * CM9780: * * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input * * H6 daughterboard * ---------------- * * GPIO 4 <- 0 * GPIO 5 <- 0 */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/mutex.h> #include <sound/ac97_codec.h> #include <sound/control.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include "xonar.h" #include "cm9780.h" #include "pcm1796.h" #include "cs2000.h" #define GPIO_D2X_EXT_POWER 0x0020 #define GPIO_D2_ALT 0x0080 #define GPIO_D2_OUTPUT_ENABLE 0x0100 #define GPI_EXT_POWER 0x01 #define GPIO_INPUT_ROUTE 0x0100 #define GPIO_HDAV_OUTPUT_ENABLE 0x0001 #define GPIO_DB_MASK 0x0030 #define GPIO_DB_H6 0x0000 #define GPIO_ST_OUTPUT_ENABLE 0x0001 #define GPIO_ST_HP_REAR 0x0002 #define GPIO_ST_HP 0x0080 #define I2C_DEVICE_PCM1796(i) (0x98 + ((i) << 1)) /* 10011, ii, /W=0 */ #define I2C_DEVICE_CS2000 0x9c /* 100111, 0, /W=0 */ #define PCM1796_REG_BASE 16 struct xonar_pcm179x { struct xonar_generic generic; unsigned int dacs; u8 pcm1796_regs[4][5]; unsigned int current_rate; bool os_128; bool hp_active; s8 hp_gain_offset; bool has_cs2000; u8 cs2000_fun_cfg_1; }; struct xonar_hdav { struct xonar_pcm179x pcm179x; struct xonar_hdmi hdmi; }; static inline void pcm1796_write_spi(struct oxygen *chip, unsigned int codec, u8 reg, u8 value) { /* maps ALSA channel pair number to SPI output */ static const u8 codec_map[4] = { 0, 1, 2, 4 }; 
oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (codec_map[codec] << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_HI, (reg << 8) | value); } static inline void pcm1796_write_i2c(struct oxygen *chip, unsigned int codec, u8 reg, u8 value) { oxygen_write_i2c(chip, I2C_DEVICE_PCM1796(codec), reg, value); } static void pcm1796_write(struct oxygen *chip, unsigned int codec, u8 reg, u8 value) { struct xonar_pcm179x *data = chip->model_data; if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) == OXYGEN_FUNCTION_SPI) pcm1796_write_spi(chip, codec, reg, value); else pcm1796_write_i2c(chip, codec, reg, value); if ((unsigned int)(reg - PCM1796_REG_BASE) < ARRAY_SIZE(data->pcm1796_regs[codec])) data->pcm1796_regs[codec][reg - PCM1796_REG_BASE] = value; } static void pcm1796_write_cached(struct oxygen *chip, unsigned int codec, u8 reg, u8 value) { struct xonar_pcm179x *data = chip->model_data; if (value != data->pcm1796_regs[codec][reg - PCM1796_REG_BASE]) pcm1796_write(chip, codec, reg, value); } static void cs2000_write(struct oxygen *chip, u8 reg, u8 value) { struct xonar_pcm179x *data = chip->model_data; oxygen_write_i2c(chip, I2C_DEVICE_CS2000, reg, value); if (reg == CS2000_FUN_CFG_1) data->cs2000_fun_cfg_1 = value; } static void cs2000_write_cached(struct oxygen *chip, u8 reg, u8 value) { struct xonar_pcm179x *data = chip->model_data; if (reg != CS2000_FUN_CFG_1 || value != data->cs2000_fun_cfg_1) cs2000_write(chip, reg, value); } static void pcm1796_registers_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; unsigned int i; s8 gain_offset; gain_offset = data->hp_active ? 
data->hp_gain_offset : 0; for (i = 0; i < data->dacs; ++i) { /* set ATLD before ATL/ATR */ pcm1796_write(chip, i, 18, data->pcm1796_regs[0][18 - PCM1796_REG_BASE]); pcm1796_write(chip, i, 16, chip->dac_volume[i * 2] + gain_offset); pcm1796_write(chip, i, 17, chip->dac_volume[i * 2 + 1] + gain_offset); pcm1796_write(chip, i, 19, data->pcm1796_regs[0][19 - PCM1796_REG_BASE]); pcm1796_write(chip, i, 20, data->pcm1796_regs[0][20 - PCM1796_REG_BASE]); pcm1796_write(chip, i, 21, 0); } } static void pcm1796_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; data->pcm1796_regs[0][18 - PCM1796_REG_BASE] = PCM1796_MUTE | PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD; data->pcm1796_regs[0][19 - PCM1796_REG_BASE] = PCM1796_FLT_SHARP | PCM1796_ATS_1; data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = PCM1796_OS_64; pcm1796_registers_init(chip); data->current_rate = 48000; } static void xonar_d2_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; data->generic.anti_pop_delay = 300; data->generic.output_enable_bit = GPIO_D2_OUTPUT_ENABLE; data->dacs = 4; pcm1796_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_D2_ALT); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_D2_ALT); oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC); xonar_init_cs53x1(chip); xonar_enable_output(chip); snd_component_add(chip->card, "PCM1796"); snd_component_add(chip->card, "CS5381"); } static void xonar_d2x_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; data->generic.ext_power_reg = OXYGEN_GPIO_DATA; data->generic.ext_power_int_reg = OXYGEN_GPIO_INTERRUPT_MASK; data->generic.ext_power_bit = GPIO_D2X_EXT_POWER; oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_D2X_EXT_POWER); xonar_init_ext_power(chip); xonar_d2_init(chip); } static void xonar_hdav_init(struct oxygen *chip) { struct xonar_hdav *data = chip->model_data; oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS, OXYGEN_2WIRE_LENGTH_8 | 
OXYGEN_2WIRE_INTERRUPT_MASK | OXYGEN_2WIRE_SPEED_FAST); data->pcm179x.generic.anti_pop_delay = 100; data->pcm179x.generic.output_enable_bit = GPIO_HDAV_OUTPUT_ENABLE; data->pcm179x.generic.ext_power_reg = OXYGEN_GPI_DATA; data->pcm179x.generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK; data->pcm179x.generic.ext_power_bit = GPI_EXT_POWER; data->pcm179x.dacs = chip->model.private_data ? 4 : 1; pcm1796_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE); xonar_init_cs53x1(chip); xonar_init_ext_power(chip); xonar_hdmi_init(chip, &data->hdmi); xonar_enable_output(chip); snd_component_add(chip->card, "PCM1796"); snd_component_add(chip->card, "CS5381"); } static void xonar_st_init_i2c(struct oxygen *chip) { oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS, OXYGEN_2WIRE_LENGTH_8 | OXYGEN_2WIRE_INTERRUPT_MASK | OXYGEN_2WIRE_SPEED_FAST); } static void xonar_st_init_common(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; data->generic.anti_pop_delay = 100; data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE; data->dacs = chip->model.private_data ? 
4 : 1; data->hp_gain_offset = 2*-18; pcm1796_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP); xonar_init_cs53x1(chip); xonar_enable_output(chip); snd_component_add(chip->card, "PCM1792A"); snd_component_add(chip->card, "CS5381"); } static void cs2000_registers_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_FREEZE); cs2000_write(chip, CS2000_DEV_CTRL, 0); cs2000_write(chip, CS2000_DEV_CFG_1, CS2000_R_MOD_SEL_1 | (0 << CS2000_R_SEL_SHIFT) | CS2000_AUX_OUT_SRC_REF_CLK | CS2000_EN_DEV_CFG_1); cs2000_write(chip, CS2000_DEV_CFG_2, (0 << CS2000_LOCK_CLK_SHIFT) | CS2000_FRAC_N_SRC_STATIC); cs2000_write(chip, CS2000_RATIO_0 + 0, 0x00); /* 1.0 */ cs2000_write(chip, CS2000_RATIO_0 + 1, 0x10); cs2000_write(chip, CS2000_RATIO_0 + 2, 0x00); cs2000_write(chip, CS2000_RATIO_0 + 3, 0x00); cs2000_write(chip, CS2000_FUN_CFG_1, data->cs2000_fun_cfg_1); cs2000_write(chip, CS2000_FUN_CFG_2, 0); cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_EN_DEV_CFG_2); } static void xonar_st_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; data->has_cs2000 = 1; data->cs2000_fun_cfg_1 = CS2000_REF_CLK_DIV_1; oxygen_write16(chip, OXYGEN_I2S_A_FORMAT, OXYGEN_RATE_48000 | OXYGEN_I2S_FORMAT_I2S | OXYGEN_I2S_MCLK_128 | OXYGEN_I2S_BITS_16 | OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64); xonar_st_init_i2c(chip); cs2000_registers_init(chip); xonar_st_init_common(chip); snd_component_add(chip->card, "CS2000"); } static void xonar_stx_init(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; xonar_st_init_i2c(chip); data->generic.ext_power_reg = OXYGEN_GPI_DATA; data->generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK; data->generic.ext_power_bit = GPI_EXT_POWER; xonar_init_ext_power(chip); xonar_st_init_common(chip); } static void 
xonar_d2_cleanup(struct oxygen *chip) { xonar_disable_output(chip); } static void xonar_hdav_cleanup(struct oxygen *chip) { xonar_hdmi_cleanup(chip); xonar_disable_output(chip); msleep(2); } static void xonar_st_cleanup(struct oxygen *chip) { xonar_disable_output(chip); } static void xonar_d2_suspend(struct oxygen *chip) { xonar_d2_cleanup(chip); } static void xonar_hdav_suspend(struct oxygen *chip) { xonar_hdav_cleanup(chip); } static void xonar_st_suspend(struct oxygen *chip) { xonar_st_cleanup(chip); } static void xonar_d2_resume(struct oxygen *chip) { pcm1796_registers_init(chip); xonar_enable_output(chip); } static void xonar_hdav_resume(struct oxygen *chip) { struct xonar_hdav *data = chip->model_data; pcm1796_registers_init(chip); xonar_hdmi_resume(chip, &data->hdmi); xonar_enable_output(chip); } static void xonar_stx_resume(struct oxygen *chip) { pcm1796_registers_init(chip); xonar_enable_output(chip); } static void xonar_st_resume(struct oxygen *chip) { cs2000_registers_init(chip); xonar_stx_resume(chip); } static unsigned int mclk_from_rate(struct oxygen *chip, unsigned int rate) { struct xonar_pcm179x *data = chip->model_data; if (rate <= 32000) return OXYGEN_I2S_MCLK_512; else if (rate <= 48000 && data->os_128) return OXYGEN_I2S_MCLK_512; else if (rate <= 96000) return OXYGEN_I2S_MCLK_256; else return OXYGEN_I2S_MCLK_128; } static unsigned int get_pcm1796_i2s_mclk(struct oxygen *chip, unsigned int channel, struct snd_pcm_hw_params *params) { if (channel == PCM_MULTICH) return mclk_from_rate(chip, params_rate(params)); else return oxygen_default_i2s_mclk(chip, channel, params); } static void update_pcm1796_oversampling(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; unsigned int i; u8 reg; if (data->current_rate <= 32000) reg = PCM1796_OS_128; else if (data->current_rate <= 48000 && data->os_128) reg = PCM1796_OS_128; else if (data->current_rate <= 96000 || data->os_128) reg = PCM1796_OS_64; else reg = PCM1796_OS_32; for (i = 0; i < 
data->dacs; ++i) pcm1796_write_cached(chip, i, 20, reg); } static void set_pcm1796_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct xonar_pcm179x *data = chip->model_data; data->current_rate = params_rate(params); update_pcm1796_oversampling(chip); } static void update_pcm1796_volume(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; unsigned int i; s8 gain_offset; gain_offset = data->hp_active ? data->hp_gain_offset : 0; for (i = 0; i < data->dacs; ++i) { pcm1796_write_cached(chip, i, 16, chip->dac_volume[i * 2] + gain_offset); pcm1796_write_cached(chip, i, 17, chip->dac_volume[i * 2 + 1] + gain_offset); } } static void update_pcm1796_mute(struct oxygen *chip) { struct xonar_pcm179x *data = chip->model_data; unsigned int i; u8 value; value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD; if (chip->dac_mute) value |= PCM1796_MUTE; for (i = 0; i < data->dacs; ++i) pcm1796_write_cached(chip, i, 18, value); } static void update_cs2000_rate(struct oxygen *chip, unsigned int rate) { struct xonar_pcm179x *data = chip->model_data; u8 rate_mclk, reg; switch (rate) { /* XXX Why is the I2S A MCLK half the actual I2S MCLK? 
*/ case 32000: rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256; break; case 44100: if (data->os_128) rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; else rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_128; break; default: /* 48000 */ if (data->os_128) rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; else rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_128; break; case 64000: rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256; break; case 88200: rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; break; case 96000: rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; break; case 176400: rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256; break; case 192000: rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256; break; } oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk, OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK); if ((rate_mclk & OXYGEN_I2S_MCLK_MASK) <= OXYGEN_I2S_MCLK_128) reg = CS2000_REF_CLK_DIV_1; else reg = CS2000_REF_CLK_DIV_2; cs2000_write_cached(chip, CS2000_FUN_CFG_1, reg); } static void set_st_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { update_cs2000_rate(chip, params_rate(params)); set_pcm1796_params(chip, params); } static void set_hdav_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct xonar_hdav *data = chip->model_data; set_pcm1796_params(chip, params); xonar_set_hdmi_params(chip, &data->hdmi, params); } static const struct snd_kcontrol_new alt_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Loopback Switch", .info = snd_ctl_boolean_mono_info, .get = xonar_gpio_bit_switch_get, .put = xonar_gpio_bit_switch_put, .private_value = GPIO_D2_ALT, }; static int rolloff_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[2] = { "Sharp Roll-off", "Slow Roll-off" }; info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = 1; info->value.enumerated.items = 2; if (info->value.enumerated.item >= 2) info->value.enumerated.item = 1; 
strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); return 0; } static int rolloff_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; value->value.enumerated.item[0] = (data->pcm1796_regs[0][19 - PCM1796_REG_BASE] & PCM1796_FLT_MASK) != PCM1796_FLT_SHARP; return 0; } static int rolloff_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; unsigned int i; int changed; u8 reg; mutex_lock(&chip->mutex); reg = data->pcm1796_regs[0][19 - PCM1796_REG_BASE]; reg &= ~PCM1796_FLT_MASK; if (!value->value.enumerated.item[0]) reg |= PCM1796_FLT_SHARP; else reg |= PCM1796_FLT_SLOW; changed = reg != data->pcm1796_regs[0][19 - PCM1796_REG_BASE]; if (changed) { for (i = 0; i < data->dacs; ++i) pcm1796_write(chip, i, 19, reg); } mutex_unlock(&chip->mutex); return changed; } static const struct snd_kcontrol_new rolloff_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DAC Filter Playback Enum", .info = rolloff_info, .get = rolloff_get, .put = rolloff_put, }; static int os_128_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[2] = { "64x", "128x" }; info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = 1; info->value.enumerated.items = 2; if (info->value.enumerated.item >= 2) info->value.enumerated.item = 1; strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); return 0; } static int os_128_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; value->value.enumerated.item[0] = data->os_128; return 0; } static int os_128_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; int changed; 
mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != data->os_128; if (changed) { data->os_128 = value->value.enumerated.item[0]; if (data->has_cs2000) update_cs2000_rate(chip, data->current_rate); oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT, mclk_from_rate(chip, data->current_rate), OXYGEN_I2S_MCLK_MASK); update_pcm1796_oversampling(chip); } mutex_unlock(&chip->mutex); return changed; } static const struct snd_kcontrol_new os_128_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DAC Oversampling Playback Enum", .info = os_128_info, .get = os_128_get, .put = os_128_put, }; static int st_output_switch_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "Speakers", "Headphones", "FP Headphones" }; info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = 1; info->value.enumerated.items = 3; if (info->value.enumerated.item >= 3) info->value.enumerated.item = 2; strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); return 0; } static int st_output_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 gpio; gpio = oxygen_read16(chip, OXYGEN_GPIO_DATA); if (!(gpio & GPIO_ST_HP)) value->value.enumerated.item[0] = 0; else if (gpio & GPIO_ST_HP_REAR) value->value.enumerated.item[0] = 1; else value->value.enumerated.item[0] = 2; return 0; } static int st_output_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; u16 gpio_old, gpio; mutex_lock(&chip->mutex); gpio_old = oxygen_read16(chip, OXYGEN_GPIO_DATA); gpio = gpio_old; switch (value->value.enumerated.item[0]) { case 0: gpio &= ~(GPIO_ST_HP | GPIO_ST_HP_REAR); break; case 1: gpio |= GPIO_ST_HP | GPIO_ST_HP_REAR; break; case 2: gpio = (gpio | GPIO_ST_HP) & ~GPIO_ST_HP_REAR; break; } oxygen_write16(chip, OXYGEN_GPIO_DATA, gpio); data->hp_active = 
gpio & GPIO_ST_HP; update_pcm1796_volume(chip); mutex_unlock(&chip->mutex); return gpio != gpio_old; } static int st_hp_volume_offset_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "< 64 ohms", "64-300 ohms", "300-600 ohms" }; info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = 1; info->value.enumerated.items = 3; if (info->value.enumerated.item > 2) info->value.enumerated.item = 2; strcpy(info->value.enumerated.name, names[info->value.enumerated.item]); return 0; } static int st_hp_volume_offset_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; mutex_lock(&chip->mutex); if (data->hp_gain_offset < 2*-6) value->value.enumerated.item[0] = 0; else if (data->hp_gain_offset < 0) value->value.enumerated.item[0] = 1; else value->value.enumerated.item[0] = 2; mutex_unlock(&chip->mutex); return 0; } static int st_hp_volume_offset_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { static const s8 offsets[] = { 2*-18, 2*-6, 0 }; struct oxygen *chip = ctl->private_data; struct xonar_pcm179x *data = chip->model_data; s8 offset; int changed; if (value->value.enumerated.item[0] > 2) return -EINVAL; offset = offsets[value->value.enumerated.item[0]]; mutex_lock(&chip->mutex); changed = offset != data->hp_gain_offset; if (changed) { data->hp_gain_offset = offset; update_pcm1796_volume(chip); } mutex_unlock(&chip->mutex); return changed; } static const struct snd_kcontrol_new st_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Output", .info = st_output_switch_info, .get = st_output_switch_get, .put = st_output_switch_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphones Impedance Playback Enum", .info = st_hp_volume_offset_info, .get = st_hp_volume_offset_get, .put = st_hp_volume_offset_put, }, }; static void xonar_line_mic_ac97_switch(struct oxygen *chip, unsigned int 
reg, unsigned int mute) { if (reg == AC97_LINE) { spin_lock_irq(&chip->reg_lock); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, mute ? GPIO_INPUT_ROUTE : 0, GPIO_INPUT_ROUTE); spin_unlock_irq(&chip->reg_lock); } } static const DECLARE_TLV_DB_SCALE(pcm1796_db_scale, -6000, 50, 0); static int xonar_d2_control_filter(struct snd_kcontrol_new *template) { if (!strncmp(template->name, "CD Capture ", 11)) /* CD in is actually connected to the video in pin */ template->private_value ^= AC97_CD ^ AC97_VIDEO; return 0; } static int xonar_st_control_filter(struct snd_kcontrol_new *template) { if (!strncmp(template->name, "CD Capture ", 11)) return 1; /* no CD input */ return 0; } static int add_pcm1796_controls(struct oxygen *chip) { int err; err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&os_128_control, chip)); if (err < 0) return err; return 0; } static int xonar_d2_mixer_init(struct oxygen *chip) { int err; err = snd_ctl_add(chip->card, snd_ctl_new1(&alt_switch, chip)); if (err < 0) return err; err = add_pcm1796_controls(chip); if (err < 0) return err; return 0; } static int xonar_hdav_mixer_init(struct oxygen *chip) { return add_pcm1796_controls(chip); } static int xonar_st_mixer_init(struct oxygen *chip) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(st_controls); ++i) { err = snd_ctl_add(chip->card, snd_ctl_new1(&st_controls[i], chip)); if (err < 0) return err; } err = add_pcm1796_controls(chip); if (err < 0) return err; return 0; } static const struct oxygen_model model_xonar_d2 = { .longname = "Asus Virtuoso 200", .chip = "AV200", .init = xonar_d2_init, .control_filter = xonar_d2_control_filter, .mixer_init = xonar_d2_mixer_init, .cleanup = xonar_d2_cleanup, .suspend = xonar_d2_suspend, .resume = xonar_d2_resume, .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_pcm1796_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = 
update_pcm1796_volume, .update_dac_mute = update_pcm1796_mute, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_pcm179x), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2 | CAPTURE_1_FROM_SPDIF | MIDI_OUTPUT | MIDI_INPUT, .dac_channels = 8, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .misc_flags = OXYGEN_MISC_MIDI, .function_flags = OXYGEN_FUNCTION_SPI | OXYGEN_FUNCTION_ENABLE_SPI_4_5, .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; static const struct oxygen_model model_xonar_hdav = { .longname = "Asus Virtuoso 200", .chip = "AV200", .init = xonar_hdav_init, .mixer_init = xonar_hdav_mixer_init, .cleanup = xonar_hdav_cleanup, .suspend = xonar_hdav_suspend, .resume = xonar_hdav_resume, .pcm_hardware_filter = xonar_hdmi_pcm_hardware_filter, .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_hdav_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_pcm1796_volume, .update_dac_mute = update_pcm1796_mute, .uart_input = xonar_hdmi_uart_input, .ac97_switch = xonar_line_mic_ac97_switch, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_hdav), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2 | CAPTURE_1_FROM_SPDIF, .dac_channels = 8, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .misc_flags = OXYGEN_MISC_MIDI, .function_flags = OXYGEN_FUNCTION_2WIRE, .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; static const struct oxygen_model model_xonar_st = { .longname = "Asus Virtuoso 100", .chip = "AV200", .init = xonar_st_init, .control_filter = xonar_st_control_filter, .mixer_init = xonar_st_mixer_init, .cleanup = xonar_st_cleanup, .suspend = xonar_st_suspend, .resume = xonar_st_resume, .get_i2s_mclk = get_pcm1796_i2s_mclk, .set_dac_params = set_st_params, .set_adc_params = xonar_set_cs53x1_params, .update_dac_volume = update_pcm1796_volume, 
.update_dac_mute = update_pcm1796_mute, .ac97_switch = xonar_line_mic_ac97_switch, .dac_tlv = pcm1796_db_scale, .model_data_size = sizeof(struct xonar_pcm179x), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2, .dac_channels = 2, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_2WIRE, .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; int __devinit get_xonar_pcm179x_model(struct oxygen *chip, const struct pci_device_id *id) { switch (id->subdevice) { case 0x8269: chip->model = model_xonar_d2; chip->model.shortname = "Xonar D2"; break; case 0x82b7: chip->model = model_xonar_d2; chip->model.shortname = "Xonar D2X"; chip->model.init = xonar_d2x_init; break; case 0x8314: chip->model = model_xonar_hdav; oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK); switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DB_MASK) { default: chip->model.shortname = "Xonar HDAV1.3"; break; case GPIO_DB_H6: chip->model.shortname = "Xonar HDAV1.3+H6"; chip->model.private_data = 1; break; } break; case 0x835d: chip->model = model_xonar_st; oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK); switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DB_MASK) { default: chip->model.shortname = "Xonar ST"; break; case GPIO_DB_H6: chip->model.shortname = "Xonar ST+H6"; chip->model.dac_channels = 8; chip->model.private_data = 1; break; } break; case 0x835c: chip->model = model_xonar_st; chip->model.shortname = "Xonar STX"; chip->model.init = xonar_stx_init; chip->model.resume = xonar_stx_resume; chip->model.set_dac_params = set_pcm1796_params; break; default: return -EINVAL; } return 0; }
gpl-2.0
thegreatergood/LiteKernel---Glide
drivers/mfd/cs5535-mfd.c
1467
5085
/* * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges * * The CS5535 and CS5536 has an ISA bridge on the PCI bus that is * used for accessing GPIOs, MFGPTs, ACPI, etc. Each subdevice has * an IO range that's specified in a single BAR. The BAR order is * hardcoded in the CS553x specifications. * * Copyright (c) 2010 Andres Salomon <dilinger@queued.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mfd/core.h> #include <linux/module.h> #include <linux/pci.h> #include <asm/olpc.h> #define DRV_NAME "cs5535-mfd" enum cs5535_mfd_bars { SMB_BAR = 0, GPIO_BAR = 1, MFGPT_BAR = 2, PMS_BAR = 4, ACPI_BAR = 5, NR_BARS, }; static int cs5535_mfd_res_enable(struct platform_device *pdev) { struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -EIO; } if (!request_region(res->start, resource_size(res), DRV_NAME)) { dev_err(&pdev->dev, "can't request region\n"); return -EIO; } return 0; } static int cs5535_mfd_res_disable(struct platform_device *pdev) { struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -EIO; } release_region(res->start, resource_size(res)); return 0; } static __devinitdata struct resource cs5535_mfd_resources[NR_BARS]; 
static __devinitdata struct mfd_cell cs5535_mfd_cells[] = { { .id = SMB_BAR, .name = "cs5535-smb", .num_resources = 1, .resources = &cs5535_mfd_resources[SMB_BAR], }, { .id = GPIO_BAR, .name = "cs5535-gpio", .num_resources = 1, .resources = &cs5535_mfd_resources[GPIO_BAR], }, { .id = MFGPT_BAR, .name = "cs5535-mfgpt", .num_resources = 1, .resources = &cs5535_mfd_resources[MFGPT_BAR], }, { .id = PMS_BAR, .name = "cs5535-pms", .num_resources = 1, .resources = &cs5535_mfd_resources[PMS_BAR], .enable = cs5535_mfd_res_enable, .disable = cs5535_mfd_res_disable, }, { .id = ACPI_BAR, .name = "cs5535-acpi", .num_resources = 1, .resources = &cs5535_mfd_resources[ACPI_BAR], .enable = cs5535_mfd_res_enable, .disable = cs5535_mfd_res_disable, }, }; #ifdef CONFIG_OLPC static void __devinit cs5535_clone_olpc_cells(void) { const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" }; if (!machine_is_olpc()) return; mfd_clone_cell("cs5535-acpi", acpi_clones, ARRAY_SIZE(acpi_clones)); } #else static void cs5535_clone_olpc_cells(void) { } #endif static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err, i; err = pci_enable_device(pdev); if (err) return err; /* fill in IO range for each cell; subdrivers handle the region */ for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) { int bar = cs5535_mfd_cells[i].id; struct resource *r = &cs5535_mfd_resources[bar]; r->flags = IORESOURCE_IO; r->start = pci_resource_start(pdev, bar); r->end = pci_resource_end(pdev, bar); /* id is used for temporarily storing BAR; unset it now */ cs5535_mfd_cells[i].id = 0; } err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells, ARRAY_SIZE(cs5535_mfd_cells), NULL, 0); if (err) { dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); goto err_disable; } cs5535_clone_olpc_cells(); dev_info(&pdev->dev, "%zu devices registered.\n", ARRAY_SIZE(cs5535_mfd_cells)); return 0; err_disable: pci_disable_device(pdev); return err; } static void __devexit 
cs5535_mfd_remove(struct pci_dev *pdev) { mfd_remove_devices(&pdev->dev); pci_disable_device(pdev); } static struct pci_device_id cs5535_mfd_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, { 0, } }; MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl); static struct pci_driver cs5535_mfd_drv = { .name = DRV_NAME, .id_table = cs5535_mfd_pci_tbl, .probe = cs5535_mfd_probe, .remove = __devexit_p(cs5535_mfd_remove), }; static int __init cs5535_mfd_init(void) { return pci_register_driver(&cs5535_mfd_drv); } static void __exit cs5535_mfd_exit(void) { pci_unregister_driver(&cs5535_mfd_drv); } module_init(cs5535_mfd_init); module_exit(cs5535_mfd_exit); MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device"); MODULE_LICENSE("GPL");
gpl-2.0
Stane1983/odroidc-linux
lib/mpi/mpiutil.c
2491
2646
/* mpiutil.ac - Utility functions for MPI * Copyright (C) 1998, 1999 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include "mpi-internal.h" /**************** * Note: It was a bad idea to use the number of limbs to allocate * because on a alpha the limbs are large but we normally need * integers of n bits - So we should chnage this to bits (or bytes). * * But mpi_alloc is used in a lot of places :-) */ MPI mpi_alloc(unsigned nlimbs) { MPI a; a = kmalloc(sizeof *a, GFP_KERNEL); if (!a) return a; if (nlimbs) { a->d = mpi_alloc_limb_space(nlimbs); if (!a->d) { kfree(a); return NULL; } } else { a->d = NULL; } a->alloced = nlimbs; a->nlimbs = 0; a->sign = 0; a->flags = 0; a->nbits = 0; return a; } EXPORT_SYMBOL_GPL(mpi_alloc); mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs) { size_t len = nlimbs * sizeof(mpi_limb_t); if (!len) return NULL; return kmalloc(len, GFP_KERNEL); } void mpi_free_limb_space(mpi_ptr_t a) { if (!a) return; kfree(a); } void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) { mpi_free_limb_space(a->d); a->d = ap; a->alloced = nlimbs; } /**************** * Resize the array of A to NLIMBS. 
the additional space is cleared * (set to 0) [done by m_realloc()] */ int mpi_resize(MPI a, unsigned nlimbs) { void *p; if (nlimbs <= a->alloced) return 0; /* no need to do it */ if (a->d) { p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); kfree(a->d); a->d = p; } else { a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); if (!a->d) return -ENOMEM; } a->alloced = nlimbs; return 0; } void mpi_free(MPI a) { if (!a) return; if (a->flags & 4) kfree(a->d); else mpi_free_limb_space(a->d); if (a->flags & ~7) pr_info("invalid flag value in mpi\n"); kfree(a); } EXPORT_SYMBOL_GPL(mpi_free);
gpl-2.0
crimeofheart/n7000_tw_jb_kernel
drivers/scsi/t128.c
5051
11795
#define AUTOSENSE #define PSEUDO_DMA /* * Trantor T128/T128F/T228 driver * Note : architecturally, the T100 and T130 are different and won't * work * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 440-4894 * * DISTRIBUTION RELEASE 3. * * For more information, please consult * * Trantor Systems, Ltd. * T128/T128F/T228 SCSI Host Adapter * Hardware Specifications * * Trantor Systems, Ltd. * 5415 Randall Place * Fremont, CA 94538 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 * * and * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * Options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance * increase compared to polled I/O. * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You * only really want to use this if you're having a problem with * dropped characters during high speed communications, and even * then, you're going to be better off twiddling with transfersize. * * USLEEP - enable support for devices that don't disconnect. Untested. * * The card is detected and initialized in one of several ways : * 1. Autoprobe (default) - since the board is memory mapped, * a BIOS signature is scanned for to locate the registers. * An interrupt is triggered to autoprobe for the interrupt * line. * * 2. With command line overrides - t128=address,irq may be * used on the LILO command line to override the defaults. * * 3. With the T128_OVERRIDE compile time define. This is * specified as an array of address, irq tuples. 
Ie, for * one board at the default 0xcc000 address, IRQ5, I could say * -DT128_OVERRIDE={{0xcc000, 5}} * * Note that if the override methods are used, place holders must * be specified for other boards in the system. * * T128/T128F jumper/dipswitch settings (note : on my sample, the switches * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) : * * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable * cc000 off off * c8000 off on * dc000 on off * d8000 on on * * * Interrupts * There is a 12 pin jumper block, jp1, numbered as follows : * T128 (JP1) T128F (J5) * 2 4 6 8 10 12 11 9 7 5 3 1 * 1 3 5 7 9 11 12 10 8 6 4 2 * * 3 2-4 * 5 1-3 * 7 3-5 * T128F only * 10 8-10 * 12 7-9 * 14 10-12 * 15 9-11 */ /* * $Log: t128.c,v $ */ #include <asm/system.h> #include <linux/signal.h> #include <linux/io.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "t128.h" #define AUTOPROBE_IRQ #include "NCR5380.h" static struct override { unsigned long address; int irq; } overrides #ifdef T128_OVERRIDE [] __initdata = T128_OVERRIDE; #else [4] __initdata = {{0, IRQ_AUTO}, {0, IRQ_AUTO}, {0 ,IRQ_AUTO}, {0, IRQ_AUTO}}; #endif #define NO_OVERRIDES ARRAY_SIZE(overrides) static struct base { unsigned int address; int noauto; } bases[] __initdata = { { 0xcc000, 0}, { 0xc8000, 0}, { 0xdc000, 0}, { 0xd8000, 0} }; #define NO_BASES ARRAY_SIZE(bases) static struct signature { const char *string; int offset; } signatures[] __initdata = { {"TSROM: SCSI BIOS, Version 1.12", 0x36}, }; #define NO_SIGNATURES ARRAY_SIZE(signatures) /* * Function : t128_setup(char *str, int *ints) * * Purpose : LILO command line initialization of the overrides array, * * Inputs : str - unused, ints - array of integer parameters with ints[0] * equal to the number of ints. 
* */ void __init t128_setup(char *str, int *ints){ static int commandline_current = 0; int i; if (ints[0] != 2) printk("t128_setup : usage t128=address,irq\n"); else if (commandline_current < NO_OVERRIDES) { overrides[commandline_current].address = ints[1]; overrides[commandline_current].irq = ints[2]; for (i = 0; i < NO_BASES; ++i) if (bases[i].address == ints[1]) { bases[i].noauto = 1; break; } ++commandline_current; } } /* * Function : int t128_detect(struct scsi_host_template * tpnt) * * Purpose : detects and initializes T128,T128F, or T228 controllers * that were autoprobed, overridden on the LILO command line, * or specified at compile time. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init t128_detect(struct scsi_host_template * tpnt){ static int current_override = 0, current_base = 0; struct Scsi_Host *instance; unsigned long base; void __iomem *p; int sig, count; tpnt->proc_name = "t128"; tpnt->proc_info = &t128_proc_info; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { base = 0; p = NULL; if (overrides[current_override].address) { base = overrides[current_override].address; p = ioremap(bases[current_base].address, 0x2000); if (!p) base = 0; } else for (; !base && (current_base < NO_BASES); ++current_base) { #if (TDEBUG & TDEBUG_INIT) printk("scsi-t128 : probing address %08x\n", bases[current_base].address); #endif if (bases[current_base].noauto) continue; p = ioremap(bases[current_base].address, 0x2000); if (!p) continue; for (sig = 0; sig < NO_SIGNATURES; ++sig) if (check_signature(p + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) { base = bases[current_base].address; #if (TDEBUG & TDEBUG_INIT) printk("scsi-t128 : detected board.\n"); #endif goto found; } iounmap(p); } #if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT) printk("scsi-t128 : base = %08x\n", (unsigned int) base); #endif if (!base) break; found: instance = 
scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) break; instance->base = base; ((struct NCR5380_hostdata *)instance->hostdata)->base = p; NCR5380_init(instance, 0); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; else instance->irq = NCR5380_probe_irq(instance, T128_IRQS); if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } if (instance->irq == SCSI_IRQ_NONE) { printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } #if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif printk("scsi%d : at 0x%08lx", instance->host_no, instance->base); if (instance->irq == SCSI_IRQ_NONE) printk (" interrupts disabled"); else printk (" irq %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE); NCR5380_print_options(instance); printk("\n"); ++current_override; ++count; } return count; } static int t128_release(struct Scsi_Host *shost) { NCR5380_local_declare(); NCR5380_setup(shost); if (shost->irq) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); iounmap(base); return 0; } /* * Function : int t128_biosparam(Disk * disk, struct block_device *dev, int *ip) * * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for * the specified device / size. 
* * Inputs : size = size of device in sectors (512 bytes), dev = block device * major / minor, ip[] = {heads, sectors, cylinders} * * Returns : always 0 (success), initializes ip * */ /* * XXX Most SCSI boards use this mapping, I could be incorrect. Some one * using hard disks on a trantor should verify that this mapping corresponds * to that used by the BIOS / ASPI driver by running the linux fdisk program * and matching the H_C_S coordinates to what DOS uses. */ int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int * ip) { ip[0] = 64; ip[1] = 32; ip[2] = capacity >> 11; return 0; } /* * Function : int NCR5380_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to * dst * * Inputs : dst = destination, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. */ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { NCR5380_local_declare(); void __iomem *reg; unsigned char *d = dst; register int i = len; NCR5380_setup(instance); reg = base + T_DATA_REG_OFFSET; #if 0 for (; i; --i) { while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier(); #else while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier(); for (; i; --i) { #endif *d++ = readb(reg); } if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) { unsigned char tmp; void __iomem *foo = base + T_CONTROL_REG_OFFSET; tmp = readb(foo); writeb(tmp | T_CR_CT, foo); writeb(tmp, foo); printk("scsi%d : watchdog timer fired in NCR5380_pread()\n", instance->host_no); return -1; } else return 0; } /* * Function : int NCR5380_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from * src * * Inputs : src = source, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. 
*/ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { NCR5380_local_declare(); void __iomem *reg; unsigned char *s = src; register int i = len; NCR5380_setup(instance); reg = base + T_DATA_REG_OFFSET; #if 0 for (; i; --i) { while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier(); #else while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier(); for (; i; --i) { #endif writeb(*s++, reg); } if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) { unsigned char tmp; void __iomem *foo = base + T_CONTROL_REG_OFFSET; tmp = readb(foo); writeb(tmp | T_CR_CT, foo); writeb(tmp, foo); printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n", instance->host_no); return -1; } else return 0; } MODULE_LICENSE("GPL"); #include "NCR5380.c" static struct scsi_host_template driver_template = { .name = "Trantor T128/T128F/T228", .detect = t128_detect, .release = t128_release, .queuecommand = t128_queue_command, .eh_abort_handler = t128_abort, .eh_bus_reset_handler = t128_bus_reset, .bios_param = t128_biosparam, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c"
gpl-2.0
willcast/kernel_d851
drivers/zorro/proc.c
5307
3848
/* * Procfs interface for the Zorro bus. * * Copyright (C) 1998-2003 Geert Uytterhoeven * * Heavily based on the procfs interface for the PCI bus, which is * * Copyright (C) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz> */ #include <linux/types.h> #include <linux/zorro.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/export.h> #include <asm/uaccess.h> #include <asm/amigahw.h> #include <asm/setup.h> static loff_t proc_bus_zorro_lseek(struct file *file, loff_t off, int whence) { loff_t new = -1; struct inode *inode = file->f_path.dentry->d_inode; mutex_lock(&inode->i_mutex); switch (whence) { case 0: new = off; break; case 1: new = file->f_pos + off; break; case 2: new = sizeof(struct ConfigDev) + off; break; } if (new < 0 || new > sizeof(struct ConfigDev)) new = -EINVAL; else file->f_pos = new; mutex_unlock(&inode->i_mutex); return new; } static ssize_t proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct inode *ino = file->f_path.dentry->d_inode; struct proc_dir_entry *dp = PDE(ino); struct zorro_dev *z = dp->data; struct ConfigDev cd; loff_t pos = *ppos; if (pos >= sizeof(struct ConfigDev)) return 0; if (nbytes >= sizeof(struct ConfigDev)) nbytes = sizeof(struct ConfigDev); if (pos + nbytes > sizeof(struct ConfigDev)) nbytes = sizeof(struct ConfigDev) - pos; /* Construct a ConfigDev */ memset(&cd, 0, sizeof(cd)); cd.cd_Rom = z->rom; cd.cd_SlotAddr = z->slotaddr; cd.cd_SlotSize = z->slotsize; cd.cd_BoardAddr = (void *)zorro_resource_start(z); cd.cd_BoardSize = zorro_resource_len(z); if (copy_to_user(buf, (void *)&cd + pos, nbytes)) return -EFAULT; *ppos += nbytes; return nbytes; } static const struct file_operations proc_bus_zorro_operations = { .owner = THIS_MODULE, .llseek = proc_bus_zorro_lseek, .read = proc_bus_zorro_read, }; static void * zorro_seq_start(struct seq_file *m, loff_t *pos) { return (*pos < zorro_num_autocon) ? 
pos : NULL; } static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return (*pos < zorro_num_autocon) ? pos : NULL; } static void zorro_seq_stop(struct seq_file *m, void *v) { } static int zorro_seq_show(struct seq_file *m, void *v) { unsigned int slot = *(loff_t *)v; struct zorro_dev *z = &zorro_autocon[slot]; seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, (unsigned long)zorro_resource_start(z), (unsigned long)zorro_resource_len(z), z->rom.er_Type); return 0; } static const struct seq_operations zorro_devices_seq_ops = { .start = zorro_seq_start, .next = zorro_seq_next, .stop = zorro_seq_stop, .show = zorro_seq_show, }; static int zorro_devices_proc_open(struct inode *inode, struct file *file) { return seq_open(file, &zorro_devices_seq_ops); } static const struct file_operations zorro_devices_proc_fops = { .owner = THIS_MODULE, .open = zorro_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *proc_bus_zorro_dir; static int __init zorro_proc_attach_device(unsigned int slot) { struct proc_dir_entry *entry; char name[4]; sprintf(name, "%02x", slot); entry = proc_create_data(name, 0, proc_bus_zorro_dir, &proc_bus_zorro_operations, &zorro_autocon[slot]); if (!entry) return -ENOMEM; entry->size = sizeof(struct zorro_dev); return 0; } static int __init zorro_proc_init(void) { unsigned int slot; if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); proc_create("devices", 0, proc_bus_zorro_dir, &zorro_devices_proc_fops); for (slot = 0; slot < zorro_num_autocon; slot++) zorro_proc_attach_device(slot); } return 0; } device_initcall(zorro_proc_init);
gpl-2.0
nok07635/UnleaZhed_XTZ
drivers/gpu/drm/gma500/psb_intel_lvds.c
5307
24670
/* * Copyright © 2006-2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/i2c.h> #include <drm/drmP.h> #include "intel_bios.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "power.h" #include <linux/pm_runtime.h> /* * LVDS I2C backlight control macros */ #define BRIGHTNESS_MAX_LEVEL 100 #define BRIGHTNESS_MASK 0xFF #define BLC_I2C_TYPE 0x01 #define BLC_PWM_TYPT 0x02 #define BLC_POLARITY_NORMAL 0 #define BLC_POLARITY_INVERSE 1 #define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE) #define PSB_BLC_MIN_PWM_REG_FREQ (0x2) #define PSB_BLC_PWM_PRECISION_FACTOR (10) #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) #define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) struct psb_intel_lvds_priv { /* * Saved LVDO output states */ uint32_t savePP_ON; uint32_t savePP_OFF; uint32_t saveLVDS; uint32_t savePP_CONTROL; uint32_t savePP_CYCLE; uint32_t savePFIT_CONTROL; uint32_t savePFIT_PGM_RATIOS; uint32_t saveBLC_PWM_CTL; struct psb_intel_i2c_chan *i2c_bus; struct psb_intel_i2c_chan *ddc_bus; }; /* * Returns the maximum level of the backlight duty cycle field. 
*/ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 ret; if (gma_power_begin(dev, false)) { ret = REG_READ(BLC_PWM_CTL); gma_power_end(dev); } else /* Powered off, use the saved value */ ret = dev_priv->regs.saveBLC_PWM_CTL; /* Top 15bits hold the frequency mask */ ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT; ret *= 2; /* Return a 16bit range as needed for setting */ if (ret == 0) dev_err(dev->dev, "BL bug: Reg %08x save %08X\n", REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL); return ret; } /* * Set LVDS backlight level by I2C command * * FIXME: at some point we need to both track this for PM and also * disable runtime pm on MRST if the brightness is nil (ie blanked) */ static int psb_lvds_i2c_set_brightness(struct drm_device *dev, unsigned int level) { struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus; u8 out_buf[2]; unsigned int blc_i2c_brightness; struct i2c_msg msgs[] = { { .addr = lvds_i2c_bus->slave_addr, .flags = 0, .len = 2, .buf = out_buf, } }; blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level * BRIGHTNESS_MASK / BRIGHTNESS_MAX_LEVEL); if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness; out_buf[0] = dev_priv->lvds_bl->brightnesscmd; out_buf[1] = (u8)blc_i2c_brightness; if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) { dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n", dev_priv->lvds_bl->brightnesscmd, blc_i2c_brightness); return 0; } dev_err(dev->dev, "I2C transfer error\n"); return -1; } static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; u32 max_pwm_blc; u32 blc_pwm_duty_cycle; max_pwm_blc = psb_intel_lvds_get_max_backlight(dev); /*BLC_PWM_CTL 
Should be initiated while backlight device init*/ BUG_ON(max_pwm_blc == 0); blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle; blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (blc_pwm_duty_cycle)); dev_info(dev->dev, "Backlight lvds set brightness %08x\n", (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (blc_pwm_duty_cycle)); return 0; } /* * Set LVDS backlight level either by I2C or PWM */ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = dev->dev_private; dev_dbg(dev->dev, "backlight level is %d\n", level); if (!dev_priv->lvds_bl) { dev_err(dev->dev, "NO LVDS backlight info\n"); return; } if (dev_priv->lvds_bl->type == BLC_I2C_TYPE) psb_lvds_i2c_set_brightness(dev, level); else psb_lvds_pwm_set_brightness(dev, level); } /* * Sets the backlight level. * * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight(). */ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level) { struct drm_psb_private *dev_priv = dev->dev_private; u32 blc_pwm_ctl; if (gma_power_begin(dev, false)) { blc_pwm_ctl = REG_READ(BLC_PWM_CTL); blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); gma_power_end(dev); } else { blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL & ~BACKLIGHT_DUTY_CYCLE_MASK; dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); } } /* * Sets the power state for the panel. 
*/ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; u32 pp_status; if (!gma_power_begin(dev, true)) { dev_err(dev->dev, "set power, chip off!\n"); return; } if (on) { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while ((pp_status & PP_ON) == 0); psb_intel_lvds_set_backlight(dev, mode_dev->backlight_duty_cycle); } else { psb_intel_lvds_set_backlight(dev, 0); REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); } gma_power_end(dev); } static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; if (mode == DRM_MODE_DPMS_ON) psb_intel_lvds_set_power(dev, true); else psb_intel_lvds_set_power(dev, false); /* XXX: We never power down the LVDS pairs. */ } static void psb_intel_lvds_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); lvds_priv->saveLVDS = REG_READ(LVDS); lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL); lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE); /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL); lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/ dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); /* * If the light is off at server 
startup, * just make it full brightness */ if (dev_priv->backlight_duty_cycle == 0) dev_priv->backlight_duty_cycle = psb_intel_lvds_get_max_backlight(dev); dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON, lvds_priv->savePP_OFF, lvds_priv->saveLVDS, lvds_priv->savePP_CONTROL, lvds_priv->savePP_CYCLE, lvds_priv->saveBLC_PWM_CTL); } static void psb_intel_lvds_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; u32 pp_status; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv; dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", lvds_priv->savePP_ON, lvds_priv->savePP_OFF, lvds_priv->saveLVDS, lvds_priv->savePP_CONTROL, lvds_priv->savePP_CYCLE, lvds_priv->saveBLC_PWM_CTL); REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL); REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL); REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS); REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON); REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF); /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE); REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL); REG_WRITE(LVDS, lvds_priv->saveLVDS); if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while ((pp_status & PP_ON) == 0); } else { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON); do { pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); } } int psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct drm_display_mode *fixed_mode = dev_priv->mode_dev.panel_fixed_mode; if 
(psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) fixed_mode = dev_priv->mode_dev.panel_fixed_mode2; /* just in case */ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* just in case */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; if (fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder); if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2) panel_fixed_mode = mode_dev->panel_fixed_mode2; /* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */ if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) { printk(KERN_ERR "Can't support LVDS on pipe A\n"); return false; } if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { printk(KERN_ERR "Must use PIPE A\n"); return false; } /* Should never happen!! */ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { printk(KERN_ERR "Can't enable LVDS and another " "encoder on the same pipe\n"); return false; } } /* * If we have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ if (panel_fixed_mode != NULL) { adjusted_mode->hdisplay = panel_fixed_mode->hdisplay; adjusted_mode->hsync_start = panel_fixed_mode->hsync_start; adjusted_mode->hsync_end = panel_fixed_mode->hsync_end; adjusted_mode->htotal = panel_fixed_mode->htotal; adjusted_mode->vdisplay = panel_fixed_mode->vdisplay; adjusted_mode->vsync_start = panel_fixed_mode->vsync_start; adjusted_mode->vsync_end = panel_fixed_mode->vsync_end; adjusted_mode->vtotal = panel_fixed_mode->vtotal; adjusted_mode->clock = panel_fixed_mode->clock; drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); } /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the * user's requested refresh rate. */ return true; } static void psb_intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (!gma_power_begin(dev, true)) return; mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK); psb_intel_lvds_set_power(dev, false); gma_power_end(dev); } static void psb_intel_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; if (mode_dev->backlight_duty_cycle == 0) mode_dev->backlight_duty_cycle = psb_intel_lvds_get_max_backlight(dev); psb_intel_lvds_set_power(dev, true); } static void psb_intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_psb_private *dev_priv = dev->dev_private; u32 pfit_control; /* * The LVDS pin pair will already have been turned on in the * psb_intel_crtc_mode_set since it has a large impact on the DPLL * settings. 
*/ /* * Enable automatic panel scaling so that non-native modes fill the * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. */ if (mode->hdisplay != adjusted_mode->hdisplay || mode->vdisplay != adjusted_mode->vdisplay) pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); else pfit_control = 0; if (dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; REG_WRITE(PFIT_CONTROL, pfit_control); } /* * Detect the LVDS connection. * * This always returns CONNECTOR_STATUS_CONNECTED. * This connector should only have * been set up if the LVDS was actually connected anyway. */ static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector *connector, bool force) { return connector_status_connected; } /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ static int psb_intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; int ret = 0; if (!IS_MRST(dev)) ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter); if (ret) return ret; /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking */ connector->display_info.min_vfreq = 0; connector->display_info.max_vfreq = 200; connector->display_info.min_hfreq = 0; connector->display_info.max_hfreq = 200; if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } return 0; } /** * psb_intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free * * Unregister the 
DDC bus for this connector then free the driver private * structure. */ void psb_intel_lvds_destroy(struct drm_connector *connector) { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv; if (lvds_priv->ddc_bus) psb_intel_i2c_destroy(lvds_priv->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } int psb_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { struct drm_encoder *encoder = connector->encoder; if (!encoder) return -1; if (!strcmp(property->name, "scaling mode")) { struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc); uint64_t curval; if (!crtc) goto set_prop_error; switch (value) { case DRM_MODE_SCALE_FULLSCREEN: break; case DRM_MODE_SCALE_NO_SCALE: break; case DRM_MODE_SCALE_ASPECT: break; default: goto set_prop_error; } if (drm_connector_property_get_value(connector, property, &curval)) goto set_prop_error; if (curval == value) goto set_prop_done; if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; if (crtc->saved_mode.hdisplay != 0 && crtc->saved_mode.vdisplay != 0) { if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode, encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb)) goto set_prop_error; } } else if (!strcmp(property->name, "backlight")) { if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; else { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *devp = encoder->dev->dev_private; struct backlight_device *bd = devp->backlight_device; if (bd) { bd->props.brightness = value; backlight_update_status(bd); } #endif } } else if (!strcmp(property->name, "DPMS")) { struct drm_encoder_helper_funcs *hfuncs = encoder->helper_private; hfuncs->dpms(encoder, value); } set_prop_done: return 0; set_prop_error: return -1; } static const struct 
drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = { .dpms = psb_intel_lvds_encoder_dpms, .mode_fixup = psb_intel_lvds_mode_fixup, .prepare = psb_intel_lvds_prepare, .mode_set = psb_intel_lvds_mode_set, .commit = psb_intel_lvds_commit, }; const struct drm_connector_helper_funcs psb_intel_lvds_connector_helper_funcs = { .get_modes = psb_intel_lvds_get_modes, .mode_valid = psb_intel_lvds_mode_valid, .best_encoder = psb_intel_best_encoder, }; const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .save = psb_intel_lvds_save, .restore = psb_intel_lvds_restore, .detect = psb_intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = psb_intel_lvds_set_property, .destroy = psb_intel_lvds_destroy, }; static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); } const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { .destroy = psb_intel_lvds_enc_destroy, }; /** * psb_intel_lvds_init - setup LVDS connectors on this device * @dev: drm device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
*/ void psb_intel_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev) { struct psb_intel_encoder *psb_intel_encoder; struct psb_intel_connector *psb_intel_connector; struct psb_intel_lvds_priv *lvds_priv; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; struct drm_psb_private *dev_priv = dev->dev_private; u32 lvds; int pipe; psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); if (!psb_intel_encoder) { dev_err(dev->dev, "psb_intel_encoder allocation error\n"); return; } psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); if (!psb_intel_connector) { dev_err(dev->dev, "psb_intel_connector allocation error\n"); goto failed_encoder; } lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL); if (!lvds_priv) { dev_err(dev->dev, "LVDS private allocation error\n"); goto failed_connector; } psb_intel_encoder->dev_priv = lvds_priv; connector = &psb_intel_connector->base; encoder = &psb_intel_encoder->base; drm_connector_init(dev, connector, &psb_intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); psb_intel_encoder->type = INTEL_OUTPUT_LVDS; drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); drm_connector_helper_add(connector, &psb_intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; /*Attach connector properties*/ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); /* * Set up I2C bus * FIXME: distroy i2c_bus when exit */ lvds_priv->i2c_bus = 
psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B"); if (!lvds_priv->i2c_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "I2C bus registration failed.\n"); goto failed_blc_i2c; } lvds_priv->i2c_bus->slave_addr = 0x2C; dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus; /* * LVDS discovery: * 1) check for EDID on DDC * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel * 4) make sure lid is open * if closed, act like it's not there for now */ /* Set up the DDC bus. */ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); if (!lvds_priv->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; } /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter); list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, scan); goto out; /* FIXME: check for quirks */ } } /* Failed to get EDID, what about VBT? do we need this? */ if (mode_dev->vbt_mode) mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, mode_dev->vbt_mode); if (!mode_dev->panel_fixed_mode) if (dev_priv->lfp_lvds_vbt_mode) mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); /* * If we didn't get EDID, try checking if the panel is already turned * on. If so, assume that whatever is currently programmed is the * correct mode. */ lvds = REG_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; crtc = psb_intel_get_crtc_from_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { mode_dev->panel_fixed_mode = psb_intel_crtc_mode_get(dev, crtc); if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; /* FIXME: check for quirks */ } } /* If we still don't have a mode after all that, give up. 
*/ if (!mode_dev->panel_fixed_mode) { dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n"); goto failed_find; } /* * Blacklist machines with BIOSes that list an LVDS panel without * actually having one. */ out: drm_sysfs_connector_add(connector); return; failed_find: if (lvds_priv->ddc_bus) psb_intel_i2c_destroy(lvds_priv->ddc_bus); failed_ddc: if (lvds_priv->i2c_bus) psb_intel_i2c_destroy(lvds_priv->i2c_bus); failed_blc_i2c: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); failed_connector: kfree(psb_intel_connector); failed_encoder: kfree(psb_intel_encoder); }
gpl-2.0
percy-g2/android_kernel_msm8625
arch/powerpc/platforms/powermac/setup.c
7355
16022
/* * Powermac setup and early boot code plus other random bits. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@samba.org) * * Derived from "arch/alpha/kernel/setup.c" * Copyright (C) 1995 Linus Torvalds * * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* * bootup setup stuff.. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/export.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/initrd.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <asm/reg.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> #include <asm/mediabay.h> #include <asm/machdep.h> #include <asm/dma.h> #include <asm/cputable.h> #include <asm/btext.h> #include <asm/pmac_feature.h> #include <asm/time.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/smu.h> #include <asm/pmc.h> #include <asm/udbg.h> #include "pmac.h" #undef SHOW_GATWICK_IRQS int ppc_override_l2cr = 0; int ppc_override_l2cr_value; 
int has_l2cache = 0; int pmac_newworld; static int current_root_goodness = -1; extern struct machdep_calls pmac_md; #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ #ifdef CONFIG_PPC64 int sccdbg; #endif sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; EXPORT_SYMBOL(sys_ctrler); #ifdef CONFIG_PMAC_SMU unsigned long smu_cmdbuf_abs; EXPORT_SYMBOL(smu_cmdbuf_abs); #endif static void pmac_show_cpuinfo(struct seq_file *m) { struct device_node *np; const char *pp; int plen; int mbmodel; unsigned int mbflags; char* mbname; mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_MODEL, 0); mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_FLAGS, 0); if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (long) &mbname) != 0) mbname = "Unknown"; /* find motherboard type */ seq_printf(m, "machine\t\t: "); np = of_find_node_by_path("/"); if (np != NULL) { pp = of_get_property(np, "model", NULL); if (pp != NULL) seq_printf(m, "%s\n", pp); else seq_printf(m, "PowerMac\n"); pp = of_get_property(np, "compatible", &plen); if (pp != NULL) { seq_printf(m, "motherboard\t:"); while (plen > 0) { int l = strlen(pp) + 1; seq_printf(m, " %s", pp); plen -= l; pp += l; } seq_printf(m, "\n"); } of_node_put(np); } else seq_printf(m, "PowerMac\n"); /* print parsed model */ seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname); seq_printf(m, "pmac flags\t: %08x\n", mbflags); /* find l2 cache info */ np = of_find_node_by_name(NULL, "l2-cache"); if (np == NULL) np = of_find_node_by_type(NULL, "cache"); if (np != NULL) { const unsigned int *ic = of_get_property(np, "i-cache-size", NULL); const unsigned int *dc = of_get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; if (of_get_property(np, "cache-unified", NULL) != 0 && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) seq_printf(m, " %dK instruction", *ic / 1024); if (dc) seq_printf(m, "%s %dK data", (ic? 
" +": ""), *dc / 1024); } pp = of_get_property(np, "ram-type", NULL); if (pp) seq_printf(m, " %s", pp); seq_printf(m, "\n"); of_node_put(np); } /* Indicate newworld/oldworld */ seq_printf(m, "pmac-generation\t: %s\n", pmac_newworld ? "NewWorld" : "OldWorld"); } #ifndef CONFIG_ADB_CUDA int find_via_cuda(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-cuda"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is CUDA-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n"); return 0; } #endif #ifndef CONFIG_ADB_PMU int find_via_pmu(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-pmu"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is PMU-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_PMU option !\n"); return 0; } #endif #ifndef CONFIG_PMAC_SMU int smu_init(void) { /* should check and warn if SMU is present */ return 0; } #endif #ifdef CONFIG_PPC32 static volatile u32 *sysctrl_regs; static void __init ohare_init(void) { struct device_node *dn; /* this area has the CPU identification register and some registers used by smp boards */ sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000); /* * Turn on the L2 cache. * We assume that we have a PSX memory controller iff * we have an ohare I/O controller. 
*/ dn = of_find_node_by_name(NULL, "ohare"); if (dn) { of_node_put(dn); if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) { if (sysctrl_regs[4] & 0x10) sysctrl_regs[4] |= 0x04000020; else sysctrl_regs[4] |= 0x04000000; if(has_l2cache) printk(KERN_INFO "Level 2 cache enabled\n"); } } } static void __init l2cr_init(void) { /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { struct device_node *np = of_find_node_by_name(NULL, "cpus"); if (np == 0) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr != 0) { ppc_override_l2cr = 1; ppc_override_l2cr_value = *l2cr; _set_L2CR(0); _set_L2CR(ppc_override_l2cr_value); } of_node_put(np); } } if (ppc_override_l2cr) printk(KERN_INFO "L2CR overridden (0x%x), " "backside cache is %s\n", ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000) ? "enabled" : "disabled"); } #endif static void __init pmac_setup_arch(void) { struct device_node *cpu, *ic; const int *fp; unsigned long pvr; pvr = PVR_VER(mfspr(SPRN_PVR)); /* Set loops_per_jiffy to a half-way reasonable value, for use until calibrate_delay gets called. */ loops_per_jiffy = 50000000 / HZ; cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { fp = of_get_property(cpu, "clock-frequency", NULL); if (fp != NULL) { if (pvr >= 0x30 && pvr < 0x80) /* PPC970 etc. */ loops_per_jiffy = *fp / (3 * HZ); else if (pvr == 4 || pvr >= 8) /* 604, G3, G4 etc. */ loops_per_jiffy = *fp / HZ; else /* 601, 603, etc. 
*/ loops_per_jiffy = *fp / (2 * HZ); } of_node_put(cpu); } /* See if newworld or oldworld */ ic = of_find_node_with_property(NULL, "interrupt-controller"); if (ic) { pmac_newworld = 1; of_node_put(ic); } /* Lookup PCI hosts */ pmac_pci_init(); #ifdef CONFIG_PPC32 ohare_init(); l2cr_init(); #endif /* CONFIG_PPC32 */ find_via_cuda(); find_via_pmu(); smu_init(); #if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ defined(CONFIG_PPC64) pmac_nvram_init(); #endif #ifdef CONFIG_PPC32 #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; else #endif ROOT_DEV = DEFAULT_ROOT_DEVICE; #endif #ifdef CONFIG_ADB if (strstr(cmd_line, "adb_sync")) { extern int __adb_probe_sync; __adb_probe_sync = 1; } #endif /* CONFIG_ADB */ } #ifdef CONFIG_SCSI void note_scsi_host(struct device_node *node, void *host) { } EXPORT_SYMBOL(note_scsi_host); #endif static int initializing = 1; static int pmac_late_init(void) { initializing = 0; return 0; } machine_late_initcall(powermac, pmac_late_init); /* * This is __init_refok because we check for "initializing" before * touching any of the __init sensitive things and "initializing" * will be false after __init time. This can't be __init because it * can be called whenever a disk is first accessed. 
*/ void __init_refok note_bootable_part(dev_t dev, int part, int goodness) { char *p; if (!initializing) return; if ((goodness <= current_root_goodness) && ROOT_DEV != DEFAULT_ROOT_DEVICE) return; p = strstr(boot_command_line, "root="); if (p != NULL && (p == boot_command_line || p[-1] == ' ')) return; ROOT_DEV = dev + part; current_root_goodness = goodness; } #ifdef CONFIG_ADB_CUDA static void cuda_restart(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM); for (;;) cuda_poll(); } static void cuda_shutdown(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN); for (;;) cuda_poll(); } #else #define cuda_restart() #define cuda_shutdown() #endif #ifndef CONFIG_ADB_PMU #define pmu_restart() #define pmu_shutdown() #endif #ifndef CONFIG_PMAC_SMU #define smu_restart() #define smu_shutdown() #endif static void pmac_restart(char *cmd) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_restart(); break; case SYS_CTRLER_PMU: pmu_restart(); break; case SYS_CTRLER_SMU: smu_restart(); break; default: ; } } static void pmac_power_off(void) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_shutdown(); break; case SYS_CTRLER_PMU: pmu_shutdown(); break; case SYS_CTRLER_SMU: smu_shutdown(); break; default: ; } } static void pmac_halt(void) { pmac_power_off(); } /* * Early initialization. */ static void __init pmac_init_early(void) { /* Enable early btext debug if requested */ if (strstr(cmd_line, "btextdbg")) { udbg_adb_init_early(); register_early_udbg_console(); } /* Probe motherboard chipset */ pmac_feature_init(); /* Initialize debug stuff */ udbg_scc_init(!!strstr(cmd_line, "sccdbg")); udbg_adb_init(!!strstr(cmd_line, "btextdbg")); #ifdef CONFIG_PPC64 iommu_init_early_dart(); #endif /* SMP Init has to be done early as we need to patch up * cpu_possible_mask before interrupt stacks are allocated * or kaboom... 
*/ #ifdef CONFIG_SMP pmac_setup_smp(); #endif } static int __init pmac_declare_of_platform_devices(void) { struct device_node *np; if (machine_is(chrp)) return -1; np = of_find_node_by_name(NULL, "valkyrie"); if (np) { of_platform_device_create(np, "valkyrie", NULL); of_node_put(np); } np = of_find_node_by_name(NULL, "platinum"); if (np) { of_platform_device_create(np, "platinum", NULL); of_node_put(np); } np = of_find_node_by_type(NULL, "smu"); if (np) { of_platform_device_create(np, "smu", NULL); of_node_put(np); } np = of_find_node_by_type(NULL, "fcu"); if (np == NULL) { /* Some machines have strangely broken device-tree */ np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e"); } if (np) { of_platform_device_create(np, "temperature", NULL); of_node_put(np); } return 0; } machine_device_initcall(powermac, pmac_declare_of_platform_devices); #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE /* * This is called very early, as part of console_init() (typically just after * time_init()). This function is respondible for trying to find a good * default console on serial ports. It tries to match the open firmware * default output with one of the available serial console drivers. */ static int __init check_pmac_serial_console(void) { struct device_node *prom_stdout = NULL; int offset = 0; const char *name; #ifdef CONFIG_SERIAL_PMACZILOG_TTYS char *devname = "ttyS"; #else char *devname = "ttyPZ"; #endif pr_debug(" -> check_pmac_serial_console()\n"); /* The user has requested a console so this is already set up. */ if (strstr(boot_command_line, "console=")) { pr_debug(" console was specified !\n"); return -EBUSY; } if (!of_chosen) { pr_debug(" of_chosen is NULL !\n"); return -ENODEV; } /* We are getting a weird phandle from OF ... */ /* ... 
So use the full path instead */ name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) { pr_debug(" no linux,stdout-path !\n"); return -ENODEV; } prom_stdout = of_find_node_by_path(name); if (!prom_stdout) { pr_debug(" can't find stdout package %s !\n", name); return -ENODEV; } pr_debug("stdout is %s\n", prom_stdout->full_name); name = of_get_property(prom_stdout, "name", NULL); if (!name) { pr_debug(" stdout package has no name !\n"); goto not_found; } if (strcmp(name, "ch-a") == 0) offset = 0; else if (strcmp(name, "ch-b") == 0) offset = 1; else goto not_found; of_node_put(prom_stdout); pr_debug("Found serial console at %s%d\n", devname, offset); return add_preferred_console(devname, offset, NULL); not_found: pr_debug("No preferred console found !\n"); of_node_put(prom_stdout); return -ENODEV; } console_initcall(check_pmac_serial_console); #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init pmac_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "Power Macintosh") && !of_flat_dt_is_compatible(root, "MacRISC")) return 0; #ifdef CONFIG_PPC64 /* * On U3, the DART (iommu) must be allocated now since it * has an impact on htab_initialize (due to the large page it * occupies having to be broken up so the DART itself is not * part of the cacheable linar mapping */ alloc_dart_table(); hpte_init_native(); #endif #ifdef CONFIG_PPC32 /* isa_io_base gets set in pmac_pci_init */ ISA_DMA_THRESHOLD = ~0L; DMA_MODE_READ = 1; DMA_MODE_WRITE = 2; #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PMAC_SMU /* * SMU based G5s need some memory below 2Gb, at least the current * driver needs that. We have to allocate it now. We allocate 4k * (1 small page) for now. 
*/ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); #endif /* CONFIG_PMAC_SMU */ return 1; } #ifdef CONFIG_PPC64 /* Move that to pci.c */ static int pmac_pci_probe_mode(struct pci_bus *bus) { struct device_node *node = pci_bus_to_OF_node(bus); /* We need to use normal PCI probing for the AGP bus, * since the device for the AGP bridge isn't in the tree. * Same for the PCIe host on U4 and the HT host bridge. */ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") || of_device_is_compatible(node, "u4-pcie") || of_device_is_compatible(node, "u3-ht"))) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } #endif /* CONFIG_PPC64 */ define_machine(powermac) { .name = "PowerMac", .probe = pmac_probe, .setup_arch = pmac_setup_arch, .init_early = pmac_init_early, .show_cpuinfo = pmac_show_cpuinfo, .init_IRQ = pmac_pic_init, .get_irq = NULL, /* changed later */ .pci_irq_fixup = pmac_pci_irq_fixup, .restart = pmac_restart, .power_off = pmac_power_off, .halt = pmac_halt, .time_init = pmac_time_init, .get_boot_time = pmac_get_boot_time, .set_rtc_time = pmac_set_rtc_time, .get_rtc_time = pmac_get_rtc_time, .calibrate_decr = pmac_calibrate_decr, .feature_call = pmac_do_feature_call, .progress = udbg_progress, #ifdef CONFIG_PPC64 .pci_probe_mode = pmac_pci_probe_mode, .power_save = power4_idle, .enable_pmcs = power4_enable_pmcs, #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif };
gpl-2.0
ruslan250283/alcatel_6036
arch/x86/kernel/mmconf-fam10h_64.c
10171
5500
/*
 * AMD Family 10h mmconfig enablement
 *
 * Picks a free physical address window for the PCI MMCONFIG aperture and
 * programs it into MSR_FAM10H_MMIO_CONF_BASE when the BIOS did not.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/range.h>

#include <asm/pci-direct.h>
#include <linux/sort.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/acpi.h>
#include <asm/mmconfig.h>
#include <asm/pci_x86.h>

/* Bus/slot/ID tuple used to locate the AMD host bridge via direct config reads */
struct pci_hostbridge_probe {
	u32 bus;
	u32 slot;
	u32 vendor;
	u32 device;
};

/* Chosen MMCONFIG base; non-zero once the BSP has decided (or adopted BIOS value) */
static u64 __cpuinitdata fam10h_pci_mmconf_base;

/* Known host bridge locations: device 0x18 on bus 0, or slot 0 on bus 0xff */
static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};

/*
 * sort() comparator: orders struct range entries by bits 63:32 of their
 * start address (the ranges of interest here all live above 4G).
 */
static int __cpuinit cmp_range(const void *x1, const void *x2)
{
	const struct range *r1 = x1;
	const struct range *r2 = x2;
	int start1, start2;

	start1 = r1->start >> 32;
	start2 = r2->start >> 32;

	return start1 - start2;
}

#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
#define MMCONF_MASK (~(MMCONF_UNIT - 1))
#define MMCONF_SIZE (MMCONF_UNIT << 8)	/* 256 buses worth of config space */
/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))

/*
 * Find a physical address window of MMCONF_SIZE that does not collide with
 * RAM (TOP_MEM2), the HT-reserved area, or any high (>4G) MMIO range routed
 * by the host bridge.  On success the result is stored in
 * fam10h_pci_mmconf_base; on failure it is left at 0.
 */
static void __cpuinit get_fam10h_pci_mmconf_base(void)
{
	int i;
	unsigned bus;
	unsigned slot;
	int found;

	u64 val;
	u32 address;
	u64 tom2;
	u64 base = FAM10H_PCI_MMCONF_BASE;

	int hi_mmio_num;
	struct range range[8];

	/* only try to get setting from BSP */
	if (fam10h_pci_mmconf_base)
		return;

	if (!early_pci_allowed())
		return;

	/* Locate the AMD host bridge so its MMIO routing registers can be read */
	found = 0;
	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
		u32 id;
		u16 device;
		u16 vendor;

		bus = pci_probes[i].bus;
		slot = pci_probes[i].slot;
		id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);

		vendor = id & 0xffff;
		device = (id>>16) & 0xffff;
		if (pci_probes[i].vendor == vendor &&
		    pci_probes[i].device == device) {
			found = 1;
			break;
		}
	}
	if (!found)
		return;

	/* SYS_CFG */
	address = MSR_K8_SYSCFG;
	rdmsrl(address, val);

	/* TOP_MEM2 is not enabled? */
	if (!(val & (1<<21))) {
		tom2 = 1ULL << 32;
	} else {
		/* TOP_MEM2 */
		address = MSR_K8_TOP_MEM2;
		rdmsrl(address, val);
		tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
	}

	/* Candidate base must lie above the top of RAM, aligned to MMCONF_MASK */
	if (base <= tom2)
		base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;

	/*
	 * need to check if the range is in the high mmio range that is
	 * above 4G
	 */
	hi_mmio_num = 0;
	for (i = 0; i < 8; i++) {
		u32 reg;
		u64 start;
		u64 end;
		/* function 1 regs 0x80/0x84 + 8*i: MMIO base/limit pair i */
		reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
		if (!(reg & 3))
			continue;	/* pair not enabled (R/W bits clear) */

		start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
		end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/

		if (end < tom2)
			continue;	/* only ranges above 4G matter */

		range[hi_mmio_num].start = start;
		range[hi_mmio_num].end = end;
		hi_mmio_num++;
	}

	if (!hi_mmio_num)
		goto out;	/* no high MMIO at all: default base is free */

	/* sort the range */
	sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);

	/* Default base clear of all ranges (entirely above or below them)? */
	if (range[hi_mmio_num - 1].end < base)
		goto out;
	if (range[0].start > base + MMCONF_SIZE)
		goto out;

	/* need to find one window */
	base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
	if ((base > tom2) && BASE_VALID(base))
		goto out;	/* window just below the lowest range */
	base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
	if (BASE_VALID(base))
		goto out;	/* window just above the highest range */
	/* need to find window between ranges */
	for (i = 1; i < hi_mmio_num; i++) {
		base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
		val = range[i].start & MMCONF_MASK;
		if (val >= base + MMCONF_SIZE && BASE_VALID(base))
			goto out;
	}
	return;	/* nothing usable found; leave fam10h_pci_mmconf_base == 0 */

out:
	fam10h_pci_mmconf_base = base;
}

/*
 * Enable MMCONFIG on the current CPU.  Runs on the BSP and on APs; APs must
 * end up with the same base as the BSP.  If the MSR is not already enabled
 * (or is untrustworthy), a base is computed and programmed here.
 */
void __cpuinit fam10h_check_enable_mmcfg(void)
{
	u64 val;
	u32 address;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, val);

	/* try to make sure that AP's setting is identical to BSP setting */
	if (val & FAM10H_MMIO_CONF_ENABLE) {
		unsigned busnbits;
		busnbits = (val >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			FAM10H_MMIO_CONF_BUSRANGE_MASK;

		/* only trust the one handle 256 buses, if acpi=off */
		if (!acpi_pci_disabled || busnbits >= 8) {
			u64 base = val & MMCONF_MASK;

			if (!fam10h_pci_mmconf_base) {
				/* BSP path: adopt the BIOS-programmed base */
				fam10h_pci_mmconf_base = base;
				return;
			} else if (fam10h_pci_mmconf_base == base)
				return;	/* AP already matches the BSP */
		}
	}

	/*
	 * if it is not enabled, try to enable it and assume only one segment
	 * with 256 buses
	 */
	get_fam10h_pci_mmconf_base();
	if (!fam10h_pci_mmconf_base) {
		/* no usable window: give up on MMCONFIG system-wide */
		pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
		return;
	}

	printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
	/* Replace base and bus-range fields, request 2^8 = 256 buses, enable */
	val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
	     (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
	val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
	       FAM10H_MMIO_CONF_ENABLE;
	wrmsrl(address, val);
}

/* DMI callback: force the PCI_CHECK_ENABLE_AMD_MMCONF probe flag on */
static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
	return 0;
}

/* Machines whose BIOS needs the MMCONFIG check forced on */
static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
	{
		.callback = set_check_enable_amd_mmconf,
		.ident = "Sun Microsystems Machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sun Microsystems"),
		},
	},
	{}
};

/* Called from a __cpuinit function, but only on the BSP. */
void __ref check_enable_amd_mmconf_dmi(void)
{
	dmi_check_system(mmconf_dmi_table);
}
gpl-2.0
jeboo/kernel-msm
fs/jffs2/xattr_trusted.c
12731
1447
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size); } static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags); } static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1; if (list && retlen<=list_size) { strcpy(list, XATTR_TRUSTED_PREFIX); strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name); } return retlen; } const struct xattr_handler jffs2_trusted_xattr_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = jffs2_trusted_listxattr, .set = jffs2_trusted_setxattr, .get = jffs2_trusted_getxattr };
gpl-2.0
zlux/tq
arch/cris/arch-v10/kernel/dma.c
188
8009
/* Wrapper for DMA channel allocator that updates DMA client muxing.
 * Copyright 2004-2007, Axis Communications AB
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>

#include <asm/dma.h>
#include <arch/svinto.h>

/* Macro to access ETRAX 100 registers */
#define SETS(var, reg, field, val) var = (var & \
					~IO_MASK_(reg##_, field##_)) | \
					IO_STATE_(reg##_, field##_, _##val)

/* Per-channel allocation flag and the device_id string of the owner */
static char used_dma_channels[MAX_DMA_CHANNELS];
static const char * used_dma_channels_users[MAX_DMA_CHANNELS];

/*
 * Claim DMA channel @dmanr for @owner and route the matching R_GEN_CONFIG
 * mux field to that client.  @device_id labels the owner for diagnostics
 * and must be passed again to cris_free_dma().  @options selects error
 * behavior (DMA_VERBOSE_ON_ERROR / DMA_PANIC_ON_ERROR).
 *
 * Returns 0 on success, -EINVAL for a bad channel/owner pairing, -EBUSY if
 * the channel is already allocated.  Runs with local IRQs disabled while
 * touching the shared bookkeeping and genconfig_shadow.
 *
 * NOTE(review): dmanr is unsigned, so the (dmanr < 0) test is always
 * false; only the upper-bound check is effective.
 */
int cris_request_dma(unsigned int dmanr, const char * device_id,
		     unsigned options, enum dma_owner owner)
{
	unsigned long flags;
	unsigned long int gens;
	int fail = -EINVAL;

	if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
		printk(KERN_CRIT "cris_request_dma: invalid DMA channel %u\n", dmanr);
		return -EINVAL;
	}

	local_irq_save(flags);
	if (used_dma_channels[dmanr]) {
		local_irq_restore(flags);
		if (options & DMA_VERBOSE_ON_ERROR) {
			printk(KERN_CRIT "Failed to request DMA %i for %s, already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]);
		}
		if (options & DMA_PANIC_ON_ERROR) {
			panic("request_dma error!");
		}
		return -EBUSY;
	}
	/* Build the new mux configuration in a local copy first */
	gens = genconfig_shadow;

	/*
	 * Each owner accepts only its fixed TX/RX channel numbers; for most
	 * owners the corresponding R_GEN_CONFIG dmaN field is switched to
	 * that client (dma_eth needs no mux change).
	 */
	switch(owner)
	{
	case dma_eth:
		if ((dmanr != NETWORK_TX_DMA_NBR) &&
		    (dmanr != NETWORK_RX_DMA_NBR)) {
			printk(KERN_CRIT "Invalid DMA channel for eth\n");
			goto bail;
		}
		break;
	case dma_ser0:
		if (dmanr == SER0_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma6, serial0);
		} else if (dmanr == SER0_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma7, serial0);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ser0\n");
			goto bail;
		}
		break;
	case dma_ser1:
		if (dmanr == SER1_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma8, serial1);
		} else if (dmanr == SER1_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma9, serial1);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ser1\n");
			goto bail;
		}
		break;
	case dma_ser2:
		if (dmanr == SER2_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma2, serial2);
		} else if (dmanr == SER2_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma3, serial2);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ser2\n");
			goto bail;
		}
		break;
	case dma_ser3:
		if (dmanr == SER3_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma4, serial3);
		} else if (dmanr == SER3_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma5, serial3);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ser3\n");
			goto bail;
		}
		break;
	case dma_ata:
		if (dmanr == ATA_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma2, ata);
		} else if (dmanr == ATA_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma3, ata);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ata\n");
			goto bail;
		}
		break;
	case dma_ext0:
		if (dmanr == EXTDMA0_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma4, extdma0);
		} else if (dmanr == EXTDMA0_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma5, extdma0);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ext0\n");
			goto bail;
		}
		break;
	case dma_ext1:
		if (dmanr == EXTDMA1_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma6, extdma1);
		} else if (dmanr == EXTDMA1_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma7, extdma1);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for ext1\n");
			goto bail;
		}
		break;
	case dma_int6:
		if (dmanr == MEM2MEM_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma7, intdma6);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for int6\n");
			goto bail;
		}
		break;
	case dma_int7:
		if (dmanr == MEM2MEM_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma6, intdma7);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for int7\n");
			goto bail;
		}
		break;
	case dma_usb:
		if (dmanr == USB_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma8, usb);
		} else if (dmanr == USB_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma9, usb);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for usb\n");
			goto bail;
		}
		break;
	case dma_scsi0:
		if (dmanr == SCSI0_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma2, scsi0);
		} else if (dmanr == SCSI0_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma3, scsi0);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for scsi0\n");
			goto bail;
		}
		break;
	case dma_scsi1:
		if (dmanr == SCSI1_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma4, scsi1);
		} else if (dmanr == SCSI1_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma5, scsi1);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for scsi1\n");
			goto bail;
		}
		break;
	case dma_par0:
		if (dmanr == PAR0_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma2, par0);
		} else if (dmanr == PAR0_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma3, par0);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for par0\n");
			goto bail;
		}
		break;
	case dma_par1:
		if (dmanr == PAR1_TX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma4, par1);
		} else if (dmanr == PAR1_RX_DMA_NBR) {
			SETS(gens, R_GEN_CONFIG, dma5, par1);
		} else {
			printk(KERN_CRIT "Invalid DMA channel for par1\n");
			goto bail;
		}
		break;
	default:
		printk(KERN_CRIT "Invalid DMA owner.\n");
		goto bail;
	}

	used_dma_channels[dmanr] = 1;
	used_dma_channels_users[dmanr] = device_id;

	{
		volatile int i;
		/* Commit the mux change to the shadow and the real register */
		genconfig_shadow = gens;
		*R_GEN_CONFIG = genconfig_shadow;
		/* Wait 12 cycles before doing any DMA command */
		for(i = 6; i > 0; i--)
			nop();
	}
	fail = 0;
bail:
	local_irq_restore(flags);
	return fail;
}

/*
 * Release DMA channel @dmanr previously claimed with cris_request_dma().
 * @device_id must be the same pointer used at request time (compared by
 * identity, not by string contents).  The channel's command register is
 * reset and the reset is polled to completion before the channel is marked
 * free.  Mismatched or unallocated channels only log an error.
 *
 * NOTE(review): same tautological (dmanr < 0) check as above.
 */
void cris_free_dma(unsigned int dmanr, const char * device_id)
{
	unsigned long flags;
	if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
		printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr);
		return;
	}

	local_irq_save(flags);
	if (!used_dma_channels[dmanr]) {
		printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated\n", dmanr);
	} else if (device_id != used_dma_channels_users[dmanr]) {
		printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated by device\n", dmanr);
	} else {
		/* Reset the channel and spin until the reset state is reached */
		switch(dmanr)
		{
		case 0:
			*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH0_CMD, cmd, *R_DMA_CH0_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH0_CMD, cmd, reset));
			break;
		case 1:
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH1_CMD, cmd, *R_DMA_CH1_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH1_CMD, cmd, reset));
			break;
		case 2:
			*R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH2_CMD, cmd, *R_DMA_CH2_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH2_CMD, cmd, reset));
			break;
		case 3:
			*R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH3_CMD, cmd, *R_DMA_CH3_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH3_CMD, cmd, reset));
			break;
		case 4:
			*R_DMA_CH4_CMD = IO_STATE(R_DMA_CH4_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH4_CMD, cmd, *R_DMA_CH4_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH4_CMD, cmd, reset));
			break;
		case 5:
			*R_DMA_CH5_CMD = IO_STATE(R_DMA_CH5_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH5_CMD, cmd, *R_DMA_CH5_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH5_CMD, cmd, reset));
			break;
		case 6:
			*R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *R_DMA_CH6_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
			break;
		case 7:
			*R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH7_CMD, cmd, *R_DMA_CH7_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH7_CMD, cmd, reset));
			break;
		case 8:
			*R_DMA_CH8_CMD = IO_STATE(R_DMA_CH8_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH8_CMD, cmd, *R_DMA_CH8_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH8_CMD, cmd, reset));
			break;
		case 9:
			*R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, reset);
			while (IO_EXTRACT(R_DMA_CH9_CMD, cmd, *R_DMA_CH9_CMD) ==
			       IO_STATE_VALUE(R_DMA_CH9_CMD, cmd, reset));
			break;
		}
		used_dma_channels[dmanr] = 0;
	}
	local_irq_restore(flags);
}

EXPORT_SYMBOL(cris_request_dma);
EXPORT_SYMBOL(cris_free_dma);
gpl-2.0
AndyLavr/Aspire-SW5-012_Kernel_4.8
drivers/gpu/ipu-v3/ipu-dc.c
188
13196
/* * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de> * Copyright (C) 2005-2009 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/export.h> #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <video/imx-ipu-v3.h> #include "ipu-prv.h" #define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2) #define DC_MAP_CONF_VAL(n) (0x144 + ((n) & ~0x1) * 2) #define DC_EVT_NF 0 #define DC_EVT_NL 1 #define DC_EVT_EOF 2 #define DC_EVT_NFIELD 3 #define DC_EVT_EOL 4 #define DC_EVT_EOFIELD 5 #define DC_EVT_NEW_ADDR 6 #define DC_EVT_NEW_CHAN 7 #define DC_EVT_NEW_DATA 8 #define DC_EVT_NEW_ADDR_W_0 0 #define DC_EVT_NEW_ADDR_W_1 1 #define DC_EVT_NEW_CHAN_W_0 2 #define DC_EVT_NEW_CHAN_W_1 3 #define DC_EVT_NEW_DATA_W_0 4 #define DC_EVT_NEW_DATA_W_1 5 #define DC_EVT_NEW_ADDR_R_0 6 #define DC_EVT_NEW_ADDR_R_1 7 #define DC_EVT_NEW_CHAN_R_0 8 #define DC_EVT_NEW_CHAN_R_1 9 #define DC_EVT_NEW_DATA_R_0 10 #define DC_EVT_NEW_DATA_R_1 11 #define DC_WR_CH_CONF 0x0 #define DC_WR_CH_ADDR 0x4 #define DC_RL_CH(evt) (8 + ((evt) & ~0x1) * 2) #define DC_GEN 0xd4 #define DC_DISP_CONF1(disp) (0xd8 + (disp) * 4) #define DC_DISP_CONF2(disp) (0xe8 + (disp) * 4) #define DC_STAT 0x1c8 #define WROD(lf) (0x18 | ((lf) << 1)) #define WRG 0x01 #define WCLK 0xc9 #define SYNC_WAVE 0 #define NULL_WAVE (-1) #define DC_GEN_SYNC_1_6_SYNC (2 << 1) #define DC_GEN_SYNC_PRIORITY_1 (1 << 7) #define DC_WR_CH_CONF_WORD_SIZE_8 (0 << 0) 
#define DC_WR_CH_CONF_WORD_SIZE_16 (1 << 0) #define DC_WR_CH_CONF_WORD_SIZE_24 (2 << 0) #define DC_WR_CH_CONF_WORD_SIZE_32 (3 << 0) #define DC_WR_CH_CONF_DISP_ID_PARALLEL(i) (((i) & 0x1) << 3) #define DC_WR_CH_CONF_DISP_ID_SERIAL (2 << 3) #define DC_WR_CH_CONF_DISP_ID_ASYNC (3 << 4) #define DC_WR_CH_CONF_FIELD_MODE (1 << 9) #define DC_WR_CH_CONF_PROG_TYPE_NORMAL (4 << 5) #define DC_WR_CH_CONF_PROG_TYPE_MASK (7 << 5) #define DC_WR_CH_CONF_PROG_DI_ID (1 << 2) #define DC_WR_CH_CONF_PROG_DISP_ID(i) (((i) & 0x1) << 3) #define IPU_DC_NUM_CHANNELS 10 struct ipu_dc_priv; enum ipu_dc_map { IPU_DC_MAP_RGB24, IPU_DC_MAP_RGB565, IPU_DC_MAP_GBR24, /* TVEv2 */ IPU_DC_MAP_BGR666, IPU_DC_MAP_LVDS666, IPU_DC_MAP_BGR24, }; struct ipu_dc { /* The display interface number assigned to this dc channel */ unsigned int di; void __iomem *base; struct ipu_dc_priv *priv; int chno; bool in_use; }; struct ipu_dc_priv { void __iomem *dc_reg; void __iomem *dc_tmpl_reg; struct ipu_soc *ipu; struct device *dev; struct ipu_dc channels[IPU_DC_NUM_CHANNELS]; struct mutex mutex; struct completion comp; int dc_irq; int dp_irq; int use_count; }; static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority) { u32 reg; reg = readl(dc->base + DC_RL_CH(event)); reg &= ~(0xffff << (16 * (event & 0x1))); reg |= ((addr << 8) | priority) << (16 * (event & 0x1)); writel(reg, dc->base + DC_RL_CH(event)); } static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand, int map, int wave, int glue, int sync, int stop) { struct ipu_dc_priv *priv = dc->priv; u32 reg1, reg2; if (opcode == WCLK) { reg1 = (operand << 20) & 0xfff00000; reg2 = operand >> 12 | opcode << 1 | stop << 9; } else if (opcode == WRG) { reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000); reg2 = operand >> 17 | opcode << 7 | stop << 9; } else { reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000); reg2 = operand >> 12 | opcode << 4 | stop << 9; } writel(reg1, 
priv->dc_tmpl_reg + word * 8); writel(reg2, priv->dc_tmpl_reg + word * 8 + 4); } static int ipu_bus_format_to_map(u32 fmt) { switch (fmt) { default: WARN_ON(1); /* fall-through */ case MEDIA_BUS_FMT_RGB888_1X24: return IPU_DC_MAP_RGB24; case MEDIA_BUS_FMT_RGB565_1X16: return IPU_DC_MAP_RGB565; case MEDIA_BUS_FMT_GBR888_1X24: return IPU_DC_MAP_GBR24; case MEDIA_BUS_FMT_RGB666_1X18: return IPU_DC_MAP_BGR666; case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: return IPU_DC_MAP_LVDS666; case MEDIA_BUS_FMT_BGR888_1X24: return IPU_DC_MAP_BGR24; } } int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced, u32 bus_format, u32 width) { struct ipu_dc_priv *priv = dc->priv; int addr, sync; u32 reg = 0; int map; dc->di = ipu_di_get_num(di); map = ipu_bus_format_to_map(bus_format); /* * In interlaced mode we need more counters to create the asymmetric * per-field VSYNC signals. The pixel active signal synchronising DC * to DI moves to signal generator #6 (see ipu-di.c). In progressive * mode counter #5 is used. */ sync = interlaced ? 
6 : 5; /* Reserve 5 microcode template words for each DI */ if (dc->di) addr = 5; else addr = 0; if (interlaced) { dc_link_event(dc, DC_EVT_NL, addr, 3); dc_link_event(dc, DC_EVT_EOL, addr, 2); dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1); /* Init template microcode */ dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); } else { dc_link_event(dc, DC_EVT_NL, addr + 2, 3); dc_link_event(dc, DC_EVT_EOL, addr + 3, 2); dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1); /* Init template microcode */ dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1); dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0); dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); } dc_link_event(dc, DC_EVT_NF, 0, 0); dc_link_event(dc, DC_EVT_NFIELD, 0, 0); dc_link_event(dc, DC_EVT_EOF, 0, 0); dc_link_event(dc, DC_EVT_EOFIELD, 0, 0); dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0); dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0); reg = readl(dc->base + DC_WR_CH_CONF); if (interlaced) reg |= DC_WR_CH_CONF_FIELD_MODE; else reg &= ~DC_WR_CH_CONF_FIELD_MODE; writel(reg, dc->base + DC_WR_CH_CONF); writel(0x0, dc->base + DC_WR_CH_ADDR); writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di)); return 0; } EXPORT_SYMBOL_GPL(ipu_dc_init_sync); void ipu_dc_enable(struct ipu_soc *ipu) { struct ipu_dc_priv *priv = ipu->dc_priv; mutex_lock(&priv->mutex); if (!priv->use_count) ipu_module_enable(priv->ipu, IPU_CONF_DC_EN); priv->use_count++; mutex_unlock(&priv->mutex); } EXPORT_SYMBOL_GPL(ipu_dc_enable); void ipu_dc_enable_channel(struct ipu_dc *dc) { int di; u32 reg; di = dc->di; reg = readl(dc->base + DC_WR_CH_CONF); reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL; writel(reg, dc->base + DC_WR_CH_CONF); } EXPORT_SYMBOL_GPL(ipu_dc_enable_channel); static irqreturn_t dc_irq_handler(int irq, void *dev_id) { struct ipu_dc *dc = dev_id; u32 reg; reg = readl(dc->base + DC_WR_CH_CONF); reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK; 
writel(reg, dc->base + DC_WR_CH_CONF); /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */ complete(&dc->priv->comp); return IRQ_HANDLED; } void ipu_dc_disable_channel(struct ipu_dc *dc) { struct ipu_dc_priv *priv = dc->priv; int irq; unsigned long ret; u32 val; /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */ if (dc->chno == 1) irq = priv->dc_irq; else if (dc->chno == 5) irq = priv->dp_irq; else return; init_completion(&priv->comp); enable_irq(irq); ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50)); disable_irq(irq); if (ret == 0) { dev_warn(priv->dev, "DC stop timeout after 50 ms\n"); val = readl(dc->base + DC_WR_CH_CONF); val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK; writel(val, dc->base + DC_WR_CH_CONF); } } EXPORT_SYMBOL_GPL(ipu_dc_disable_channel); void ipu_dc_disable(struct ipu_soc *ipu) { struct ipu_dc_priv *priv = ipu->dc_priv; mutex_lock(&priv->mutex); priv->use_count--; if (!priv->use_count) ipu_module_disable(priv->ipu, IPU_CONF_DC_EN); if (priv->use_count < 0) priv->use_count = 0; mutex_unlock(&priv->mutex); } EXPORT_SYMBOL_GPL(ipu_dc_disable); static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map, int byte_num, int offset, int mask) { int ptr = map * 3 + byte_num; u32 reg; reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr)); reg &= ~(0xffff << (16 * (ptr & 0x1))); reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1)); writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr)); reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map)); reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num))); reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num)); writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map)); } static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map) { u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map)); writel(reg & ~(0xffff << (16 * (map & 0x1))), priv->dc_reg + DC_MAP_CONF_PTR(map)); } struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel) { struct ipu_dc_priv *priv = ipu->dc_priv; struct ipu_dc *dc; if 
(channel >= IPU_DC_NUM_CHANNELS) return ERR_PTR(-ENODEV); dc = &priv->channels[channel]; mutex_lock(&priv->mutex); if (dc->in_use) { mutex_unlock(&priv->mutex); return ERR_PTR(-EBUSY); } dc->in_use = true; mutex_unlock(&priv->mutex); return dc; } EXPORT_SYMBOL_GPL(ipu_dc_get); void ipu_dc_put(struct ipu_dc *dc) { struct ipu_dc_priv *priv = dc->priv; mutex_lock(&priv->mutex); dc->in_use = false; mutex_unlock(&priv->mutex); } EXPORT_SYMBOL_GPL(ipu_dc_put); int ipu_dc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base, unsigned long template_base) { struct ipu_dc_priv *priv; static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c, 0x78, 0, 0x94, 0xb4}; int i, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->mutex); priv->dev = dev; priv->ipu = ipu; priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE); priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE); if (!priv->dc_reg || !priv->dc_tmpl_reg) return -ENOMEM; for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) { priv->channels[i].chno = i; priv->channels[i].priv = priv; priv->channels[i].base = priv->dc_reg + channel_offsets[i]; } priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1); if (!priv->dc_irq) return -EINVAL; ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL, &priv->channels[1]); if (ret < 0) return ret; disable_irq(priv->dc_irq); priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END); if (!priv->dp_irq) return -EINVAL; ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL, &priv->channels[5]); if (ret < 0) return ret; disable_irq(priv->dp_irq); writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) | DC_WR_CH_CONF_PROG_DI_ID, priv->channels[1].base + DC_WR_CH_CONF); writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0), priv->channels[5].base + DC_WR_CH_CONF); writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1, priv->dc_reg + DC_GEN); ipu->dc_priv = priv; dev_dbg(dev, "DC base: 
0x%08lx template base: 0x%08lx\n", base, template_base); /* rgb24 */ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24); ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */ /* rgb565 */ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565); ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */ /* gbr24 */ ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24); ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */ /* bgr666 */ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666); ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */ /* lvds666 */ ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666); ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */ /* bgr24 */ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24); ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */ return 0; } void ipu_dc_exit(struct ipu_soc *ipu) { }
gpl-2.0
emagii/linux-2.6-imx
drivers/scsi/libfc/fc_elsct.c
188
3693
/* * Copyright(c) 2008 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * Provide interface to send ELS/CT FC frames */ #include <linux/export.h> #include <asm/unaligned.h> #include <scsi/fc/fc_gs.h> #include <scsi/fc/fc_ns.h> #include <scsi/fc/fc_els.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> /** * fc_elsct_send() - Send an ELS or CT frame * @lport: The local port to send the frame on * @did: The destination ID for the frame * @fp: The frame to be sent * @op: The operational code * @resp: The callback routine when the response is received * @arg: The argument to pass to the response callback routine * @timer_msec: The timeout period for the frame (in msecs) */ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, struct fc_frame *, void *), void *arg, u32 timer_msec) { enum fc_rctl r_ctl; enum fc_fh_type fh_type; int rc; /* ELS requests */ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type); else { /* CT requests */ rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type); did = FC_FID_DIR_SERV; } if (rc) { fc_frame_free(fp); return NULL; } fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, FC_FCTL_REQ, 0); return lport->tt.exch_seq_send(lport, fp, resp, 
NULL, arg, timer_msec); } EXPORT_SYMBOL(fc_elsct_send); /** * fc_elsct_init() - Initialize the ELS/CT layer * @lport: The local port to initialize the ELS/CT layer for */ int fc_elsct_init(struct fc_lport *lport) { if (!lport->tt.elsct_send) lport->tt.elsct_send = fc_elsct_send; return 0; } EXPORT_SYMBOL(fc_elsct_init); /** * fc_els_resp_type() - Return a string describing the ELS response * @fp: The frame pointer or possible error code */ const char *fc_els_resp_type(struct fc_frame *fp) { const char *msg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; if (IS_ERR(fp)) { switch (-PTR_ERR(fp)) { case FC_NO_ERR: msg = "response no error"; break; case FC_EX_TIMEOUT: msg = "response timeout"; break; case FC_EX_CLOSED: msg = "response closed"; break; default: msg = "response unknown error"; break; } } else { fh = fc_frame_header_get(fp); switch (fh->fh_type) { case FC_TYPE_ELS: switch (fc_frame_payload_op(fp)) { case ELS_LS_ACC: msg = "accept"; break; case ELS_LS_RJT: msg = "reject"; break; default: msg = "response unknown ELS"; break; } break; case FC_TYPE_CT: ct = fc_frame_payload_get(fp, sizeof(*ct)); if (ct) { switch (ntohs(ct->ct_cmd)) { case FC_FS_ACC: msg = "CT accept"; break; case FC_FS_RJT: msg = "CT reject"; break; default: msg = "response unknown CT"; break; } } else { msg = "short CT response"; } break; default: msg = "response not ELS or CT"; break; } } return msg; }
gpl-2.0
boddob/linux
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
188
4878
/* * rcar_du_encoder.c -- R-Car Display Unit Encoder * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/export.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" #include "rcar_du_hdmienc.h" #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" #include "rcar_du_lvdsenc.h" #include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- * Encoder */ static void rcar_du_encoder_disable(struct drm_encoder *encoder) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); if (renc->lvds) rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false); } static void rcar_du_encoder_enable(struct drm_encoder *encoder) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); if (renc->lvds) rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true); } static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; const struct drm_display_mode *mode = &crtc_state->mode; const struct drm_display_mode *panel_mode; struct drm_connector *connector = conn_state->connector; struct drm_device *dev = encoder->dev; /* DAC encoders have currently no restriction on the mode. 
*/ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC) return 0; if (list_empty(&connector->modes)) { dev_dbg(dev->dev, "encoder: empty modes list\n"); return -EINVAL; } panel_mode = list_first_entry(&connector->modes, struct drm_display_mode, head); /* We're not allowed to modify the resolution. */ if (mode->hdisplay != panel_mode->hdisplay || mode->vdisplay != panel_mode->vdisplay) return -EINVAL; /* The flat panel mode is fixed, just copy it to the adjusted mode. */ drm_mode_copy(adjusted_mode, panel_mode); if (renc->lvds) rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode); return 0; } static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); rcar_du_crtc_route_output(encoder->crtc, renc->output); } static const struct drm_encoder_helper_funcs encoder_helper_funcs = { .mode_set = rcar_du_encoder_mode_set, .disable = rcar_du_encoder_disable, .enable = rcar_du_encoder_enable, .atomic_check = rcar_du_encoder_atomic_check, }; static const struct drm_encoder_funcs encoder_funcs = { .destroy = drm_encoder_cleanup, }; int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_encoder_type type, enum rcar_du_output output, struct device_node *enc_node, struct device_node *con_node) { struct rcar_du_encoder *renc; struct drm_encoder *encoder; unsigned int encoder_type; int ret; renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); if (renc == NULL) return -ENOMEM; renc->output = output; encoder = rcar_encoder_to_drm_encoder(renc); switch (output) { case RCAR_DU_OUTPUT_LVDS0: renc->lvds = rcdu->lvds[0]; break; case RCAR_DU_OUTPUT_LVDS1: renc->lvds = rcdu->lvds[1]; break; default: break; } switch (type) { case RCAR_DU_ENCODER_VGA: encoder_type = DRM_MODE_ENCODER_DAC; break; case RCAR_DU_ENCODER_LVDS: encoder_type = DRM_MODE_ENCODER_LVDS; break; case RCAR_DU_ENCODER_HDMI: encoder_type = DRM_MODE_ENCODER_TMDS; break; case 
RCAR_DU_ENCODER_NONE: default: /* No external encoder, use the internal encoder type. */ encoder_type = rcdu->info->routes[output].encoder_type; break; } if (type == RCAR_DU_ENCODER_HDMI) { ret = rcar_du_hdmienc_init(rcdu, renc, enc_node); if (ret < 0) goto done; } else { ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, encoder_type, NULL); if (ret < 0) goto done; drm_encoder_helper_add(encoder, &encoder_helper_funcs); } switch (encoder_type) { case DRM_MODE_ENCODER_LVDS: ret = rcar_du_lvds_connector_init(rcdu, renc, con_node); break; case DRM_MODE_ENCODER_DAC: ret = rcar_du_vga_connector_init(rcdu, renc); break; case DRM_MODE_ENCODER_TMDS: /* connector managed by the bridge driver */ break; default: ret = -EINVAL; break; } done: if (ret < 0) { if (encoder->name) encoder->funcs->destroy(encoder); devm_kfree(rcdu->dev, renc); } return ret; }
gpl-2.0
mason-hock/CHIP-linux-libre
CHIP-linux-libre/arch/x86/kernel/msr.c
956
6655
/* ----------------------------------------------------------------------- * * * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved * Copyright 2009 Intel Corporation; author: H. Peter Anvin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, * USA; either version 2 of the License, or (at your option) any later * version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * x86 MSR access device * * This device is accessed by lseek() to the appropriate register number * and then read/write in chunks of 8 bytes. A larger size means multiple * reads or writes of the same register. * * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on * an SMP box will direct the access to CPU %d. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <asm/processor.h> #include <asm/msr.h> static struct class *msr_class; static loff_t msr_seek(struct file *file, loff_t offset, int orig) { loff_t ret; struct inode *inode = file_inode(file); mutex_lock(&inode->i_mutex); switch (orig) { case SEEK_SET: file->f_pos = offset; ret = file->f_pos; break; case SEEK_CUR: file->f_pos += offset; ret = file->f_pos; break; default: ret = -EINVAL; } mutex_unlock(&inode->i_mutex); return ret; } static ssize_t msr_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u32 __user *tmp = (u32 __user *) buf; u32 data[2]; u32 reg = *ppos; int cpu = iminor(file_inode(file)); int err = 0; ssize_t 
bytes = 0; if (count % 8) return -EINVAL; /* Invalid chunk size */ for (; count; count -= 8) { err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); if (err) break; if (copy_to_user(tmp, &data, 8)) { err = -EFAULT; break; } tmp += 2; bytes += 8; } return bytes ? bytes : err; } static ssize_t msr_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { const u32 __user *tmp = (const u32 __user *)buf; u32 data[2]; u32 reg = *ppos; int cpu = iminor(file_inode(file)); int err = 0; ssize_t bytes = 0; if (count % 8) return -EINVAL; /* Invalid chunk size */ for (; count; count -= 8) { if (copy_from_user(&data, tmp, 8)) { err = -EFAULT; break; } err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); if (err) break; tmp += 2; bytes += 8; } return bytes ? bytes : err; } static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) { u32 __user *uregs = (u32 __user *)arg; u32 regs[8]; int cpu = iminor(file_inode(file)); int err; switch (ioc) { case X86_IOC_RDMSR_REGS: if (!(file->f_mode & FMODE_READ)) { err = -EBADF; break; } if (copy_from_user(&regs, uregs, sizeof regs)) { err = -EFAULT; break; } err = rdmsr_safe_regs_on_cpu(cpu, regs); if (err) break; if (copy_to_user(uregs, &regs, sizeof regs)) err = -EFAULT; break; case X86_IOC_WRMSR_REGS: if (!(file->f_mode & FMODE_WRITE)) { err = -EBADF; break; } if (copy_from_user(&regs, uregs, sizeof regs)) { err = -EFAULT; break; } err = wrmsr_safe_regs_on_cpu(cpu, regs); if (err) break; if (copy_to_user(uregs, &regs, sizeof regs)) err = -EFAULT; break; default: err = -ENOTTY; break; } return err; } static int msr_open(struct inode *inode, struct file *file) { unsigned int cpu = iminor(file_inode(file)); struct cpuinfo_x86 *c; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ c = &cpu_data(cpu); if (!cpu_has(c, X86_FEATURE_MSR)) return -EIO; /* MSR not supported */ return 0; } /* * File operations we support */ static const 
struct file_operations msr_fops = { .owner = THIS_MODULE, .llseek = msr_seek, .read = msr_read, .write = msr_write, .open = msr_open, .unlocked_ioctl = msr_ioctl, .compat_ioctl = msr_ioctl, }; static int msr_device_create(int cpu) { struct device *dev; dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL, "msr%d", cpu); return PTR_ERR_OR_ZERO(dev); } static void msr_device_destroy(int cpu) { device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); } static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int err = 0; switch (action) { case CPU_UP_PREPARE: err = msr_device_create(cpu); break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: msr_device_destroy(cpu); break; } return notifier_from_errno(err); } static struct notifier_block __refdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; static char *msr_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); } static int __init msr_init(void) { int i, err = 0; i = 0; if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { pr_err("unable to get major %d for msr\n", MSR_MAJOR); err = -EBUSY; goto out; } msr_class = class_create(THIS_MODULE, "msr"); if (IS_ERR(msr_class)) { err = PTR_ERR(msr_class); goto out_chrdev; } msr_class->devnode = msr_devnode; cpu_notifier_register_begin(); for_each_online_cpu(i) { err = msr_device_create(i); if (err != 0) goto out_class; } __register_hotcpu_notifier(&msr_class_cpu_notifier); cpu_notifier_register_done(); err = 0; goto out; out_class: i = 0; for_each_online_cpu(i) msr_device_destroy(i); cpu_notifier_register_done(); class_destroy(msr_class); out_chrdev: __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); out: return err; } static void __exit msr_exit(void) { int cpu = 0; cpu_notifier_register_begin(); for_each_online_cpu(cpu) msr_device_destroy(cpu); class_destroy(msr_class); 
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); __unregister_hotcpu_notifier(&msr_class_cpu_notifier); cpu_notifier_register_done(); } module_init(msr_init); module_exit(msr_exit) MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>"); MODULE_DESCRIPTION("x86 generic MSR driver"); MODULE_LICENSE("GPL");
gpl-2.0
CML/GP0-2.6.35-Kernel
arch/sparc/prom/devmap.c
956
1455
/* * promdevmap.c: Map device/IO areas to virtual addresses. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> extern void restore_current(void); /* Just like the routines in palloc.c, these should not be used * by the kernel at all. Bootloader facility mainly. And again, * this is only available on V2 proms and above. */ /* Map physical device address 'paddr' in IO space 'ios' of size * 'num_bytes' to a virtual address, with 'vhint' being a hint to * the prom as to where you would prefer the mapping. We return * where the prom actually mapped it. */ char * prom_mapio(char *vhint, int ios, unsigned int paddr, unsigned int num_bytes) { unsigned long flags; char *ret; spin_lock_irqsave(&prom_lock, flags); if((num_bytes == 0) || (paddr == 0)) ret = (char *) 0x0; else ret = (*(romvec->pv_v2devops.v2_dumb_mmap))(vhint, ios, paddr, num_bytes); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } /* Unmap an IO/device area that was mapped using the above routine. */ void prom_unmapio(char *vaddr, unsigned int num_bytes) { unsigned long flags; if(num_bytes == 0x0) return; spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_v2devops.v2_dumb_munmap))(vaddr, num_bytes); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); }
gpl-2.0
haodongdong9999/vyos_kernel
drivers/cpufreq/pmac64-cpufreq.c
1468
18365
/* * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, * that is iMac G5 and latest single CPU desktop. */ #undef DEBUG #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/sections.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/smu.h> #include <asm/pmac_pfunc.h> #define DBG(fmt...) pr_debug(fmt) /* see 970FX user manual */ #define SCOM_PCR 0x0aa001 /* PCR scom addr */ #define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ #define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ #define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ #define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ #define PCR_SPEED_MASK 0x000e0000U /* speed mask */ #define PCR_SPEED_SHIFT 17 #define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ #define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ #define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ #define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ #define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ #define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ #define SCOM_PSR 0x408001 /* PSR scom addr */ /* warning: PSR is a 64 bits register */ #define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ #define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ #define PSR_CUR_SPEED_MASK 0x0300000000000000U /* 
current speed */ #define PSR_CUR_SPEED_SHIFT (56) /* * The G5 only supports two frequencies (Quarter speed is not supported) */ #define CPUFREQ_HIGH 0 #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table g5_cpu_freqs[] = { {0, CPUFREQ_HIGH, 0}, {0, CPUFREQ_LOW, 0}, {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for * the various frequencies, retrieved from the device-tree */ static int g5_pmode_cur; static void (*g5_switch_volt)(int speed_mode); static int (*g5_switch_freq)(int speed_mode); static int (*g5_query_freq)(void); static unsigned long transition_latency; #ifdef CONFIG_PMAC_SMU static const u32 *g5_pmode_data; static int g5_pmode_max; static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ static int g5_fvt_count; /* number of op. points */ static int g5_fvt_cur; /* current op. point */ /* * SMU based voltage switching for Neo2 platforms */ static void g5_smu_switch_volt(int speed_mode) { struct smu_simple_cmd cmd; DECLARE_COMPLETION_ONSTACK(comp); smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete, &comp, 'V', 'S', 'L', 'E', 'W', 0xff, g5_fvt_cur+1, speed_mode); wait_for_completion(&comp); } /* * Platform function based voltage/vdnap switching for Neo2 */ static struct pmf_function *pfunc_set_vdnap0; static struct pmf_function *pfunc_vdnap0_complete; static void g5_vdnap_switch_volt(int speed_mode) { struct pmf_args args; u32 slew, done = 0; unsigned long timeout; slew = (speed_mode == CPUFREQ_LOW) ? 
1 : 0; args.count = 1; args.u[0].p = &slew; pmf_call_one(pfunc_set_vdnap0, &args); /* It's an irq GPIO so we should be able to just block here, * I'll do that later after I've properly tested the IRQ code for * platform functions */ timeout = jiffies + HZ/10; while(!time_after(jiffies, timeout)) { args.count = 1; args.u[0].p = &done; pmf_call_one(pfunc_vdnap0_complete, &args); if (done) break; usleep_range(1000, 1000); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); } /* * SCOM based frequency switching for 970FX rev3 */ static int g5_scom_switch_freq(int speed_mode) { unsigned long flags; int to; /* If frequency is going up, first ramp up the voltage */ if (speed_mode < g5_pmode_cur) g5_switch_volt(speed_mode); local_irq_save(flags); /* Clear PCR high */ scom970_write(SCOM_PCR, 0); /* Clear PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); /* Set PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | g5_pmode_data[speed_mode]); /* Wait for completion */ for (to = 0; to < 10; to++) { unsigned long psr = scom970_read(SCOM_PSR); if ((psr & PSR_CMD_RECEIVED) == 0 && (((psr >> PSR_CUR_SPEED_SHIFT) ^ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; if (psr & PSR_CMD_COMPLETED) break; udelay(100); } local_irq_restore(flags); /* If frequency is going down, last ramp the voltage */ if (speed_mode > g5_pmode_cur) g5_switch_volt(speed_mode); g5_pmode_cur = speed_mode; ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; return 0; } static int g5_scom_query_freq(void) { unsigned long psr = scom970_read(SCOM_PSR); int i; for (i = 0; i <= g5_pmode_max; i++) if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; return i; } /* * Fake voltage switching for platforms with missing support */ static void g5_dummy_switch_volt(int speed_mode) { } #endif /* CONFIG_PMAC_SMU */ /* * Platform function based voltage switching for PowerMac7,2 & 7,3 */ static struct pmf_function 
*pfunc_cpu0_volt_high; static struct pmf_function *pfunc_cpu0_volt_low; static struct pmf_function *pfunc_cpu1_volt_high; static struct pmf_function *pfunc_cpu1_volt_low; static void g5_pfunc_switch_volt(int speed_mode) { if (speed_mode == CPUFREQ_HIGH) { if (pfunc_cpu0_volt_high) pmf_call_one(pfunc_cpu0_volt_high, NULL); if (pfunc_cpu1_volt_high) pmf_call_one(pfunc_cpu1_volt_high, NULL); } else { if (pfunc_cpu0_volt_low) pmf_call_one(pfunc_cpu0_volt_low, NULL); if (pfunc_cpu1_volt_low) pmf_call_one(pfunc_cpu1_volt_low, NULL); } usleep_range(10000, 10000); /* should be faster , to fix */ } /* * Platform function based frequency switching for PowerMac7,2 & 7,3 */ static struct pmf_function *pfunc_cpu_setfreq_high; static struct pmf_function *pfunc_cpu_setfreq_low; static struct pmf_function *pfunc_cpu_getfreq; static struct pmf_function *pfunc_slewing_done; static int g5_pfunc_switch_freq(int speed_mode) { struct pmf_args args; u32 done = 0; unsigned long timeout; int rc; DBG("g5_pfunc_switch_freq(%d)\n", speed_mode); /* If frequency is going up, first ramp up the voltage */ if (speed_mode < g5_pmode_cur) g5_switch_volt(speed_mode); /* Do it */ if (speed_mode == CPUFREQ_HIGH) rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL); else rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL); if (rc) printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc); /* It's an irq GPIO so we should be able to just block here, * I'll do that later after I've properly tested the IRQ code for * platform functions */ timeout = jiffies + HZ/10; while(!time_after(jiffies, timeout)) { args.count = 1; args.u[0].p = &done; pmf_call_one(pfunc_slewing_done, &args); if (done) break; usleep_range(500, 500); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); /* If frequency is going down, last ramp the voltage */ if (speed_mode > g5_pmode_cur) g5_switch_volt(speed_mode); g5_pmode_cur = speed_mode; ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; return 0; } 
static int g5_pfunc_query_freq(void) { struct pmf_args args; u32 val = 0; args.count = 1; args.u[0].p = &val; pmf_call_one(pfunc_cpu_getfreq, &args); return val ? CPUFREQ_HIGH : CPUFREQ_LOW; } /* * Common interface to the cpufreq core */ static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { return g5_switch_freq(index); } static unsigned int g5_cpufreq_get_speed(unsigned int cpu) { return g5_cpu_freqs[g5_pmode_cur].frequency; } static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) { return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); } static struct cpufreq_driver g5_cpufreq_driver = { .name = "powermac", .flags = CPUFREQ_CONST_LOOPS, .init = g5_cpufreq_cpu_init, .verify = cpufreq_generic_frequency_table_verify, .target_index = g5_cpufreq_target, .get = g5_cpufreq_get_speed, .attr = cpufreq_generic_attr, }; #ifdef CONFIG_PMAC_SMU static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) { unsigned int psize, ssize; unsigned long max_freq; char *freq_method, *volt_method; const u32 *valp; u32 pvr_hi; int use_volts_vdnap = 0; int use_volts_smu = 0; int rc = -ENODEV; /* Check supported platforms */ if (of_machine_is_compatible("PowerMac8,1") || of_machine_is_compatible("PowerMac8,2") || of_machine_is_compatible("PowerMac9,1") || of_machine_is_compatible("PowerMac12,1")) use_volts_smu = 1; else if (of_machine_is_compatible("PowerMac11,2")) use_volts_vdnap = 1; else return -ENODEV; /* Check 970FX for now */ valp = of_get_property(cpunode, "cpu-version", NULL); if (!valp) { DBG("No cpu-version property !\n"); goto bail_noprops; } pvr_hi = (*valp) >> 16; if (pvr_hi != 0x3c && pvr_hi != 0x44) { printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); goto bail_noprops; } /* Look for the powertune data in the device-tree */ g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize); if (!g5_pmode_data) { DBG("No power-mode-data !\n"); goto bail_noprops; } g5_pmode_max = psize / sizeof(u32) - 1; if 
(use_volts_smu) { const struct smu_sdbp_header *shdr; /* Look for the FVT table */ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); if (!shdr) goto bail_noprops; g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr); g5_fvt_count = ssize / sizeof(*g5_fvt_table); g5_fvt_cur = 0; /* Sanity checking */ if (g5_fvt_count < 1 || g5_pmode_max < 1) goto bail_noprops; g5_switch_volt = g5_smu_switch_volt; volt_method = "SMU"; } else if (use_volts_vdnap) { struct device_node *root; root = of_find_node_by_path("/"); if (root == NULL) { printk(KERN_ERR "cpufreq: Can't find root of " "device tree\n"); goto bail_noprops; } pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); pfunc_vdnap0_complete = pmf_find_function(root, "slewing-done"); if (pfunc_set_vdnap0 == NULL || pfunc_vdnap0_complete == NULL) { printk(KERN_ERR "cpufreq: Can't find required " "platform function\n"); goto bail_noprops; } g5_switch_volt = g5_vdnap_switch_volt; volt_method = "GPIO"; } else { g5_switch_volt = g5_dummy_switch_volt; volt_method = "none"; } /* * From what I see, clock-frequency is always the maximal frequency. * The current driver can not slew sysclk yet, so we really only deal * with powertune steps for now. We also only implement full freq and * half freq in this version. So far, I haven't yet seen a machine * supporting anything else. */ valp = of_get_property(cpunode, "clock-frequency", NULL); if (!valp) return -ENODEV; max_freq = (*valp)/1000; g5_cpu_freqs[0].frequency = max_freq; g5_cpu_freqs[1].frequency = max_freq/2; /* Set callbacks */ transition_latency = 12000; g5_switch_freq = g5_scom_switch_freq; g5_query_freq = g5_scom_query_freq; freq_method = "SCOM"; /* Force apply current frequency to make sure everything is in * sync (voltage is right for example). Firmware may leave us with * a strange setting ... 
*/ g5_switch_volt(CPUFREQ_HIGH); msleep(10); g5_pmode_cur = -1; g5_switch_freq(g5_query_freq()); printk(KERN_INFO "Registering G5 CPU frequency driver\n"); printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", freq_method, volt_method); printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", g5_cpu_freqs[1].frequency/1000, g5_cpu_freqs[0].frequency/1000, g5_cpu_freqs[g5_pmode_cur].frequency/1000); rc = cpufreq_register_driver(&g5_cpufreq_driver); /* We keep the CPU node on hold... hopefully, Apple G5 don't have * hotplug CPU with a dynamic device-tree ... */ return rc; bail_noprops: of_node_put(cpunode); return rc; } #endif /* CONFIG_PMAC_SMU */ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) { struct device_node *cpuid = NULL, *hwclock = NULL; const u8 *eeprom = NULL; const u32 *valp; u64 max_freq, min_freq, ih, il; int has_volt = 1, rc = 0; DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" " RackMac3,1...\n"); /* Lookup the cpuid eeprom node */ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); if (cpuid != NULL) eeprom = of_get_property(cpuid, "cpuid", NULL); if (eeprom == NULL) { printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); rc = -ENODEV; goto bail; } /* Lookup the i2c hwclock */ for_each_node_by_name(hwclock, "i2c-hwclock") { const char *loc = of_get_property(hwclock, "hwctrl-location", NULL); if (loc == NULL) continue; if (strcmp(loc, "CPU CLOCK")) continue; if (!of_get_property(hwclock, "platform-get-frequency", NULL)) continue; break; } if (hwclock == NULL) { printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); rc = -ENODEV; goto bail; } DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); /* Now get all the platform functions */ pfunc_cpu_getfreq = pmf_find_function(hwclock, "get-frequency"); pfunc_cpu_setfreq_high = pmf_find_function(hwclock, "set-frequency-high"); pfunc_cpu_setfreq_low = pmf_find_function(hwclock, "set-frequency-low"); pfunc_slewing_done = 
pmf_find_function(hwclock, "slewing-done"); pfunc_cpu0_volt_high = pmf_find_function(hwclock, "set-voltage-high-0"); pfunc_cpu0_volt_low = pmf_find_function(hwclock, "set-voltage-low-0"); pfunc_cpu1_volt_high = pmf_find_function(hwclock, "set-voltage-high-1"); pfunc_cpu1_volt_low = pmf_find_function(hwclock, "set-voltage-low-1"); /* Check we have minimum requirements */ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { printk(KERN_ERR "cpufreq: Can't find platform functions !\n"); rc = -ENODEV; goto bail; } /* Check that we have complete sets */ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) { pmf_put_function(pfunc_cpu0_volt_high); pmf_put_function(pfunc_cpu0_volt_low); pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL; has_volt = 0; } if (!has_volt || pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) { pmf_put_function(pfunc_cpu1_volt_high); pmf_put_function(pfunc_cpu1_volt_low); pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL; } /* Note: The device tree also contains a "platform-set-values" * function for which I haven't quite figured out the usage. It * might have to be called on init and/or wakeup, I'm not too sure * but things seem to work fine without it so far ... */ /* Get max frequency from device-tree */ valp = of_get_property(cpunode, "clock-frequency", NULL); if (!valp) { printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); rc = -ENODEV; goto bail; } max_freq = (*valp)/1000; /* Now calculate reduced frequency by using the cpuid input freq * ratio. 
This requires 64 bits math unless we are willing to lose * some precision */ ih = *((u32 *)(eeprom + 0x10)); il = *((u32 *)(eeprom + 0x20)); /* Check for machines with no useful settings */ if (il == ih) { printk(KERN_WARNING "cpufreq: No low frequency mode available" " on this model !\n"); rc = -ENODEV; goto bail; } min_freq = 0; if (ih != 0 && il != 0) min_freq = (max_freq * il) / ih; /* Sanity check */ if (min_freq >= max_freq || min_freq < 1000) { printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); rc = -ENXIO; goto bail; } g5_cpu_freqs[0].frequency = max_freq; g5_cpu_freqs[1].frequency = min_freq; /* Based on a measurement on Xserve G5, rounded up. */ transition_latency = 10 * NSEC_PER_MSEC; /* Set callbacks */ g5_switch_volt = g5_pfunc_switch_volt; g5_switch_freq = g5_pfunc_switch_freq; g5_query_freq = g5_pfunc_query_freq; /* Force apply current frequency to make sure everything is in * sync (voltage is right for example). Firmware may leave us with * a strange setting ... */ g5_switch_volt(CPUFREQ_HIGH); msleep(10); g5_pmode_cur = -1; g5_switch_freq(g5_query_freq()); printk(KERN_INFO "Registering G5 CPU frequency driver\n"); printk(KERN_INFO "Frequency method: i2c/pfunc, " "Voltage method: %s\n", has_volt ? 
"i2c/pfunc" : "none"); printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", g5_cpu_freqs[1].frequency/1000, g5_cpu_freqs[0].frequency/1000, g5_cpu_freqs[g5_pmode_cur].frequency/1000); rc = cpufreq_register_driver(&g5_cpufreq_driver); bail: if (rc != 0) { pmf_put_function(pfunc_cpu_getfreq); pmf_put_function(pfunc_cpu_setfreq_high); pmf_put_function(pfunc_cpu_setfreq_low); pmf_put_function(pfunc_slewing_done); pmf_put_function(pfunc_cpu0_volt_high); pmf_put_function(pfunc_cpu0_volt_low); pmf_put_function(pfunc_cpu1_volt_high); pmf_put_function(pfunc_cpu1_volt_low); } of_node_put(hwclock); of_node_put(cpuid); of_node_put(cpunode); return rc; } static int __init g5_cpufreq_init(void) { struct device_node *cpunode; int rc = 0; /* Get first CPU node */ cpunode = of_cpu_device_node_get(0); if (cpunode == NULL) { pr_err("cpufreq: Can't find any CPU node\n"); return -ENODEV; } if (of_machine_is_compatible("PowerMac7,2") || of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("RackMac3,1")) rc = g5_pm72_cpufreq_init(cpunode); #ifdef CONFIG_PMAC_SMU else rc = g5_neo2_cpufreq_init(cpunode); #endif /* CONFIG_PMAC_SMU */ return rc; } module_init(g5_cpufreq_init); MODULE_LICENSE("GPL");
gpl-2.0
xingrz/android_kernel_pifoundation_bcm2710
drivers/cpufreq/pmac64-cpufreq.c
1468
18365
/* * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, * that is iMac G5 and latest single CPU desktop. */ #undef DEBUG #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/sections.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/smu.h> #include <asm/pmac_pfunc.h> #define DBG(fmt...) pr_debug(fmt) /* see 970FX user manual */ #define SCOM_PCR 0x0aa001 /* PCR scom addr */ #define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ #define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ #define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ #define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ #define PCR_SPEED_MASK 0x000e0000U /* speed mask */ #define PCR_SPEED_SHIFT 17 #define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ #define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ #define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ #define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ #define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ #define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ #define SCOM_PSR 0x408001 /* PSR scom addr */ /* warning: PSR is a 64 bits register */ #define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ #define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ #define PSR_CUR_SPEED_MASK 0x0300000000000000U /* 
current speed */ #define PSR_CUR_SPEED_SHIFT (56) /* * The G5 only supports two frequencies (Quarter speed is not supported) */ #define CPUFREQ_HIGH 0 #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table g5_cpu_freqs[] = { {0, CPUFREQ_HIGH, 0}, {0, CPUFREQ_LOW, 0}, {0, 0, CPUFREQ_TABLE_END}, }; /* Power mode data is an array of the 32 bits PCR values to use for * the various frequencies, retrieved from the device-tree */ static int g5_pmode_cur; static void (*g5_switch_volt)(int speed_mode); static int (*g5_switch_freq)(int speed_mode); static int (*g5_query_freq)(void); static unsigned long transition_latency; #ifdef CONFIG_PMAC_SMU static const u32 *g5_pmode_data; static int g5_pmode_max; static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ static int g5_fvt_count; /* number of op. points */ static int g5_fvt_cur; /* current op. point */ /* * SMU based voltage switching for Neo2 platforms */ static void g5_smu_switch_volt(int speed_mode) { struct smu_simple_cmd cmd; DECLARE_COMPLETION_ONSTACK(comp); smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete, &comp, 'V', 'S', 'L', 'E', 'W', 0xff, g5_fvt_cur+1, speed_mode); wait_for_completion(&comp); } /* * Platform function based voltage/vdnap switching for Neo2 */ static struct pmf_function *pfunc_set_vdnap0; static struct pmf_function *pfunc_vdnap0_complete; static void g5_vdnap_switch_volt(int speed_mode) { struct pmf_args args; u32 slew, done = 0; unsigned long timeout; slew = (speed_mode == CPUFREQ_LOW) ? 
1 : 0; args.count = 1; args.u[0].p = &slew; pmf_call_one(pfunc_set_vdnap0, &args); /* It's an irq GPIO so we should be able to just block here, * I'll do that later after I've properly tested the IRQ code for * platform functions */ timeout = jiffies + HZ/10; while(!time_after(jiffies, timeout)) { args.count = 1; args.u[0].p = &done; pmf_call_one(pfunc_vdnap0_complete, &args); if (done) break; usleep_range(1000, 1000); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); } /* * SCOM based frequency switching for 970FX rev3 */ static int g5_scom_switch_freq(int speed_mode) { unsigned long flags; int to; /* If frequency is going up, first ramp up the voltage */ if (speed_mode < g5_pmode_cur) g5_switch_volt(speed_mode); local_irq_save(flags); /* Clear PCR high */ scom970_write(SCOM_PCR, 0); /* Clear PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); /* Set PCR low */ scom970_write(SCOM_PCR, PCR_HILO_SELECT | g5_pmode_data[speed_mode]); /* Wait for completion */ for (to = 0; to < 10; to++) { unsigned long psr = scom970_read(SCOM_PSR); if ((psr & PSR_CMD_RECEIVED) == 0 && (((psr >> PSR_CUR_SPEED_SHIFT) ^ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; if (psr & PSR_CMD_COMPLETED) break; udelay(100); } local_irq_restore(flags); /* If frequency is going down, last ramp the voltage */ if (speed_mode > g5_pmode_cur) g5_switch_volt(speed_mode); g5_pmode_cur = speed_mode; ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; return 0; } static int g5_scom_query_freq(void) { unsigned long psr = scom970_read(SCOM_PSR); int i; for (i = 0; i <= g5_pmode_max; i++) if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) break; return i; } /* * Fake voltage switching for platforms with missing support */ static void g5_dummy_switch_volt(int speed_mode) { } #endif /* CONFIG_PMAC_SMU */ /* * Platform function based voltage switching for PowerMac7,2 & 7,3 */ static struct pmf_function 
*pfunc_cpu0_volt_high; static struct pmf_function *pfunc_cpu0_volt_low; static struct pmf_function *pfunc_cpu1_volt_high; static struct pmf_function *pfunc_cpu1_volt_low; static void g5_pfunc_switch_volt(int speed_mode) { if (speed_mode == CPUFREQ_HIGH) { if (pfunc_cpu0_volt_high) pmf_call_one(pfunc_cpu0_volt_high, NULL); if (pfunc_cpu1_volt_high) pmf_call_one(pfunc_cpu1_volt_high, NULL); } else { if (pfunc_cpu0_volt_low) pmf_call_one(pfunc_cpu0_volt_low, NULL); if (pfunc_cpu1_volt_low) pmf_call_one(pfunc_cpu1_volt_low, NULL); } usleep_range(10000, 10000); /* should be faster , to fix */ } /* * Platform function based frequency switching for PowerMac7,2 & 7,3 */ static struct pmf_function *pfunc_cpu_setfreq_high; static struct pmf_function *pfunc_cpu_setfreq_low; static struct pmf_function *pfunc_cpu_getfreq; static struct pmf_function *pfunc_slewing_done; static int g5_pfunc_switch_freq(int speed_mode) { struct pmf_args args; u32 done = 0; unsigned long timeout; int rc; DBG("g5_pfunc_switch_freq(%d)\n", speed_mode); /* If frequency is going up, first ramp up the voltage */ if (speed_mode < g5_pmode_cur) g5_switch_volt(speed_mode); /* Do it */ if (speed_mode == CPUFREQ_HIGH) rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL); else rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL); if (rc) printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc); /* It's an irq GPIO so we should be able to just block here, * I'll do that later after I've properly tested the IRQ code for * platform functions */ timeout = jiffies + HZ/10; while(!time_after(jiffies, timeout)) { args.count = 1; args.u[0].p = &done; pmf_call_one(pfunc_slewing_done, &args); if (done) break; usleep_range(500, 500); } if (done == 0) printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); /* If frequency is going down, last ramp the voltage */ if (speed_mode > g5_pmode_cur) g5_switch_volt(speed_mode); g5_pmode_cur = speed_mode; ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; return 0; } 
static int g5_pfunc_query_freq(void) { struct pmf_args args; u32 val = 0; args.count = 1; args.u[0].p = &val; pmf_call_one(pfunc_cpu_getfreq, &args); return val ? CPUFREQ_HIGH : CPUFREQ_LOW; } /* * Common interface to the cpufreq core */ static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { return g5_switch_freq(index); } static unsigned int g5_cpufreq_get_speed(unsigned int cpu) { return g5_cpu_freqs[g5_pmode_cur].frequency; } static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) { return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); } static struct cpufreq_driver g5_cpufreq_driver = { .name = "powermac", .flags = CPUFREQ_CONST_LOOPS, .init = g5_cpufreq_cpu_init, .verify = cpufreq_generic_frequency_table_verify, .target_index = g5_cpufreq_target, .get = g5_cpufreq_get_speed, .attr = cpufreq_generic_attr, }; #ifdef CONFIG_PMAC_SMU static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) { unsigned int psize, ssize; unsigned long max_freq; char *freq_method, *volt_method; const u32 *valp; u32 pvr_hi; int use_volts_vdnap = 0; int use_volts_smu = 0; int rc = -ENODEV; /* Check supported platforms */ if (of_machine_is_compatible("PowerMac8,1") || of_machine_is_compatible("PowerMac8,2") || of_machine_is_compatible("PowerMac9,1") || of_machine_is_compatible("PowerMac12,1")) use_volts_smu = 1; else if (of_machine_is_compatible("PowerMac11,2")) use_volts_vdnap = 1; else return -ENODEV; /* Check 970FX for now */ valp = of_get_property(cpunode, "cpu-version", NULL); if (!valp) { DBG("No cpu-version property !\n"); goto bail_noprops; } pvr_hi = (*valp) >> 16; if (pvr_hi != 0x3c && pvr_hi != 0x44) { printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); goto bail_noprops; } /* Look for the powertune data in the device-tree */ g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize); if (!g5_pmode_data) { DBG("No power-mode-data !\n"); goto bail_noprops; } g5_pmode_max = psize / sizeof(u32) - 1; if 
(use_volts_smu) { const struct smu_sdbp_header *shdr; /* Look for the FVT table */ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); if (!shdr) goto bail_noprops; g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr); g5_fvt_count = ssize / sizeof(*g5_fvt_table); g5_fvt_cur = 0; /* Sanity checking */ if (g5_fvt_count < 1 || g5_pmode_max < 1) goto bail_noprops; g5_switch_volt = g5_smu_switch_volt; volt_method = "SMU"; } else if (use_volts_vdnap) { struct device_node *root; root = of_find_node_by_path("/"); if (root == NULL) { printk(KERN_ERR "cpufreq: Can't find root of " "device tree\n"); goto bail_noprops; } pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); pfunc_vdnap0_complete = pmf_find_function(root, "slewing-done"); if (pfunc_set_vdnap0 == NULL || pfunc_vdnap0_complete == NULL) { printk(KERN_ERR "cpufreq: Can't find required " "platform function\n"); goto bail_noprops; } g5_switch_volt = g5_vdnap_switch_volt; volt_method = "GPIO"; } else { g5_switch_volt = g5_dummy_switch_volt; volt_method = "none"; } /* * From what I see, clock-frequency is always the maximal frequency. * The current driver can not slew sysclk yet, so we really only deal * with powertune steps for now. We also only implement full freq and * half freq in this version. So far, I haven't yet seen a machine * supporting anything else. */ valp = of_get_property(cpunode, "clock-frequency", NULL); if (!valp) return -ENODEV; max_freq = (*valp)/1000; g5_cpu_freqs[0].frequency = max_freq; g5_cpu_freqs[1].frequency = max_freq/2; /* Set callbacks */ transition_latency = 12000; g5_switch_freq = g5_scom_switch_freq; g5_query_freq = g5_scom_query_freq; freq_method = "SCOM"; /* Force apply current frequency to make sure everything is in * sync (voltage is right for example). Firmware may leave us with * a strange setting ... 
*/ g5_switch_volt(CPUFREQ_HIGH); msleep(10); g5_pmode_cur = -1; g5_switch_freq(g5_query_freq()); printk(KERN_INFO "Registering G5 CPU frequency driver\n"); printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", freq_method, volt_method); printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", g5_cpu_freqs[1].frequency/1000, g5_cpu_freqs[0].frequency/1000, g5_cpu_freqs[g5_pmode_cur].frequency/1000); rc = cpufreq_register_driver(&g5_cpufreq_driver); /* We keep the CPU node on hold... hopefully, Apple G5 don't have * hotplug CPU with a dynamic device-tree ... */ return rc; bail_noprops: of_node_put(cpunode); return rc; } #endif /* CONFIG_PMAC_SMU */ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) { struct device_node *cpuid = NULL, *hwclock = NULL; const u8 *eeprom = NULL; const u32 *valp; u64 max_freq, min_freq, ih, il; int has_volt = 1, rc = 0; DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" " RackMac3,1...\n"); /* Lookup the cpuid eeprom node */ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); if (cpuid != NULL) eeprom = of_get_property(cpuid, "cpuid", NULL); if (eeprom == NULL) { printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); rc = -ENODEV; goto bail; } /* Lookup the i2c hwclock */ for_each_node_by_name(hwclock, "i2c-hwclock") { const char *loc = of_get_property(hwclock, "hwctrl-location", NULL); if (loc == NULL) continue; if (strcmp(loc, "CPU CLOCK")) continue; if (!of_get_property(hwclock, "platform-get-frequency", NULL)) continue; break; } if (hwclock == NULL) { printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); rc = -ENODEV; goto bail; } DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); /* Now get all the platform functions */ pfunc_cpu_getfreq = pmf_find_function(hwclock, "get-frequency"); pfunc_cpu_setfreq_high = pmf_find_function(hwclock, "set-frequency-high"); pfunc_cpu_setfreq_low = pmf_find_function(hwclock, "set-frequency-low"); pfunc_slewing_done = 
pmf_find_function(hwclock, "slewing-done"); pfunc_cpu0_volt_high = pmf_find_function(hwclock, "set-voltage-high-0"); pfunc_cpu0_volt_low = pmf_find_function(hwclock, "set-voltage-low-0"); pfunc_cpu1_volt_high = pmf_find_function(hwclock, "set-voltage-high-1"); pfunc_cpu1_volt_low = pmf_find_function(hwclock, "set-voltage-low-1"); /* Check we have minimum requirements */ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { printk(KERN_ERR "cpufreq: Can't find platform functions !\n"); rc = -ENODEV; goto bail; } /* Check that we have complete sets */ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) { pmf_put_function(pfunc_cpu0_volt_high); pmf_put_function(pfunc_cpu0_volt_low); pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL; has_volt = 0; } if (!has_volt || pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) { pmf_put_function(pfunc_cpu1_volt_high); pmf_put_function(pfunc_cpu1_volt_low); pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL; } /* Note: The device tree also contains a "platform-set-values" * function for which I haven't quite figured out the usage. It * might have to be called on init and/or wakeup, I'm not too sure * but things seem to work fine without it so far ... */ /* Get max frequency from device-tree */ valp = of_get_property(cpunode, "clock-frequency", NULL); if (!valp) { printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); rc = -ENODEV; goto bail; } max_freq = (*valp)/1000; /* Now calculate reduced frequency by using the cpuid input freq * ratio. 
This requires 64 bits math unless we are willing to lose * some precision */ ih = *((u32 *)(eeprom + 0x10)); il = *((u32 *)(eeprom + 0x20)); /* Check for machines with no useful settings */ if (il == ih) { printk(KERN_WARNING "cpufreq: No low frequency mode available" " on this model !\n"); rc = -ENODEV; goto bail; } min_freq = 0; if (ih != 0 && il != 0) min_freq = (max_freq * il) / ih; /* Sanity check */ if (min_freq >= max_freq || min_freq < 1000) { printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); rc = -ENXIO; goto bail; } g5_cpu_freqs[0].frequency = max_freq; g5_cpu_freqs[1].frequency = min_freq; /* Based on a measurement on Xserve G5, rounded up. */ transition_latency = 10 * NSEC_PER_MSEC; /* Set callbacks */ g5_switch_volt = g5_pfunc_switch_volt; g5_switch_freq = g5_pfunc_switch_freq; g5_query_freq = g5_pfunc_query_freq; /* Force apply current frequency to make sure everything is in * sync (voltage is right for example). Firmware may leave us with * a strange setting ... */ g5_switch_volt(CPUFREQ_HIGH); msleep(10); g5_pmode_cur = -1; g5_switch_freq(g5_query_freq()); printk(KERN_INFO "Registering G5 CPU frequency driver\n"); printk(KERN_INFO "Frequency method: i2c/pfunc, " "Voltage method: %s\n", has_volt ? 
"i2c/pfunc" : "none"); printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", g5_cpu_freqs[1].frequency/1000, g5_cpu_freqs[0].frequency/1000, g5_cpu_freqs[g5_pmode_cur].frequency/1000); rc = cpufreq_register_driver(&g5_cpufreq_driver); bail: if (rc != 0) { pmf_put_function(pfunc_cpu_getfreq); pmf_put_function(pfunc_cpu_setfreq_high); pmf_put_function(pfunc_cpu_setfreq_low); pmf_put_function(pfunc_slewing_done); pmf_put_function(pfunc_cpu0_volt_high); pmf_put_function(pfunc_cpu0_volt_low); pmf_put_function(pfunc_cpu1_volt_high); pmf_put_function(pfunc_cpu1_volt_low); } of_node_put(hwclock); of_node_put(cpuid); of_node_put(cpunode); return rc; } static int __init g5_cpufreq_init(void) { struct device_node *cpunode; int rc = 0; /* Get first CPU node */ cpunode = of_cpu_device_node_get(0); if (cpunode == NULL) { pr_err("cpufreq: Can't find any CPU node\n"); return -ENODEV; } if (of_machine_is_compatible("PowerMac7,2") || of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("RackMac3,1")) rc = g5_pm72_cpufreq_init(cpunode); #ifdef CONFIG_PMAC_SMU else rc = g5_neo2_cpufreq_init(cpunode); #endif /* CONFIG_PMAC_SMU */ return rc; } module_init(g5_cpufreq_init); MODULE_LICENSE("GPL");
gpl-2.0
fengguoqing/linux3.10-mini2440
net/netfilter/xt_set.c
2236
14170
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module which implements the set match and SET target * for netfilter/iptables. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("Xtables: IP set match and target module"); MODULE_ALIAS("xt_SET"); MODULE_ALIAS("ipt_set"); MODULE_ALIAS("ip6t_set"); MODULE_ALIAS("ipt_SET"); MODULE_ALIAS("ip6t_SET"); static inline int match_set(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt, int inv) { if (ip_set_test(index, skb, par, opt)) inv = !inv; return inv; } #define ADT_OPT(n, f, d, fs, cfs, t) \ struct ip_set_adt_opt n = { \ .family = f, \ .dim = d, \ .flags = fs, \ .cmdflags = cfs, \ .ext.timeout = t, \ } /* Revision 0 interface: backward compatible with netfilter/iptables */ static bool set_match_v0(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v0 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.u.compat.dim, info->match_set.u.compat.flags, 0, UINT_MAX); return match_set(info->match_set.index, skb, par, &opt, info->match_set.u.compat.flags & IPSET_INV_MATCH); } static void compat_flags(struct xt_set_info_v0 *info) { u_int8_t i; /* Fill out compatibility data according to enum ip_set_kopt */ info->u.compat.dim = IPSET_DIM_ZERO; if (info->u.flags[0] & IPSET_MATCH_INV) info->u.compat.flags |= IPSET_INV_MATCH; for (i = 0; i < 
IPSET_DIM_MAX-1 && info->u.flags[i]; i++) { info->u.compat.dim++; if (info->u.flags[i] & IPSET_SRC) info->u.compat.flags |= (1<<info->u.compat.dim); } } static int set_match_v0_checkentry(const struct xt_mtchk_param *par) { struct xt_set_info_match_v0 *info = par->matchinfo; ip_set_id_t index; index = ip_set_nfnl_get_byindex(info->match_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find set indentified by id %u to match\n", info->match_set.index); return -ENOENT; } if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { pr_warning("Protocol error: set match dimension " "is over the limit!\n"); ip_set_nfnl_put(info->match_set.index); return -ERANGE; } /* Fill out compatibility data */ compat_flags(&info->match_set); return 0; } static void set_match_v0_destroy(const struct xt_mtdtor_param *par) { struct xt_set_info_match_v0 *info = par->matchinfo; ip_set_nfnl_put(info->match_set.index); } static unsigned int set_target_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v0 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim, info->add_set.u.compat.flags, 0, UINT_MAX); ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim, info->del_set.u.compat.flags, 0, UINT_MAX); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } static int set_target_v0_checkentry(const struct xt_tgchk_param *par) { struct xt_set_info_target_v0 *info = par->targinfo; ip_set_id_t index; if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->add_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find add_set index %u as target\n", info->add_set.index); return -ENOENT; } } if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->del_set.index); if (index == IPSET_INVALID_ID) { 
pr_warning("Cannot find del_set index %u as target\n", info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); return -ENOENT; } } if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 || info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { pr_warning("Protocol error: SET target dimension " "is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); return -ERANGE; } /* Fill out compatibility data */ compat_flags(&info->add_set); compat_flags(&info->del_set); return 0; } static void set_target_v0_destroy(const struct xt_tgdtor_param *par) { const struct xt_set_info_target_v0 *info = par->targinfo; if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); } /* Revision 1 match and target */ static bool set_match_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v1 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.dim, info->match_set.flags, 0, UINT_MAX); if (opt.flags & IPSET_RETURN_NOMATCH) opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH; return match_set(info->match_set.index, skb, par, &opt, info->match_set.flags & IPSET_INV_MATCH); } static int set_match_v1_checkentry(const struct xt_mtchk_param *par) { struct xt_set_info_match_v1 *info = par->matchinfo; ip_set_id_t index; index = ip_set_nfnl_get_byindex(info->match_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find set indentified by id %u to match\n", info->match_set.index); return -ENOENT; } if (info->match_set.dim > IPSET_DIM_MAX) { pr_warning("Protocol error: set match dimension " "is over the limit!\n"); ip_set_nfnl_put(info->match_set.index); return -ERANGE; } return 0; } static void set_match_v1_destroy(const struct xt_mtdtor_param *par) { struct 
xt_set_info_match_v1 *info = par->matchinfo; ip_set_nfnl_put(info->match_set.index); } static unsigned int set_target_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.dim, info->add_set.flags, 0, UINT_MAX); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } static int set_target_v1_checkentry(const struct xt_tgchk_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; ip_set_id_t index; if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->add_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find add_set index %u as target\n", info->add_set.index); return -ENOENT; } } if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->del_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find del_set index %u as target\n", info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); return -ENOENT; } } if (info->add_set.dim > IPSET_DIM_MAX || info->del_set.dim > IPSET_DIM_MAX) { pr_warning("Protocol error: SET target dimension " "is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); return -ERANGE; } return 0; } static void set_target_v1_destroy(const struct xt_tgdtor_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); } /* Revision 2 target */ static 
unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.dim, info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); /* Normalize to fit into jiffies */ if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC) add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } #define set_target_v2_checkentry set_target_v1_checkentry #define set_target_v2_destroy set_target_v1_destroy /* Revision 3 match */ static bool match_counter(u64 counter, const struct ip_set_counter_match *info) { switch (info->op) { case IPSET_COUNTER_NONE: return true; case IPSET_COUNTER_EQ: return counter == info->value; case IPSET_COUNTER_NE: return counter != info->value; case IPSET_COUNTER_LT: return counter < info->value; case IPSET_COUNTER_GT: return counter > info->value; } return false; } static bool set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v3 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.dim, info->match_set.flags, info->flags, UINT_MAX); int ret; if (info->packets.op != IPSET_COUNTER_NONE || info->bytes.op != IPSET_COUNTER_NONE) opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; ret = match_set(info->match_set.index, skb, par, &opt, info->match_set.flags & IPSET_INV_MATCH); if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) return ret; if (!match_counter(opt.ext.packets, &info->packets)) return 0; return match_counter(opt.ext.bytes, &info->bytes); } #define set_match_v3_checkentry set_match_v1_checkentry #define set_match_v3_destroy set_match_v1_destroy static struct 
xt_match set_matches[] __read_mostly = { { .name = "set", .family = NFPROTO_IPV4, .revision = 0, .match = set_match_v0, .matchsize = sizeof(struct xt_set_info_match_v0), .checkentry = set_match_v0_checkentry, .destroy = set_match_v0_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV4, .revision = 1, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 1, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, /* --return-nomatch flag support */ { .name = "set", .family = NFPROTO_IPV4, .revision = 2, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 2, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, /* counters support: update, match */ { .name = "set", .family = NFPROTO_IPV4, .revision = 3, .match = set_match_v3, .matchsize = sizeof(struct xt_set_info_match_v3), .checkentry = set_match_v3_checkentry, .destroy = set_match_v3_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 3, .match = set_match_v3, .matchsize = sizeof(struct xt_set_info_match_v3), .checkentry = set_match_v3_checkentry, .destroy = set_match_v3_destroy, .me = THIS_MODULE }, }; static struct xt_target set_targets[] __read_mostly = { { .name = "SET", .revision = 0, .family = NFPROTO_IPV4, .target = set_target_v0, .targetsize = sizeof(struct xt_set_info_target_v0), .checkentry = set_target_v0_checkentry, .destroy = set_target_v0_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 1, .family = 
NFPROTO_IPV4, .target = set_target_v1, .targetsize = sizeof(struct xt_set_info_target_v1), .checkentry = set_target_v1_checkentry, .destroy = set_target_v1_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 1, .family = NFPROTO_IPV6, .target = set_target_v1, .targetsize = sizeof(struct xt_set_info_target_v1), .checkentry = set_target_v1_checkentry, .destroy = set_target_v1_destroy, .me = THIS_MODULE }, /* --timeout and --exist flags support */ { .name = "SET", .revision = 2, .family = NFPROTO_IPV4, .target = set_target_v2, .targetsize = sizeof(struct xt_set_info_target_v2), .checkentry = set_target_v2_checkentry, .destroy = set_target_v2_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 2, .family = NFPROTO_IPV6, .target = set_target_v2, .targetsize = sizeof(struct xt_set_info_target_v2), .checkentry = set_target_v2_checkentry, .destroy = set_target_v2_destroy, .me = THIS_MODULE }, }; static int __init xt_set_init(void) { int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches)); if (!ret) { ret = xt_register_targets(set_targets, ARRAY_SIZE(set_targets)); if (ret) xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches)); } return ret; } static void __exit xt_set_fini(void) { xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches)); xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets)); } module_init(xt_set_init); module_exit(xt_set_fini);
gpl-2.0
xiaolvmu/villec2-kernel
drivers/hwmon/lm78.c
4028
29619
/* * lm78.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> * Copyright (c) 2007, 2011 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #ifdef CONFIG_ISA #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/io.h> #endif /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; enum chips { lm78, lm79 }; /* Many LM78 constants specified below */ /* Length of ISA address segment */ #define LM78_EXTENT 8 /* Where are the ISA address/data registers relative to the base address */ #define LM78_ADDR_REG_OFFSET 5 #define LM78_DATA_REG_OFFSET 6 /* The LM78 registers */ #define LM78_REG_IN_MAX(nr) (0x2b + (nr) * 2) #define LM78_REG_IN_MIN(nr) (0x2c + (nr) * 2) #define LM78_REG_IN(nr) (0x20 + (nr)) #define LM78_REG_FAN_MIN(nr) (0x3b + (nr)) #define LM78_REG_FAN(nr) (0x28 + (nr)) #define LM78_REG_TEMP 0x27 #define LM78_REG_TEMP_OVER 0x39 
#define LM78_REG_TEMP_HYST 0x3a #define LM78_REG_ALARM1 0x41 #define LM78_REG_ALARM2 0x42 #define LM78_REG_VID_FANDIV 0x47 #define LM78_REG_CONFIG 0x40 #define LM78_REG_CHIPID 0x49 #define LM78_REG_I2C_ADDR 0x48 /* * Conversions. Rounding and limit checking is only done on the TO_REG * variants. */ /* * IN: mV (0V to 4.08V) * REG: 16mV/bit */ static inline u8 IN_TO_REG(unsigned long val) { unsigned long nval = SENSORS_LIMIT(val, 0, 4080); return (nval + 8) / 16; } #define IN_FROM_REG(val) ((val) * 16) static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0) return 255; return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } static inline int FAN_FROM_REG(u8 val, int div) { return val == 0 ? -1 : val == 255 ? 0 : 1350000 / (val * div); } /* * TEMP: mC (-128C to +127C) * REG: 1C/bit, two's complement */ static inline s8 TEMP_TO_REG(int val) { int nval = SENSORS_LIMIT(val, -128000, 127000) ; return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000; } static inline int TEMP_FROM_REG(s8 val) { return val * 1000; } #define DIV_FROM_REG(val) (1 << (val)) struct lm78_data { struct i2c_client *client; struct device *hwmon_dev; struct mutex lock; enum chips type; /* For ISA device only */ const char *name; int isa_addr; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[7]; /* Register value */ u8 in_max[7]; /* Register value */ u8 in_min[7]; /* Register value */ u8 fan[3]; /* Register value */ u8 fan_min[3]; /* Register value */ s8 temp; /* Register value */ s8 temp_over; /* Register value */ s8 temp_hyst; /* Register value */ u8 fan_div[3]; /* Register encoding, shifted right */ u8 vid; /* Register encoding, combined */ u16 alarms; /* Register encoding, combined */ }; static int lm78_read_value(struct lm78_data *data, u8 reg); static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value); static struct lm78_data *lm78_update_device(struct device *dev); static 
void lm78_init_device(struct lm78_data *data);

/* 7 Voltages */

/* sysfs show callback for inN_input (millivolts) */
static ssize_t show_in(struct device *dev, struct device_attribute *da,
		       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index]));
}

/* sysfs show callback for inN_min */
static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
			   char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index]));
}

/* sysfs show callback for inN_max */
static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
			   char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index]));
}

/* sysfs store callback for inN_min: cache and write the limit register */
static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
			  const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = dev_get_drvdata(dev);
	int nr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_min[nr] = IN_TO_REG(val);
	lm78_write_value(data, LM78_REG_IN_MIN(nr), data->in_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs store callback for inN_max */
static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
			  const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = dev_get_drvdata(dev);
	int nr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_max[nr] = IN_TO_REG(val);
	lm78_write_value(data, LM78_REG_IN_MAX(nr), data->in_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* declare input/min/max attributes for one voltage channel */
#define show_in_offset(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
		show_in, NULL, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
		show_in_min, set_in_min, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
		show_in_max, set_in_max, offset);

show_in_offset(0);
show_in_offset(1);
show_in_offset(2);
show_in_offset(3);
show_in_offset(4);
show_in_offset(5);
show_in_offset(6);

/* Temperature */
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
			 char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp));
}

static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
			      char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_over));
}

static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
			     const char *buf, size_t count)
{
	struct lm78_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_over = TEMP_TO_REG(val);
	lm78_write_value(data, LM78_REG_TEMP_OVER, data->temp_over);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
			      char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_hyst));
}

static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
			     const char *buf, size_t count)
{
	struct lm78_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_hyst = TEMP_TO_REG(val);
	lm78_write_value(data, LM78_REG_TEMP_HYST, data->temp_hyst);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_over,
		   set_temp_over);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_hyst,
		   set_temp_hyst);

/* 3 Fans
*/

/* sysfs show callback for fanN_input (RPM, derived from count + divisor) */
static ssize_t show_fan(struct device *dev, struct device_attribute *da,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	int nr = attr->index;
	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
		DIV_FROM_REG(data->fan_div[nr])));
}

static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	int nr = attr->index;
	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
		DIV_FROM_REG(data->fan_div[nr])));
}

static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = dev_get_drvdata(dev);
	int nr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	lm78_write_value(data, LM78_REG_FAN_MIN(nr), data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
			    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}

/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor. This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
			   const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct lm78_data *data = dev_get_drvdata(dev);
	int nr = attr->index;
	unsigned long min;
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* remember the minimum in RPM so it can be re-encoded below */
	min = FAN_FROM_REG(data->fan_min[nr],
			   DIV_FROM_REG(data->fan_div[nr]));

	switch (val) {
	case 1:
		data->fan_div[nr] = 0;
		break;
	case 2:
		data->fan_div[nr] = 1;
		break;
	case 4:
		data->fan_div[nr] = 2;
		break;
	case 8:
		data->fan_div[nr] = 3;
		break;
	default:
		dev_err(dev, "fan_div value %ld not "
			"supported. Choose one of 1, 2, 4 or 8!\n", val);
		mutex_unlock(&data->update_lock);
		return -EINVAL;
	}

	/* divisors for fan1/fan2 share one register; fan3 is fixed in H/W */
	reg = lm78_read_value(data, LM78_REG_VID_FANDIV);
	switch (nr) {
	case 0:
		reg = (reg & 0xcf) | (data->fan_div[nr] << 4);
		break;
	case 1:
		reg = (reg & 0x3f) | (data->fan_div[nr] << 6);
		break;
	}
	lm78_write_value(data, LM78_REG_VID_FANDIV, reg);

	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
	lm78_write_value(data, LM78_REG_FAN_MIN(nr), data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

#define show_fan_offset(offset) \
static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
		show_fan, NULL, offset - 1); \
static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
		show_fan_min, set_fan_min, offset - 1);

show_fan_offset(1);
show_fan_offset(2);
show_fan_offset(3);

/* Fan 3 divisor is locked in H/W */
static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
		show_fan_div, set_fan_div, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
		show_fan_div, set_fan_div, 1);
static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);

/* VID */
static ssize_t show_vid(struct device *dev, struct device_attribute *da,
			char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%d\n", vid_from_reg(data->vid, 82));
}

static DEVICE_ATTR(cpu0_vid,
S_IRUGO, show_vid, NULL);

/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
			   char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	return sprintf(buf, "%u\n", data->alarms);
}

static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);

/* per-channel alarm bit; index is the bit number in the combined word */
static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
			  char *buf)
{
	struct lm78_data *data = lm78_update_device(dev);
	int nr = to_sensor_dev_attr(da)->index;
	return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}

static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);

static struct attribute *lm78_attributes[] = {
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_min.dev_attr.attr,
	&sensor_dev_attr_in0_max.dev_attr.attr,
	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_min.dev_attr.attr,
	&sensor_dev_attr_in1_max.dev_attr.attr,
	&sensor_dev_attr_in1_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_min.dev_attr.attr,
	&sensor_dev_attr_in2_max.dev_attr.attr,
	&sensor_dev_attr_in2_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in3_min.dev_attr.attr,
	&sensor_dev_attr_in3_max.dev_attr.attr,
	&sensor_dev_attr_in3_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,
	&sensor_dev_attr_in4_min.dev_attr.attr,
	&sensor_dev_attr_in4_max.dev_attr.attr,
	&sensor_dev_attr_in4_alarm.dev_attr.attr,
	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in5_min.dev_attr.attr,
	&sensor_dev_attr_in5_max.dev_attr.attr,
	&sensor_dev_attr_in5_alarm.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in6_min.dev_attr.attr,
	&sensor_dev_attr_in6_max.dev_attr.attr,
	&sensor_dev_attr_in6_alarm.dev_attr.attr,
	&dev_attr_temp1_input.attr,
	&dev_attr_temp1_max.attr,
	&dev_attr_temp1_max_hyst.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_div.dev_attr.attr,
	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan2_min.dev_attr.attr,
	&sensor_dev_attr_fan2_div.dev_attr.attr,
	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan3_min.dev_attr.attr,
	&sensor_dev_attr_fan3_div.dev_attr.attr,
	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
	&dev_attr_alarms.attr,
	&dev_attr_cpu0_vid.attr,
	NULL
};

static const struct attribute_group lm78_group = {
	.attrs = lm78_attributes,
};

/*
 * ISA related code
 */
#ifdef CONFIG_ISA

/* ISA device, if found */
static struct platform_device *pdev;

static unsigned short isa_address = 0x290;

/*
 * I2C devices get this name attribute automatically, but for ISA devices
 * we must create it by ourselves.
 */
static ssize_t show_name(struct device *dev, struct device_attribute
			 *devattr, char *buf)
{
	struct lm78_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", data->name);
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

/* driver data of the ISA instance, or NULL if none was registered */
static struct lm78_data *lm78_data_if_isa(void)
{
	return pdev ?
platform_get_drvdata(pdev) : NULL;
}

/* Returns 1 if the I2C chip appears to be an alias of the ISA chip */
static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
{
	struct lm78_data *isa;
	int i;

	if (!pdev)	/* No ISA chip */
		return 0;

	isa = platform_get_drvdata(pdev);

	if (lm78_read_value(isa, LM78_REG_I2C_ADDR) != client->addr)
		return 0;	/* Address doesn't match */
	if ((lm78_read_value(isa, LM78_REG_CHIPID) & 0xfe) != (chipid & 0xfe))
		return 0;	/* Chip type doesn't match */

	/*
	 * We compare all the limit registers, the config register and the
	 * interrupt mask registers
	 */
	for (i = 0x2b; i <= 0x3d; i++) {
		if (lm78_read_value(isa, i) !=
		    i2c_smbus_read_byte_data(client, i))
			return 0;
	}
	if (lm78_read_value(isa, LM78_REG_CONFIG) !=
	    i2c_smbus_read_byte_data(client, LM78_REG_CONFIG))
		return 0;
	for (i = 0x43; i <= 0x46; i++) {
		if (lm78_read_value(isa, i) !=
		    i2c_smbus_read_byte_data(client, i))
			return 0;
	}

	return 1;
}
#else /* !CONFIG_ISA */

static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
{
	return 0;
}

static struct lm78_data *lm78_data_if_isa(void)
{
	return NULL;
}
#endif /* CONFIG_ISA */

/* i2c detect callback: identify an LM78/LM79 at the probed address */
static int lm78_i2c_detect(struct i2c_client *client,
			   struct i2c_board_info *info)
{
	int i;
	struct lm78_data *isa = lm78_data_if_isa();
	const char *client_name;
	struct i2c_adapter *adapter = client->adapter;
	int address = client->addr;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/*
	 * We block updates of the ISA device to minimize the risk of
	 * concurrent access to the same LM78 chip through different
	 * interfaces.
	 */
	if (isa)
		mutex_lock(&isa->update_lock);

	if ((i2c_smbus_read_byte_data(client, LM78_REG_CONFIG) & 0x80) ||
	    i2c_smbus_read_byte_data(client, LM78_REG_I2C_ADDR) != address)
		goto err_nodev;

	/* Explicitly prevent the misdetection of Winbond chips */
	i = i2c_smbus_read_byte_data(client, 0x4f);
	if (i == 0xa3 || i == 0x5c)
		goto err_nodev;

	/* Determine the chip type. */
	i = i2c_smbus_read_byte_data(client, LM78_REG_CHIPID);
	if (i == 0x00 || i == 0x20	/* LM78 */
	 || i == 0x40)			/* LM78-J */
		client_name = "lm78";
	else if ((i & 0xfe) == 0xc0)
		client_name = "lm79";
	else
		goto err_nodev;

	if (lm78_alias_detect(client, i)) {
		dev_dbg(&adapter->dev, "Device at 0x%02x appears to "
			"be the same as ISA device\n", address);
		goto err_nodev;
	}

	if (isa)
		mutex_unlock(&isa->update_lock);

	strlcpy(info->type, client_name, I2C_NAME_SIZE);

	return 0;

 err_nodev:
	if (isa)
		mutex_unlock(&isa->update_lock);
	return -ENODEV;
}

static int lm78_i2c_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct lm78_data *data;
	int err;

	data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	data->client = client;
	data->type = id->driver_data;

	/* Initialize the LM78 chip */
	lm78_init_device(data);

	/* Register sysfs hooks */
	err = sysfs_create_group(&client->dev.kobj, &lm78_group);
	if (err)
		goto ERROR3;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto ERROR4;
	}

	return 0;

ERROR4:
	sysfs_remove_group(&client->dev.kobj, &lm78_group);
ERROR3:
	kfree(data);
	return err;
}

static int lm78_i2c_remove(struct i2c_client *client)
{
	struct lm78_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &lm78_group);
	kfree(data);

	return 0;
}

static const struct i2c_device_id lm78_i2c_id[] = {
	{ "lm78", lm78 },
	{ "lm79", lm79 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm78_i2c_id);

static struct i2c_driver lm78_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "lm78",
	},
	.probe		= lm78_i2c_probe,
	.remove		= lm78_i2c_remove,
	.id_table	= lm78_i2c_id,
	.detect		= lm78_i2c_detect,
	.address_list	= normal_i2c,
};

/*
 * The SMBus locks itself, but ISA access must be locked explicitly!
 * We don't want to lock the whole ISA bus, so we lock each client
 * separately.
 * We ignore the LM78 BUSY flag at this moment - it could lead to deadlocks,
 * would slow down the LM78 access and should not be necessary.
 */
static int lm78_read_value(struct lm78_data *data, u8 reg)
{
	struct i2c_client *client = data->client;

#ifdef CONFIG_ISA
	if (!client) { /* ISA device */
		int res;
		mutex_lock(&data->lock);
		outb_p(reg, data->isa_addr + LM78_ADDR_REG_OFFSET);
		res = inb_p(data->isa_addr + LM78_DATA_REG_OFFSET);
		mutex_unlock(&data->lock);
		return res;
	} else
#endif
		return i2c_smbus_read_byte_data(client, reg);
}

static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value)
{
	struct i2c_client *client = data->client;

#ifdef CONFIG_ISA
	if (!client) { /* ISA device */
		mutex_lock(&data->lock);
		outb_p(reg, data->isa_addr + LM78_ADDR_REG_OFFSET);
		outb_p(value, data->isa_addr + LM78_DATA_REG_OFFSET);
		mutex_unlock(&data->lock);
		return 0;
	} else
#endif
		return i2c_smbus_write_byte_data(client, reg, value);
}

/* one-time chip setup: start monitoring, seed cached fan minimums */
static void lm78_init_device(struct lm78_data *data)
{
	u8 config;
	int i;

	/* Start monitoring */
	config = lm78_read_value(data, LM78_REG_CONFIG);
	if ((config & 0x09) != 0x01)
		lm78_write_value(data, LM78_REG_CONFIG,
				 (config & 0xf7) | 0x01);

	/* A few vars need to be filled upon startup */
	for (i = 0; i < 3; i++) {
		data->fan_min[i] = lm78_read_value(data,
					LM78_REG_FAN_MIN(i));
	}

	mutex_init(&data->update_lock);
}

/* refresh the register cache at most every 1.5 s, under update_lock */
static struct lm78_data *lm78_update_device(struct device *dev)
{
	struct lm78_data *data = dev_get_drvdata(dev);
	int i;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	    || !data->valid) {

		dev_dbg(dev, "Starting lm78 update\n");

		for (i = 0; i <= 6; i++) {
			data->in[i] =
			    lm78_read_value(data, LM78_REG_IN(i));
			data->in_min[i] =
			    lm78_read_value(data, LM78_REG_IN_MIN(i));
			data->in_max[i] =
			    lm78_read_value(data, LM78_REG_IN_MAX(i));
		}
		for (i = 0; i < 3; i++) {
			data->fan[i] =
			    lm78_read_value(data, LM78_REG_FAN(i));
			data->fan_min[i] =
			    lm78_read_value(data, LM78_REG_FAN_MIN(i));
		}
		data->temp = lm78_read_value(data, LM78_REG_TEMP);
		data->temp_over =
		    lm78_read_value(data, LM78_REG_TEMP_OVER);
		data->temp_hyst =
		    lm78_read_value(data, LM78_REG_TEMP_HYST);
		i = lm78_read_value(data, LM78_REG_VID_FANDIV);
		data->vid = i & 0x0f;
		/* on the LM79 the 5th VID bit lives in the chip-id reg */
		if (data->type == lm79)
			data->vid |=
			    (lm78_read_value(data, LM78_REG_CHIPID) &
			     0x01) << 4;
		else
			data->vid |= 0x10;
		data->fan_div[0] = (i >> 4) & 0x03;
		data->fan_div[1] = i >> 6;
		data->alarms = lm78_read_value(data, LM78_REG_ALARM1) +
		    (lm78_read_value(data, LM78_REG_ALARM2) << 8);
		data->last_updated = jiffies;
		data->valid = 1;

		data->fan_div[2] = 1;	/* fan3 divisor is fixed in H/W */
	}

	mutex_unlock(&data->update_lock);

	return data;
}

#ifdef CONFIG_ISA
static int __devinit lm78_isa_probe(struct platform_device *pdev)
{
	int err;
	struct lm78_data *data;
	struct resource *res;

	/* Reserve the ISA region */
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) {
		err = -EBUSY;
		goto exit;
	}

	data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit_release_region;
	}
	mutex_init(&data->lock);
	data->isa_addr = res->start;
	platform_set_drvdata(pdev, data);

	if (lm78_read_value(data, LM78_REG_CHIPID) & 0x80) {
		data->type = lm79;
		data->name = "lm79";
	} else {
		data->type = lm78;
		data->name = "lm78";
	}

	/* Initialize the LM78 chip */
	lm78_init_device(data);

	/* Register sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &lm78_group);
	if (err)
		goto exit_remove_files;
	err = device_create_file(&pdev->dev, &dev_attr_name);
	if (err)
		goto exit_remove_files;

	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_files;
	}

	return 0;

 exit_remove_files:
	sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
	device_remove_file(&pdev->dev, &dev_attr_name);
	kfree(data);
 exit_release_region:
	release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
 exit:
	return err;
}

static int __devexit lm78_isa_remove(struct platform_device *pdev)
{
	struct lm78_data *data = platform_get_drvdata(pdev);
	struct resource *res;

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
	device_remove_file(&pdev->dev, &dev_attr_name);
	kfree(data);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start + LM78_ADDR_REG_OFFSET, 2);

	return 0;
}

static struct platform_driver lm78_isa_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "lm78",
	},
	.probe		= lm78_isa_probe,
	.remove		= __devexit_p(lm78_isa_remove),
};

/* return 1 if a supported chip is found, 0 otherwise */
static int __init lm78_isa_found(unsigned short address)
{
	int val, save, found = 0;
	int port;

	/*
	 * Some boards declare base+0 to base+7 as a PNP device, some base+4
	 * to base+7 and some base+5 to base+6. So we better request each port
	 * individually for the probing phase.
	 */
	for (port = address; port < address + LM78_EXTENT; port++) {
		if (!request_region(port, 1, "lm78")) {
			pr_debug("Failed to request port 0x%x\n", port);
			goto release;
		}
	}

#define REALLY_SLOW_IO
	/*
	 * We need the timeouts for at least some LM78-like
	 * chips. But only if we read 'undefined' registers.
	 */
	val = inb_p(address + 1);
	if (inb_p(address + 2) != val
	 || inb_p(address + 3) != val
	 || inb_p(address + 7) != val)
		goto release;
#undef REALLY_SLOW_IO

	/*
	 * We should be able to change the 7 LSB of the address port. The
	 * MSB (busy flag) should be clear initially, set after the write.
	 */
	save = inb_p(address + LM78_ADDR_REG_OFFSET);
	if (save & 0x80)
		goto release;
	val = ~save & 0x7f;
	outb_p(val, address + LM78_ADDR_REG_OFFSET);
	if (inb_p(address + LM78_ADDR_REG_OFFSET) != (val | 0x80)) {
		/* restore the original address register before giving up */
		outb_p(save, address + LM78_ADDR_REG_OFFSET);
		goto release;
	}

	/* We found a device, now see if it could be an LM78 */
	outb_p(LM78_REG_CONFIG, address + LM78_ADDR_REG_OFFSET);
	val = inb_p(address + LM78_DATA_REG_OFFSET);
	if (val & 0x80)
		goto release;
	outb_p(LM78_REG_I2C_ADDR, address + LM78_ADDR_REG_OFFSET);
	val = inb_p(address + LM78_DATA_REG_OFFSET);
	if (val < 0x03 || val > 0x77)	/* Not a valid I2C address */
		goto release;

	/* The busy flag should be clear again */
	if (inb_p(address + LM78_ADDR_REG_OFFSET) & 0x80)
		goto release;

	/* Explicitly prevent the misdetection of Winbond chips */
	outb_p(0x4f, address + LM78_ADDR_REG_OFFSET);
	val = inb_p(address + LM78_DATA_REG_OFFSET);
	if (val == 0xa3 || val == 0x5c)
		goto release;

	/* Explicitly prevent the misdetection of ITE chips */
	outb_p(0x58, address + LM78_ADDR_REG_OFFSET);
	val = inb_p(address + LM78_DATA_REG_OFFSET);
	if (val == 0x90)
		goto release;

	/* Determine the chip type */
	outb_p(LM78_REG_CHIPID, address + LM78_ADDR_REG_OFFSET);
	val = inb_p(address + LM78_DATA_REG_OFFSET);
	if (val == 0x00 || val == 0x20	/* LM78 */
	 || val == 0x40			/* LM78-J */
	 || (val & 0xfe) == 0xc0)	/* LM79 */
		found = 1;

	if (found)
		pr_info("Found an %s chip at %#x\n",
			val & 0x80 ? "LM79" : "LM78", (int)address);

 release:
	/* release every port we managed to reserve above */
	for (port--; port >= address; port--)
		release_region(port, 1);
	return found;
}

/* register a platform device for the ISA chip; sets global pdev */
static int __init lm78_isa_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + LM78_EXTENT - 1,
		.name	= "lm78",
		.flags	= IORESOURCE_IO,
	};
	int err;

	pdev = platform_device_alloc("lm78", address);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		pr_err("Device resource addition failed (%d)\n", err);
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_put;
	}

	return 0;

 exit_device_put:
	platform_device_put(pdev);
 exit:
	pdev = NULL;
	return err;
}

static int __init lm78_isa_register(void)
{
	int res;

	if (lm78_isa_found(isa_address)) {
		res = platform_driver_register(&lm78_isa_driver);
		if (res)
			goto exit;

		/* Sets global pdev as a side effect */
		res = lm78_isa_device_add(isa_address);
		if (res)
			goto exit_unreg_isa_driver;
	}

	return 0;

 exit_unreg_isa_driver:
	platform_driver_unregister(&lm78_isa_driver);
 exit:
	return res;
}

static void lm78_isa_unregister(void)
{
	if (pdev) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&lm78_isa_driver);
	}
}
#else /* !CONFIG_ISA */

static int __init lm78_isa_register(void)
{
	return 0;
}

static void lm78_isa_unregister(void)
{
}
#endif /* CONFIG_ISA */

static int __init sm_lm78_init(void)
{
	int res;

	/*
	 * We register the ISA device first, so that we can skip the
	 * registration of an I2C interface to the same device.
	 */
	res = lm78_isa_register();
	if (res)
		goto exit;

	res = i2c_add_driver(&lm78_driver);
	if (res)
		goto exit_unreg_isa_device;

	return 0;

 exit_unreg_isa_device:
	lm78_isa_unregister();
 exit:
	return res;
}

static void __exit sm_lm78_exit(void)
{
	lm78_isa_unregister();
	i2c_del_driver(&lm78_driver);
}

MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM78/LM79 driver");
MODULE_LICENSE("GPL");

module_init(sm_lm78_init);
module_exit(sm_lm78_exit);
gpl-2.0
TV-LP51-Devices/kernel_asus_grouper
drivers/pcmcia/bcm63xx_pcmcia.c
4284
13724
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/gpio.h>

#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include "bcm63xx_pcmcia.h"

#define PFX	"bcm63xx_pcmcia: "

#ifdef CONFIG_CARDBUS
/* if cardbus is used, platform device needs reference to actual pci
 * device */
static struct pci_dev *bcm63xx_cb_dev;
#endif

/*
 * read/write helper for pcmcia regs
 */
static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off)
{
	return bcm_readl(skt->base + off);
}

static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt,
				 u32 val, u32 off)
{
	bcm_writel(val, skt->base + off);
}

/*
 * This callback should (re-)initialise the socket, turn on status
 * interrupts and PCMCIA bus, and wait for power to stabilise so that
 * the card status signals report correctly.
 *
 * Hardware cannot do that.
 */
static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock)
{
	return 0;
}

/*
 * This callback should remove power on the socket, disable IRQs from
 * the card, turn off status interrupts, and disable the PCMCIA bus.
 *
 * Hardware cannot do that.
 */
static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock)
{
	return 0;
}

/*
 * Implements the set_socket() operation for the in-kernel PCMCIA
 * service (formerly SS_SetSocket in Card Services). We more or
 * less punt all of this work and let the kernel handle the details
 * of power configuration, reset, &c. We also record the value of
 * `state' in order to regurgitate it to the PCMCIA core later.
 */
static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock,
				     socket_state_t *state)
{
	struct bcm63xx_pcmcia_socket *skt;
	unsigned long flags;
	u32 val;

	skt = sock->driver_data;

	spin_lock_irqsave(&skt->lock, flags);

	/* note: hardware cannot control socket power, so we will
	 * always report SS_POWERON */

	/* apply socket reset */
	val = pcmcia_readl(skt, PCMCIA_C1_REG);
	if (state->flags & SS_RESET)
		val |= PCMCIA_C1_RESET_MASK;
	else
		val &= ~PCMCIA_C1_RESET_MASK;

	/* reverse reset logic for cardbus card */
	if (skt->card_detected && (skt->card_type & CARD_CARDBUS))
		val ^= PCMCIA_C1_RESET_MASK;

	pcmcia_writel(skt, val, PCMCIA_C1_REG);

	/* keep requested state for event reporting */
	skt->requested_state = *state;

	spin_unlock_irqrestore(&skt->lock, flags);

	return 0;
}

/*
 * identity cardtype from VS[12] input, CD[12] input while only VS2 is
 * floating, and CD[12] input while only VS1 is floating
 */
enum {
	IN_VS1 = (1 << 0),
	IN_VS2 = (1 << 1),
	IN_CD1_VS2H = (1 << 2),
	IN_CD2_VS2H = (1 << 3),
	IN_CD1_VS1H = (1 << 4),
	IN_CD2_VS1H = (1 << 5),
};

static const u8 vscd_to_cardtype[] = {

	/* VS1 float, VS2 float */
	[IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V),

	/* VS1 grounded, VS2 float */
	[IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V),

	/* VS1 grounded, VS2 grounded */
	[0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV),

	/* VS1 tied to CD1, VS2 float */
	[IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V),

	/* VS1 grounded, VS2 tied to CD2 */
	[IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV),

	/* VS1 tied to CD2, VS2 grounded */
	[IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV),

	/* VS1 float, VS2 grounded */
	[IN_VS1] = (CARD_PCCARD | CARD_XV),

	/* VS1 float, VS2 tied to CD2 */
	[IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V),

	/* VS1 float, VS2 tied to CD1 */
	[IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV),

	/* VS1 tied to CD2, VS2 float */
	[IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV),

	/* VS2 grounded, VS1 is tied
	   to CD1, CD2 is grounded */
	[IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */
};

/*
 * poll hardware to check card insertion status
 */
static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt)
{
	unsigned int stat;
	u32 val;

	stat = 0;

	/* check CD for card presence */
	val = pcmcia_readl(skt, PCMCIA_C1_REG);

	if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK))
		stat |= SS_DETECT;

	/* if new insertion, detect cardtype */
	if ((stat & SS_DETECT) && !skt->card_detected) {
		/* NOTE: this inner 'stat' intentionally shadows the outer
		 * one; it accumulates IN_* probe bits used as the index
		 * into vscd_to_cardtype */
		unsigned int stat = 0;

		/* float VS1, float VS2 */
		val |= PCMCIA_C1_VS1OE_MASK;
		val |= PCMCIA_C1_VS2OE_MASK;
		pcmcia_writel(skt, val, PCMCIA_C1_REG);

		/* wait for output to stabilize and read VS[12] */
		udelay(10);
		val = pcmcia_readl(skt, PCMCIA_C1_REG);
		stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0;
		stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0;

		/* drive VS1 low, float VS2 */
		val &= ~PCMCIA_C1_VS1OE_MASK;
		val |= PCMCIA_C1_VS2OE_MASK;
		pcmcia_writel(skt, val, PCMCIA_C1_REG);

		/* wait for output to stabilize and read CD[12] */
		udelay(10);
		val = pcmcia_readl(skt, PCMCIA_C1_REG);
		stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0;
		stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0;

		/* float VS1, drive VS2 low */
		val |= PCMCIA_C1_VS1OE_MASK;
		val &= ~PCMCIA_C1_VS2OE_MASK;
		pcmcia_writel(skt, val, PCMCIA_C1_REG);

		/* wait for output to stabilize and read CD[12] */
		udelay(10);
		val = pcmcia_readl(skt, PCMCIA_C1_REG);
		stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0;
		stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0;

		/* guess cardtype from all this */
		skt->card_type = vscd_to_cardtype[stat];
		if (!skt->card_type)
			dev_err(&skt->socket.dev, "unsupported card type\n");

		/* drive both VS pin to 0 again */
		val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK);

		/* enable correct logic */
		val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK);
		if (skt->card_type & CARD_PCCARD)
			val |= PCMCIA_C1_EN_PCMCIA_MASK;
		else
			val |= PCMCIA_C1_EN_CARDBUS_MASK;

		pcmcia_writel(skt, val, PCMCIA_C1_REG);
	}
	skt->card_detected = (stat & SS_DETECT) ? 1 : 0;

	/* report card type/voltage */
	if (skt->card_type & CARD_CARDBUS)
		stat |= SS_CARDBUS;
	if (skt->card_type & CARD_3V)
		stat |= SS_3VCARD;
	if (skt->card_type & CARD_XV)
		stat |= SS_XVCARD;
	stat |= SS_POWERON;

	if (gpio_get_value(skt->pd->ready_gpio))
		stat |= SS_READY;

	return stat;
}

/*
 * core request to get current socket status
 */
static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
				     unsigned int *status)
{
	struct bcm63xx_pcmcia_socket *skt;

	skt = sock->driver_data;

	spin_lock_bh(&skt->lock);
	*status = __get_socket_status(skt);
	spin_unlock_bh(&skt->lock);

	return 0;
}

/*
 * socket polling timer callback
 */
static void bcm63xx_pcmcia_poll(unsigned long data)
{
	struct bcm63xx_pcmcia_socket *skt;
	unsigned int stat, events;

	skt = (struct bcm63xx_pcmcia_socket *)data;

	spin_lock_bh(&skt->lock);
	stat = __get_socket_status(skt);

	/* keep only changed bits, and mask with required one from the
	 * core */
	events = (stat ^ skt->old_status) & skt->requested_state.csc_mask;
	skt->old_status = stat;
	spin_unlock_bh(&skt->lock);

	if (events)
		pcmcia_parse_events(&skt->socket, events);

	mod_timer(&skt->timer,
		  jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
}

static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock,
				     struct pccard_io_map *map)
{
	/* this doesn't seem to be called by pcmcia layer if static
	 * mapping is used */
	return 0;
}

static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock,
				      struct pccard_mem_map *map)
{
	struct
bcm63xx_pcmcia_socket *skt;
	struct resource *res;

	skt = sock->driver_data;

	/* attribute vs common memory select static mapping */
	if (map->flags & MAP_ATTRIB)
		res = skt->attr_res;
	else
		res = skt->common_res;

	map->static_start = res->start + map->card_start;
	return 0;
}

static struct pccard_operations bcm63xx_pcmcia_operations = {
	.init			= bcm63xx_pcmcia_sock_init,
	.suspend		= bcm63xx_pcmcia_suspend,
	.get_status		= bcm63xx_pcmcia_get_status,
	.set_socket		= bcm63xx_pcmcia_set_socket,
	.set_io_map		= bcm63xx_pcmcia_set_io_map,
	.set_mem_map		= bcm63xx_pcmcia_set_mem_map,
};

/*
 * register pcmcia socket to core
 */
static int __devinit bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
{
	struct bcm63xx_pcmcia_socket *skt;
	struct pcmcia_socket *sock;
	struct resource *res, *irq_res;
	unsigned int regmem_size = 0, iomem_size = 0;
	u32 val;
	int ret;

	skt = kzalloc(sizeof(*skt), GFP_KERNEL);
	if (!skt)
		return -ENOMEM;
	spin_lock_init(&skt->lock);
	sock = &skt->socket;
	sock->driver_data = skt;

	/* make sure we have all resources we need */
	skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	skt->pd = pdev->dev.platform_data;
	if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
		ret = -EINVAL;
		goto err;
	}

	/* remap pcmcia registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regmem_size = resource_size(res);
	if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) {
		ret = -EINVAL;
		goto err;
	}
	skt->reg_res = res;

	skt->base = ioremap(res->start, regmem_size);
	if (!skt->base) {
		ret = -ENOMEM;
		goto err;
	}

	/* remap io registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	iomem_size = resource_size(res);
	skt->io_base = ioremap(res->start, iomem_size);
	if (!skt->io_base) {
		ret = -ENOMEM;
		goto err;
	}

	/* resources are static */
	sock->resource_ops = &pccard_static_ops;
	sock->ops = &bcm63xx_pcmcia_operations;
	sock->owner = THIS_MODULE;
	sock->dev.parent = &pdev->dev;
	sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
	sock->io_offset = (unsigned long)skt->io_base;
	sock->pci_irq = irq_res->start;

#ifdef CONFIG_CARDBUS
	sock->cb_dev = bcm63xx_cb_dev;
	if (bcm63xx_cb_dev)
		sock->features |= SS_CAP_CARDBUS;
#endif

	/* assume common & attribute memory have the same size */
	sock->map_size = resource_size(skt->common_res);

	/* initialize polling timer */
	setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);

	/* initialize pcmcia control register, drive VS[12] to 0,
	 * leave CB IDSEL to the old value since it is set by the PCI
	 * layer */
	val = pcmcia_readl(skt, PCMCIA_C1_REG);
	val &= PCMCIA_C1_CBIDSEL_MASK;
	val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK;
	pcmcia_writel(skt, val, PCMCIA_C1_REG);

	/*
	 * Hardware has only one set of timings registers, not one for
	 * each memory access type, so we configure them for the
	 * slowest one: attribute memory.
	 */
	val = PCMCIA_C2_DATA16_MASK;
	val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT;
	val |= 6 << PCMCIA_C2_INACTIVE_SHIFT;
	val |= 3 << PCMCIA_C2_SETUP_SHIFT;
	val |= 3 << PCMCIA_C2_HOLD_SHIFT;
	pcmcia_writel(skt, val, PCMCIA_C2_REG);

	ret = pcmcia_register_socket(sock);
	if (ret)
		goto err;

	/* start polling socket */
	mod_timer(&skt->timer,
		  jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));

	platform_set_drvdata(pdev, skt);
	return 0;

err:
	/* partial-init cleanup: release whatever was acquired above */
	if (skt->io_base)
		iounmap(skt->io_base);
	if (skt->base)
		iounmap(skt->base);
	if (skt->reg_res)
		release_mem_region(skt->reg_res->start, regmem_size);
	kfree(skt);
	return ret;
}

static int __devexit bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
{
	struct bcm63xx_pcmcia_socket *skt;
	struct resource *res;

	skt = platform_get_drvdata(pdev);
	del_timer_sync(&skt->timer);
	iounmap(skt->base);
	iounmap(skt->io_base);
	res = skt->reg_res;
	release_mem_region(res->start, resource_size(res));
	kfree(skt);
	return 0;
}

struct platform_driver bcm63xx_pcmcia_driver = {
	.probe	= bcm63xx_drv_pcmcia_probe,
	.remove	= __devexit_p(bcm63xx_drv_pcmcia_remove),
	.driver	= {
		.name	= "bcm63xx_pcmcia",
		.owner
= THIS_MODULE, }, }; #ifdef CONFIG_CARDBUS static int __devinit bcm63xx_cb_probe(struct pci_dev *dev, const struct pci_device_id *id) { /* keep pci device */ bcm63xx_cb_dev = dev; return platform_driver_register(&bcm63xx_pcmcia_driver); } static void __devexit bcm63xx_cb_exit(struct pci_dev *dev) { platform_driver_unregister(&bcm63xx_pcmcia_driver); bcm63xx_cb_dev = NULL; } static struct pci_device_id bcm63xx_cb_table[] = { { .vendor = PCI_VENDOR_ID_BROADCOM, .device = BCM6348_CPU_ID, .subvendor = PCI_VENDOR_ID_BROADCOM, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_BRIDGE_CARDBUS << 8, .class_mask = ~0, }, { .vendor = PCI_VENDOR_ID_BROADCOM, .device = BCM6358_CPU_ID, .subvendor = PCI_VENDOR_ID_BROADCOM, .subdevice = PCI_ANY_ID, .class = PCI_CLASS_BRIDGE_CARDBUS << 8, .class_mask = ~0, }, { }, }; MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table); static struct pci_driver bcm63xx_cardbus_driver = { .name = "bcm63xx_cardbus", .id_table = bcm63xx_cb_table, .probe = bcm63xx_cb_probe, .remove = __devexit_p(bcm63xx_cb_exit), }; #endif /* * if cardbus support is enabled, register our platform device after * our fake cardbus bridge has been registered */ static int __init bcm63xx_pcmcia_init(void) { #ifdef CONFIG_CARDBUS return pci_register_driver(&bcm63xx_cardbus_driver); #else return platform_driver_register(&bcm63xx_pcmcia_driver); #endif } static void __exit bcm63xx_pcmcia_exit(void) { #ifdef CONFIG_CARDBUS return pci_unregister_driver(&bcm63xx_cardbus_driver); #else platform_driver_unregister(&bcm63xx_pcmcia_driver); #endif } module_init(bcm63xx_pcmcia_init); module_exit(bcm63xx_pcmcia_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller");
gpl-2.0
Rashed97/lge_d295
arch/arm/mach-realview/core.c
4796
12930
/* * linux/arch/arm/mach-realview/core.c * * Copyright (C) 1999 - 2003 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/amba/clcd.h> #include <linux/io.h> #include <linux/smsc911x.h> #include <linux/ata_platform.h> #include <linux/amba/mmci.h> #include <linux/gfp.h> #include <linux/clkdev.h> #include <linux/mtd/physmap.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/hardware/arm_timer.h> #include <asm/hardware/icst.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> #include <asm/hardware/gic.h> #include <mach/platform.h> #include <mach/irqs.h> #include <asm/hardware/timer-sp.h> #include <plat/clcd.h> #include <plat/sched_clock.h> #include "core.h" #define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET) static void realview_flash_set_vpp(struct platform_device *pdev, int on) { u32 val; val = __raw_readl(REALVIEW_FLASHCTRL); if (on) val |= REALVIEW_FLASHPROG_FLVPPEN; else val &= ~REALVIEW_FLASHPROG_FLVPPEN; __raw_writel(val, REALVIEW_FLASHCTRL); } static 
struct physmap_flash_data realview_flash_data = { .width = 4, .set_vpp = realview_flash_set_vpp, }; struct platform_device realview_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &realview_flash_data, }, }; int realview_flash_register(struct resource *res, u32 num) { realview_flash_device.resource = res; realview_flash_device.num_resources = num; return platform_device_register(&realview_flash_device); } static struct smsc911x_platform_config smsc911x_config = { .flags = SMSC911X_USE_32BIT, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device realview_eth_device = { .name = "smsc911x", .id = 0, .num_resources = 2, }; int realview_eth_register(const char *name, struct resource *res) { if (name) realview_eth_device.name = name; realview_eth_device.resource = res; if (strcmp(realview_eth_device.name, "smsc911x") == 0) realview_eth_device.dev.platform_data = &smsc911x_config; return platform_device_register(&realview_eth_device); } struct platform_device realview_usb_device = { .name = "isp1760", .num_resources = 2, }; int realview_usb_register(struct resource *res) { realview_usb_device.resource = res; return platform_device_register(&realview_usb_device); } static struct pata_platform_info pata_platform_data = { .ioport_shift = 1, }; static struct resource pata_resources[] = { [0] = { .start = REALVIEW_CF_BASE, .end = REALVIEW_CF_BASE + 0xff, .flags = IORESOURCE_MEM, }, [1] = { .start = REALVIEW_CF_BASE + 0x100, .end = REALVIEW_CF_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device realview_cf_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(pata_resources), .resource = pata_resources, .dev = { .platform_data = &pata_platform_data, }, }; static struct resource realview_i2c_resource = { .start = REALVIEW_I2C_BASE, .end = REALVIEW_I2C_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }; 
struct platform_device realview_i2c_device = { .name = "versatile-i2c", .id = 0, .num_resources = 1, .resource = &realview_i2c_resource, }; static struct i2c_board_info realview_i2c_board_info[] = { { I2C_BOARD_INFO("ds1338", 0xd0 >> 1), }, }; static int __init realview_i2c_init(void) { return i2c_register_board_info(0, realview_i2c_board_info, ARRAY_SIZE(realview_i2c_board_info)); } arch_initcall(realview_i2c_init); #define REALVIEW_SYSMCI (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_MCI_OFFSET) /* * This is only used if GPIOLIB support is disabled */ static unsigned int realview_mmc_status(struct device *dev) { struct amba_device *adev = container_of(dev, struct amba_device, dev); u32 mask; if (machine_is_realview_pb1176()) { static bool inserted = false; /* * The PB1176 does not have the status register, * assume it is inserted at startup, then invert * for each call so card insertion/removal will * be detected anyway. This will not be called if * GPIO on PL061 is active, which is the proper * way to do this on the PB1176. */ inserted = !inserted; return inserted ? 
0 : 1; } if (adev->res.start == REALVIEW_MMCI0_BASE) mask = 1; else mask = 2; return readl(REALVIEW_SYSMCI) & mask; } struct mmci_platform_data realview_mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 17, .gpio_cd = 16, .cd_invert = true, }; struct mmci_platform_data realview_mmc1_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 19, .gpio_cd = 18, .cd_invert = true, }; /* * Clock handling */ static const struct icst_params realview_oscvco_params = { .ref = 24000000, .vco_max = ICST307_VCO_MAX, .vco_min = ICST307_VCO_MIN, .vd_min = 4 + 8, .vd_max = 511 + 8, .rd_min = 1 + 2, .rd_max = 127 + 2, .s2div = icst307_s2div, .idx2s = icst307_idx2s, }; static void realview_oscvco_set(struct clk *clk, struct icst_vco vco) { void __iomem *sys_lock = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LOCK_OFFSET; u32 val; val = readl(clk->vcoreg) & ~0x7ffff; val |= vco.v | (vco.r << 9) | (vco.s << 16); writel(0xa05f, sys_lock); writel(val, clk->vcoreg); writel(0, sys_lock); } static const struct clk_ops oscvco_clk_ops = { .round = icst_clk_round, .set = icst_clk_set, .setvco = realview_oscvco_set, }; static struct clk oscvco_clk = { .ops = &oscvco_clk_ops, .params = &realview_oscvco_params, }; /* * These are fixed clocks. 
*/ static struct clk ref24_clk = { .rate = 24000000, }; static struct clk sp804_clk = { .rate = 1000000, }; static struct clk dummy_apb_pclk; static struct clk_lookup lookups[] = { { /* Bus clock */ .con_id = "apb_pclk", .clk = &dummy_apb_pclk, }, { /* UART0 */ .dev_id = "dev:uart0", .clk = &ref24_clk, }, { /* UART1 */ .dev_id = "dev:uart1", .clk = &ref24_clk, }, { /* UART2 */ .dev_id = "dev:uart2", .clk = &ref24_clk, }, { /* UART3 */ .dev_id = "fpga:uart3", .clk = &ref24_clk, }, { /* UART3 is on the dev chip in PB1176 */ .dev_id = "dev:uart3", .clk = &ref24_clk, }, { /* UART4 only exists in PB1176 */ .dev_id = "fpga:uart4", .clk = &ref24_clk, }, { /* KMI0 */ .dev_id = "fpga:kmi0", .clk = &ref24_clk, }, { /* KMI1 */ .dev_id = "fpga:kmi1", .clk = &ref24_clk, }, { /* MMC0 */ .dev_id = "fpga:mmc0", .clk = &ref24_clk, }, { /* CLCD is in the PB1176 and EB DevChip */ .dev_id = "dev:clcd", .clk = &oscvco_clk, }, { /* PB:CLCD */ .dev_id = "issp:clcd", .clk = &oscvco_clk, }, { /* SSP */ .dev_id = "dev:ssp0", .clk = &ref24_clk, }, { /* SP804 timers */ .dev_id = "sp804", .clk = &sp804_clk, }, }; void __init realview_init_early(void) { void __iomem *sys = __io_address(REALVIEW_SYS_BASE); if (machine_is_realview_pb1176()) oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC0_OFFSET; else oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC4_OFFSET; clkdev_add_table(lookups, ARRAY_SIZE(lookups)); versatile_sched_clock_init(sys + REALVIEW_SYS_24MHz_OFFSET, 24000000); } /* * CLCD support. */ #define SYS_CLCD_NLCDIOON (1 << 2) #define SYS_CLCD_VDDPOSSWITCH (1 << 3) #define SYS_CLCD_PWR3V5SWITCH (1 << 4) #define SYS_CLCD_ID_MASK (0x1f << 8) #define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8) #define SYS_CLCD_ID_UNKNOWN_8_4 (0x01 << 8) #define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8) #define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8) #define SYS_CLCD_ID_VGA (0x1f << 8) /* * Disable all display connectors on the interface module. 
*/ static void realview_clcd_disable(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); val &= ~SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); } /* * Enable the relevant connector on the interface module. */ static void realview_clcd_enable(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; u32 val; /* * Enable the PSUs */ val = readl(sys_clcd); val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); } /* * Detect which LCD panel is connected, and return the appropriate * clcd_panel structure. Note: we do not have any information on * the required timings for the 8.4in panel, so we presently assume * VGA timings. */ static int realview_clcd_setup(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; const char *panel_name, *vga_panel_name; unsigned long framesize; u32 val; if (machine_is_realview_eb()) { /* VGA, 16bpp */ framesize = 640 * 480 * 2; vga_panel_name = "VGA"; } else { /* XVGA, 16bpp */ framesize = 1024 * 768 * 2; vga_panel_name = "XVGA"; } val = readl(sys_clcd) & SYS_CLCD_ID_MASK; if (val == SYS_CLCD_ID_SANYO_3_8) panel_name = "Sanyo TM38QV67A02A"; else if (val == SYS_CLCD_ID_SANYO_2_5) panel_name = "Sanyo QVGA Portrait"; else if (val == SYS_CLCD_ID_EPSON_2_2) panel_name = "Epson L2F50113T00"; else if (val == SYS_CLCD_ID_VGA) panel_name = vga_panel_name; else { pr_err("CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val); panel_name = vga_panel_name; } fb->panel = versatile_clcd_get_panel(panel_name); if (!fb->panel) return -EINVAL; return versatile_clcd_setup_dma(fb, framesize); } struct clcd_board clcd_plat_data = { .name = "RealView", .caps = CLCD_CAP_ALL, .check = clcdfb_check, .decode = clcdfb_decode, .disable = realview_clcd_disable, .enable = realview_clcd_enable, .setup = realview_clcd_setup, .mmap = 
versatile_clcd_mmap_dma, .remove = versatile_clcd_remove_dma, }; #ifdef CONFIG_LEDS #define VA_LEDS_BASE (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LED_OFFSET) void realview_leds_event(led_event_t ledevt) { unsigned long flags; u32 val; u32 led = 1 << smp_processor_id(); local_irq_save(flags); val = readl(VA_LEDS_BASE); switch (ledevt) { case led_idle_start: val = val & ~led; break; case led_idle_end: val = val | led; break; case led_timer: val = val ^ REALVIEW_SYS_LED7; break; case led_halted: val = 0; break; default: break; } writel(val, VA_LEDS_BASE); local_irq_restore(flags); } #endif /* CONFIG_LEDS */ /* * Where is the timer (VA)? */ void __iomem *timer0_va_base; void __iomem *timer1_va_base; void __iomem *timer2_va_base; void __iomem *timer3_va_base; /* * Set up the clock source and clock events devices */ void __init realview_timer_init(unsigned int timer_irq) { u32 val; /* * set clock frequency: * REALVIEW_REFCLK is 32KHz * REALVIEW_TIMCLK is 1MHz */ val = readl(__io_address(REALVIEW_SCTL_BASE)); writel((REALVIEW_TIMCLK << REALVIEW_TIMER1_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER2_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER3_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER4_EnSel) | val, __io_address(REALVIEW_SCTL_BASE)); /* * Initialise to a known state (all timers off) */ writel(0, timer0_va_base + TIMER_CTRL); writel(0, timer1_va_base + TIMER_CTRL); writel(0, timer2_va_base + TIMER_CTRL); writel(0, timer3_va_base + TIMER_CTRL); sp804_clocksource_init(timer3_va_base, "timer3"); sp804_clockevents_init(timer0_va_base, timer_irq, "timer0"); } /* * Setup the memory banks. */ void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) { /* * Most RealView platforms have 512MB contiguous RAM at 0x70000000. * Half of this is mirrored at 0. 
*/ #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET meminfo->bank[0].start = 0x70000000; meminfo->bank[0].size = SZ_512M; meminfo->nr_banks = 1; #else meminfo->bank[0].start = 0; meminfo->bank[0].size = SZ_256M; meminfo->nr_banks = 1; #endif }
gpl-2.0
poondog/kangaroo-m7-mkv
drivers/atm/horizon.c
4796
85914
/* Madge Horizon ATM Adapter driver. Copyright (C) 1995-1999 Madge Networks Ltd. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian system and in the file COPYING in the Linux kernel source. */ /* IMPORTANT NOTE: Madge Networks no longer makes the adapters supported by this driver and makes no commitment to maintain it. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <asm/string.h> #include <asm/byteorder.h> #include "horizon.h" #define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>" #define description_string "Madge ATM Horizon [Ultra] driver" #define version_string "1.2.1" static inline void __init show_version (void) { printk ("%s version %s\n", description_string, version_string); } /* CREDITS Driver and documentation by: Chris Aston Madge Networks Giuliano Procida Madge Networks Simon Benham Madge Networks Simon Johnson Madge Networks Various Others Madge Networks Some inspiration taken from other drivers by: Alexandru Cucos UTBv Kari Mettinen University of Helsinki Werner Almesberger EPFL LRC Theory of Operation I Hardware, detection, initialisation and shutdown. 1. Supported Hardware This driver should handle all variants of the PCI Madge ATM adapters with the Horizon chipset. These are all PCI cards supporting PIO, BM DMA and a form of MMIO (registers only, not internal RAM). The driver is only known to work with SONET and UTP Horizon Ultra cards at 155Mb/s. However, code is in place to deal with both the original Horizon and 25Mb/s operation. There are two revisions of the Horizon ASIC: the original and the Ultra. Details of hardware bugs are in section III. The ASIC version can be distinguished by chip markings but is NOT indicated by the PCI revision (all adapters seem to have PCI rev 1). 
I believe that: Horizon => Collage 25 PCI Adapter (UTP and STP) Horizon Ultra => Collage 155 PCI Client (UTP or SONET) Ambassador x => Collage 155 PCI Server (completely different) Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to have a Madge B154 plus glue logic serializer. I have also found a really ancient version of this with slightly different glue. It comes with the revision 0 (140-025-01) ASIC. Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink output (UTP) or an HP HFBR 5205 output (SONET). It has either Madge's SAMBA framer or a SUNI-lite device (early versions). It comes with the revision 1 (140-027-01) ASIC. 2. Detection All Horizon-based cards present with the same PCI Vendor and Device IDs. The standard Linux 2.2 PCI API is used to locate any cards and to enable bus-mastering (with appropriate latency). ATM_LAYER_STATUS in the control register distinguishes between the two possible physical layers (25 and 155). It is not clear whether the 155 cards can also operate at 25Mbps. We rely on the fact that a card operates at 155 if and only if it has the newer Horizon Ultra ASIC. For 155 cards the two possible framers are probed for and then set up for loop-timing. 3. Initialisation The card is reset and then put into a known state. The physical layer is configured for normal operation at the appropriate speed; in the case of the 155 cards, the framer is initialised with line-based timing; the internal RAM is zeroed and the allocation of buffers for RX and TX is made; the Burnt In Address is read and copied to the ATM ESI; various policy settings for RX (VPI bits, unknown VCs, oam cells) are made. Ideally all policy items should be configurable at module load (if not actually on-demand), however, only the vpi vs vci bit allocation can be specified at insmod. 4. Shutdown This is in response to module_cleaup. No VCs are in use and the card should be idle; it is reset. II Driver software (as it should be) 0. 
Traffic Parameters The traffic classes (not an enumeration) are currently: ATM_NONE (no traffic), ATM_UBR, ATM_CBR, ATM_VBR and ATM_ABR, ATM_ANYCLASS (compatible with everything). Together with (perhaps only some of) the following items they make up the traffic specification. struct atm_trafprm { unsigned char traffic_class; traffic class (ATM_UBR, ...) int max_pcr; maximum PCR in cells per second int pcr; desired PCR in cells per second int min_pcr; minimum PCR in cells per second int max_cdv; maximum CDV in microseconds int max_sdu; maximum SDU in bytes }; Note that these denote bandwidth available not bandwidth used; the possibilities according to ATMF are: Real Time (cdv and max CDT given) CBR(pcr) pcr bandwidth always available rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too Non Real Time nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too UBR() ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too mbs is max burst size (bucket) pcr and scr have associated cdvt values mcr is like scr but has no cdtv cdtv may differ at each hop Some of the above items are qos items (as opposed to traffic parameters). We have nothing to do with qos. All except ABR can have their traffic parameters converted to GCRA parameters. The GCRA may be implemented as a (real-number) leaky bucket. The GCRA can be used in complicated ways by switches and in simpler ways by end-stations. It can be used both to filter incoming cells and shape out-going cells. ATM Linux actually supports: ATM_NONE() (no traffic in this direction) ATM_UBR(max_frame_size) ATM_CBR(max/min_pcr, max_cdv, max_frame_size) 0 or ATM_MAX_PCR are used to indicate maximum available PCR A traffic specification consists of the AAL type and separate traffic specifications for either direction. 
In ATM Linux it is: struct atm_qos { struct atm_trafprm txtp; struct atm_trafprm rxtp; unsigned char aal; }; AAL types are: ATM_NO_AAL AAL not specified ATM_AAL0 "raw" ATM cells ATM_AAL1 AAL1 (CBR) ATM_AAL2 AAL2 (VBR) ATM_AAL34 AAL3/4 (data) ATM_AAL5 AAL5 (data) ATM_SAAL signaling AAL The Horizon has support for AAL frame types: 0, 3/4 and 5. However, it does not implement AAL 3/4 SAR and it has a different notion of "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are supported by this driver. The Horizon has limited support for ABR (including UBR), VBR and CBR. Each TX channel has a bucket (containing up to 31 cell units) and two timers (PCR and SCR) associated with it that can be used to govern cell emissions and host notification (in the case of ABR this is presumably so that RM cells may be emitted at appropriate times). The timers may either be disabled or may be set to any of 240 values (determined by the clock crystal, a fixed (?) per-device divider, a configurable divider and a configurable timer preload value). At the moment only UBR and CBR are supported by the driver. VBR will be supported as soon as ATM for Linux supports it. ABR support is very unlikely as RM cell handling is completely up to the driver. 1. TX (TX channel setup and TX transfer) The TX half of the driver owns the TX Horizon registers. The TX component in the IRQ handler is the BM completion handler. This can only be entered when tx_busy is true (enforced by hardware). The other TX component can only be entered when tx_busy is false (enforced by driver). So TX is single-threaded. Apart from a minor optimisation to not re-select the last channel, the TX send component works as follows: Atomic test and set tx_busy until we succeed; we should implement some sort of timeout so that tx_busy will never be stuck at true. If no TX channel is set up for this VC we wait for an idle one (if necessary) and set it up. At this point we have a TX channel ready for use. 
We wait for enough buffers to become available then start a TX transmit (set the TX descriptor, schedule transfer, exit). The IRQ component handles TX completion (stats, free buffer, tx_busy unset, exit). We also re-schedule further transfers for the same frame if needed. TX setup in more detail: TX open is a nop, the relevant information is held in the hrz_vcc (vcc->dev_data) structure and is "cached" on the card. TX close gets the TX lock and clears the channel from the "cache". 2. RX (Data Available and RX transfer) The RX half of the driver owns the RX registers. There are two RX components in the IRQ handler: the data available handler deals with fresh data that has arrived on the card, the BM completion handler is very similar to the TX completion handler. The data available handler grabs the rx_lock and it is only released once the data has been discarded or completely transferred to the host. The BM completion handler only runs when the lock is held; the data available handler is locked out over the same period. Data available on the card triggers an interrupt. If the data is not suitable for our existing RX channels or we cannot allocate a buffer it is flushed. Otherwise an RX receive is scheduled. Multiple RX transfers may be scheduled for the same frame. RX setup in more detail: RX open... RX close... III Hardware Bugs 0. Byte vs Word addressing of adapter RAM. A design feature; see the .h file (especially the memory map). 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra) The host must not start a transmit direction transfer at a non-four-byte boundary in host memory. Instead the host should perform a byte, or a two byte, or one byte followed by two byte transfer in order to start the rest of the transfer on a four byte boundary. RX is OK. Simultaneous transmit and receive direction bus master transfers are not allowed. The simplest solution to these two is to always do PIO (never DMA) in the TX direction on the original Horizon. 
More complicated solutions are likely to hurt my brain. 2. Loss of buffer on close VC When a VC is being closed, the buffer associated with it is not returned to the pool. The host must store the reference to this buffer and when opening a new VC then give it to that new VC. The host intervention currently consists of stacking such a buffer pointer at VC close and checking the stack at VC open. 3. Failure to close a VC If a VC is currently receiving a frame then closing the VC may fail and the frame continues to be received. The solution is to make sure any received frames are flushed when ready. This is currently done just before the solution to 2. 4. PCI bus (original Horizon only, fixed in Ultra) Reading from the data port prior to initialisation will hang the PCI bus. Just don't do that then! We don't. IV To Do List . Timer code may be broken. . Allow users to specify buffer allocation split for TX and RX. . Deal once and for all with buggy VC close. . Handle interrupted and/or non-blocking operations. . Change some macros to functions and move from .h to .c. . Try to limit the number of TX frames each VC may have queued, in order to reduce the chances of TX buffer exhaustion. . Implement VBR (bucket and timers not understood) and ABR (need to do RM cells manually); also no Linux support for either. . Implement QoS changes on open VCs (involves extracting parts of VC open and close into separate functions and using them to make changes). 
*/ /********** globals **********/ static void do_housekeeping (unsigned long arg); static unsigned short debug = 0; static unsigned short vpi_bits = 0; static int max_tx_size = 9000; static int max_rx_size = 9000; static unsigned char pci_lat = 0; /********** access functions **********/ /* Read / Write Horizon registers */ static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) { outl (cpu_to_le32 (data), dev->iobase + reg); } static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) { return le32_to_cpu (inl (dev->iobase + reg)); } static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) { outw (cpu_to_le16 (data), dev->iobase + reg); } static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) { return le16_to_cpu (inw (dev->iobase + reg)); } static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) { outsb (dev->iobase + reg, addr, len); } static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) { insb (dev->iobase + reg, addr, len); } /* Read / Write to a given address in Horizon buffer memory. Interrupts must be disabled between the address register and data port accesses as these must form an atomic operation. 
*/ static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) { // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr); wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); wr_regl (dev, MEMORY_PORT_OFF, data); } static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) { // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr); wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); return rd_regl (dev, MEMORY_PORT_OFF); } static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) { wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000); wr_regl (dev, MEMORY_PORT_OFF, data); } static inline u32 rd_framer (const hrz_dev * dev, u32 addr) { wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000); return rd_regl (dev, MEMORY_PORT_OFF); } /********** specialised access functions **********/ /* RX */ static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) { wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel); return; } static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) { while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL) ; return; } static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) { wr_regw (dev, RX_CHANNEL_PORT_OFF, channel); return; } static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) { while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS) ; return; } /* TX */ static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) { wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel); return; } /* Update or query one configuration parameter of a particular channel. 
*/ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) { wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF, chan * TX_CHANNEL_CONFIG_MULT | mode); wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value); return; } static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) { wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF, chan * TX_CHANNEL_CONFIG_MULT | mode); return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF); } /********** dump functions **********/ static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) { #ifdef DEBUG_HORIZON unsigned int i; unsigned char * data = skb->data; PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc); for (i=0; i<skb->len && i < 256;i++) PRINTDM (DBG_DATA, "%02x ", data[i]); PRINTDE (DBG_DATA,""); #else (void) prefix; (void) vc; (void) skb; #endif return; } static inline void dump_regs (hrz_dev * dev) { #ifdef DEBUG_HORIZON PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG)); PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF)); PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF)); PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF)); PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF)); PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF)); #else (void) dev; #endif return; } static inline void dump_framer (hrz_dev * dev) { #ifdef DEBUG_HORIZON unsigned int i; PRINTDB (DBG_REGS, "framer registers:"); for (i = 0; i < 0x10; ++i) PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i)); PRINTDE (DBG_REGS,""); #else (void) dev; #endif return; } /********** VPI/VCI <-> (RX) channel conversions **********/ /* RX channels are 10 bit integers, these fns are quite paranoid */ static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) { unsigned short vci_bits = 10 - vpi_bits; if ((channel & RX_CHANNEL_MASK) == channel) { *vci = channel & ((~0)<<vci_bits); *vpi = channel >> vci_bits; return 
channel ? 0 : -EINVAL; } return -EINVAL; } static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) { unsigned short vci_bits = 10 - vpi_bits; if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) { *channel = vpi<<vci_bits | vci; return *channel ? 0 : -EINVAL; } return -EINVAL; } /********** decode RX queue entries **********/ static inline u16 rx_q_entry_to_length (u32 x) { return x & RX_Q_ENTRY_LENGTH_MASK; } static inline u16 rx_q_entry_to_rx_channel (u32 x) { return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK; } /* Cell Transmit Rate Values * * the cell transmit rate (cells per sec) can be set to a variety of * different values by specifying two parameters: a timer preload from * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of * an exponent from 0 to 14; the special value 15 disables the timer). * * cellrate = baserate / (preload * 2^divider) * * The maximum cell rate that can be specified is therefore just the * base rate. Halving the preload is equivalent to adding 1 to the * divider and so values 1 to 8 of the preload are redundant except * in the case of a maximal divider (14). * * Given a desired cell rate, an algorithm to determine the preload * and divider is: * * a) x = baserate / cellrate, want p * 2^d = x (as far as possible) * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done * if x <= 16 then set p = x, d = 0 (high rates), done * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until * we find the range (n will be between 1 and 14), set d = n * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n * * The algorithm used below is a minor variant of the above. 
*
 * The base rate is derived from the oscillator frequency (Hz) using a
 * fixed divider:
 *
 * baserate = freq / 32 in the case of some Unknown Card
 * baserate = freq / 8 in the case of the Horizon 25
 * baserate = freq / 8 in the case of the Horizon Ultra 155
 *
 * The Horizon cards have oscillators and base rates as follows:
 *
 * Card               Oscillator   Base Rate
 * Unknown Card       33 MHz       1.03125 MHz (33 MHz = PCI freq)
 * Horizon 25         32 MHz       4 MHz
 * Horizon Ultra 155  40 MHz       5 MHz
 *
 * The following defines give the base rates in Hz.  These were
 * previously a factor of 100 larger, no doubt someone was using
 * cps*100.
 */

#define BR_UKN 1031250l
#define BR_HRZ 4000000l
#define BR_ULT 5000000l

// d is an exponent
#define CR_MIND 0
#define CR_MAXD 14

// p ranges from 1 to a power of 2
#define CR_MAXPEXP 4

/* Encode cell rate c (cells/s) into the hardware (divider d, preload p)
 * pair described in the comment above, honouring the requested rounding
 * direction r.  On success returns 0 and (if non-NULL) stores the
 * register bit pattern in *bits and the achieved rate in *actual;
 * returns -EINVAL for c == 0 or an unrepresentable request. */
static int make_rate (const hrz_dev * dev, u32 c, rounding r,
                      u16 * bits, unsigned int * actual) {
  // note: rounding the rate down means rounding 'p' up
  const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;

  u32 div = CR_MIND;
  u32 pre;

  // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
  // the tests below. We could think harder about exact possibilities
  // of failure...
  unsigned long br_man = br;
  unsigned int br_exp = 0;

  PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
          r == round_up ? "up" : r == round_down ? "down" : "nearest");

  // avoid div by zero
  if (!c) {
    PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
    return -EINVAL;
  }

  // strip factors of two out of br so the comparisons below can shift
  // c instead of multiplying br (overflow avoidance)
  while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
    br_man = br_man >> 1;
    ++br_exp;
  }
  // (br >>br_exp) <<br_exp == br and
  // br_exp <= CR_MAXPEXP+CR_MIND

  if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
    // Equivalent to: B <= (c << (MAXPEXP+MIND))
    // take care of rounding
    switch (r) {
      case round_down:
        pre = DIV_ROUND_UP(br, c<<div);
        // but p must be non-zero
        if (!pre)
          pre = 1;
        break;
      case round_nearest:
        pre = DIV_ROUND_CLOSEST(br, c<<div);
        // but p must be non-zero
        if (!pre)
          pre = 1;
        break;
      default: /* round_up */
        pre = br/(c<<div);
        // but p must be non-zero
        if (!pre)
          return -EINVAL;
    }
    PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
    goto got_it;
  }

  // at this point we have
  // d == MIND and (c << (MAXPEXP+MIND)) < B
  while (div < CR_MAXD) {
    div++;
    if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
      // Equivalent to: B <= (c << (MAXPEXP+d))
      // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
      // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
      // MAXP/2 < B/c2^d <= MAXP
      // take care of rounding
      switch (r) {
        case round_down:
          pre = DIV_ROUND_UP(br, c<<div);
          break;
        case round_nearest:
          pre = DIV_ROUND_CLOSEST(br, c<<div);
          break;
        default: /* round_up */
          pre = br/(c<<div);
      }
      PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
      goto got_it;
    }
  }
  // at this point we have
  // d == MAXD and (c << (MAXPEXP+MAXD)) < B
  // but we cannot go any higher
  // take care of rounding
  if (r == round_down)
    return -EINVAL;
  pre = 1 << CR_MAXPEXP;
  PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
got_it:
  // paranoia
  if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
    PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u", div, pre);
    return -EINVAL;
  } else {
    if (bits)
      *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
    if (actual) {
      *actual = DIV_ROUND_UP(br, pre<<div);
      PRINTD (DBG_QOS, "actual rate: %u", *actual);
    }
    return 0;
  }
}

/* Like make_rate, but first tries round_nearest and only falls back to
 * the caller's rounding mode when the nearest achievable rate lies
 * outside c +/- tol. */
static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding
r, unsigned int tol, u16 * bit_pattern, unsigned int * actual) { unsigned int my_actual; PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u", c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol); if (!actual) // actual rate is not returned actual = &my_actual; if (make_rate (dev, c, round_nearest, bit_pattern, actual)) // should never happen as round_nearest always succeeds return -1; if (c - tol <= *actual && *actual <= c + tol) // within tolerance return 0; else // intolerant, try rounding instead return make_rate (dev, c, r, bit_pattern, actual); } /********** Listen on a VC **********/ static int hrz_open_rx (hrz_dev * dev, u16 channel) { // is there any guarantee that we don't get two simulataneous // identical calls of this function from different processes? yes // rate_lock unsigned long flags; u32 channel_type; // u16? u16 buf_ptr = RX_CHANNEL_IDLE; rx_ch_desc * rx_desc = &memmap->rx_descs[channel]; PRINTD (DBG_FLOW, "hrz_open_rx %x", channel); spin_lock_irqsave (&dev->mem_lock, flags); channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK; spin_unlock_irqrestore (&dev->mem_lock, flags); // very serious error, should never occur if (channel_type != RX_CHANNEL_DISABLED) { PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open"); return -EBUSY; // clean up? } // Give back spare buffer if (dev->noof_spare_buffers) { buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers]; PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr); // should never occur if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) { // but easy to recover from PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE"); buf_ptr = RX_CHANNEL_IDLE; } } else { PRINTD (DBG_VCC, "using IDLE buffer pointer"); } // Channel is currently disabled so change its status to idle // do we really need to save the flags again? 
spin_lock_irqsave (&dev->mem_lock, flags);

  wr_mem (dev, &rx_desc->wr_buf_type,
          buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
  if (buf_ptr != RX_CHANNEL_IDLE)
    wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);

  spin_unlock_irqrestore (&dev->mem_lock, flags);

  // rxer->rate = make_rate (qos->peak_cells);

  PRINTD (DBG_FLOW, "hrz_open_rx ok");

  return 0;
}

#if 0
/********** change vc rate for a given vc **********/

static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
  rxer->rate = make_rate (qos->peak_cells);
}
#endif

/********** free an skb (as per ATM device driver documentation) **********/

/* Return an skb to its owner: via the VCC's pop callback when one is
 * registered, otherwise free it directly. */
static void hrz_kfree_skb (struct sk_buff * skb) {
  if (ATM_SKB(skb)->vcc->pop) {
    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
  } else {
    dev_kfree_skb_any (skb);
  }
}

/********** cancel listen on a VC **********/

/* Disable RX channel `vc` and, unless it was already idle, flush any
 * data still queued on it; the surviving buffer pointer is recycled
 * into dev->spare_buffers for reuse by a later hrz_open_rx. */
static void hrz_close_rx (hrz_dev * dev, u16 vc) {
  unsigned long flags;

  u32 value;

  u32 r1, r2;

  rx_ch_desc * rx_desc = &memmap->rx_descs[vc];

  int was_idle = 0;

  spin_lock_irqsave (&dev->mem_lock, flags);
  value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
  spin_unlock_irqrestore (&dev->mem_lock, flags);

  if (value == RX_CHANNEL_DISABLED) {
    // I suppose this could happen once we deal with _NONE traffic properly
    PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
    return;
  }
  if (value == RX_CHANNEL_IDLE)
    was_idle = 1;

  spin_lock_irqsave (&dev->mem_lock, flags);

  // keep writing DISABLED until the adapter's read-back confirms it;
  // a concurrent hardware update can clobber a single write
  for (;;) {
    wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);

    if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
      break;

    was_idle = 0;
  }

  if (was_idle) {
    spin_unlock_irqrestore (&dev->mem_lock, flags);
    return;
  }

  WAIT_FLUSH_RX_COMPLETE(dev);

  // XXX Is this all really necessary? We can rely on the rx_data_av
  // handler to discard frames that remain queued for delivery. If the
  // worry is that immediately reopening the channel (perhaps by a
  // different process) may cause some data to be mis-delivered then
  // there may still be a simpler solution (such as busy-waiting on
  // rx_busy once the channel is disabled or before a new one is
  // opened - does this leave any holes?). Arguably setting up and
  // tearing down the TX and RX halves of each virtual circuit could
  // most safely be done within ?x_busy protected regions.

  // OK, current changes are that Simon's marker is disabled and we DO
  // look for NULL rxer elsewhere. The code here seems flush frames
  // and then remember the last dead cell belonging to the channel
  // just disabled - the cell gets relinked at the next vc_open.
  // However, when all VCs are closed or only a few opened there are a
  // handful of buffers that are unusable.

  // Does anyone feel like documenting spare_buffers properly?
  // Does anyone feel like fixing this in a nicer way?

  // Flush any data which is left in the channel
  for (;;) {
    // Change the rx channel port to something different to the RX
    // channel we are trying to close to force Horizon to flush the rx
    // channel read and write pointers.

    u16 other = vc^(RX_CHANS/2);

    SELECT_RX_CHANNEL (dev, other);
    WAIT_UPDATE_COMPLETE (dev);

    r1 = rd_mem (dev, &rx_desc->rd_buf_type);

    // Select this RX channel. Flush doesn't seem to work unless we
    // select an RX channel before hand

    SELECT_RX_CHANNEL (dev, vc);
    WAIT_UPDATE_COMPLETE (dev);

    // Attempt to flush a frame on this RX channel

    FLUSH_RX_CHANNEL (dev, vc);
    WAIT_FLUSH_RX_COMPLETE (dev);

    // Force Horizon to flush rx channel read and write pointers as before

    SELECT_RX_CHANNEL (dev, other);
    WAIT_UPDATE_COMPLETE (dev);

    r2 = rd_mem (dev, &rx_desc->rd_buf_type);

    PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);

    // a stable read pointer means the channel has drained; keep its
    // last buffer as a spare
    if (r1 == r2) {
      dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
      break;
    }
  }

#if 0
  {
    rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
    rx_q_entry * rd_ptr = dev->rx_q_entry;

    PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);

    while (rd_ptr != wr_ptr) {
      u32 x = rd_mem (dev, (HDW *) rd_ptr);

      if (vc == rx_q_entry_to_rx_channel (x)) {
        x |= SIMONS_DODGEY_MARKER;

        PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");

        wr_mem (dev, (HDW *) rd_ptr, x);
      }

      if (rd_ptr == dev->rx_q_wrap)
        rd_ptr = dev->rx_q_reset;
      else
        rd_ptr++;
    }
  }
#endif

  spin_unlock_irqrestore (&dev->mem_lock, flags);

  return;
}

/********** schedule RX transfers **********/

// Note on tail recursion: a GCC developer said that it is not likely
// to be fixed soon, so do not define TAILRECUSRIONWORKS unless you
// are sure it does as you may otherwise overflow the kernel stack.
// giving this fn a return value would help GCC, allegedly

/* Drive the next chunk of the in-progress RX transfer for dev->rx_skb:
 * either program the bus-master DMA engine or (for small chunks) copy
 * by PIO.  When the frame is complete it is pushed to the VC layer and
 * rx_busy is released.  `irq` is non-zero when called from the bus
 * master completion handler (an interrupt condition must be cleared). */
static void rx_schedule (hrz_dev * dev, int irq) {
  unsigned int rx_bytes;

  int pio_instead = 0;
#ifndef TAILRECURSIONWORKS
  pio_instead = 1;
  while (pio_instead) {
#endif
    // bytes waiting for RX transfer
    rx_bytes = dev->rx_bytes;

#if 0
    spin_count = 0;
    while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
      PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
      if (++spin_count > 10) {
        PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
        clear_bit (rx_busy, &dev->flags);
        hrz_kfree_skb (dev->rx_skb);
        return;
      }
    }
#endif

    // this code follows the TX code but (at the moment) there is only
    // one region - the skb itself. I don't know if this will change,
    // but it doesn't hurt to have the code here, disabled.

    if (rx_bytes) {
      // start next transfer within same region
      if (rx_bytes <= MAX_PIO_COUNT) {
        PRINTD (DBG_RX|DBG_BUS, "(pio)");
        pio_instead = 1;
      }
      if (rx_bytes <= MAX_TRANSFER_COUNT) {
        PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
        dev->rx_bytes = 0;
      } else {
        PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
        dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
        rx_bytes = MAX_TRANSFER_COUNT;
      }
    } else {
      // rx_bytes == 0 -- we're between regions

      // regions remaining to transfer
#if 0
      unsigned int rx_regions = dev->rx_regions;
#else
      unsigned int rx_regions = 0;
#endif

      if (rx_regions) {
#if 0
        // start a new region
        dev->rx_addr = dev->rx_iovec->iov_base;
        rx_bytes = dev->rx_iovec->iov_len;
        ++dev->rx_iovec;
        dev->rx_regions = rx_regions - 1;

        if (rx_bytes <= MAX_PIO_COUNT) {
          PRINTD (DBG_RX|DBG_BUS, "(pio)");
          pio_instead = 1;
        }
        if (rx_bytes <= MAX_TRANSFER_COUNT) {
          PRINTD (DBG_RX|DBG_BUS, "(full region)");
          dev->rx_bytes = 0;
        } else {
          PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
          dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
          rx_bytes = MAX_TRANSFER_COUNT;
        }
#endif
      } else {
        // rx_regions == 0
        // that's all folks - end of frame
        struct sk_buff * skb = dev->rx_skb;
        // dev->rx_iovec = 0;

        FLUSH_RX_CHANNEL (dev, dev->rx_channel);

        dump_skb ("<<<", dev->rx_channel, skb);

        PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);

        {
          struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
          // VC layer stats
          atomic_inc(&vcc->stats->rx);
          __net_timestamp(skb);
          // end of our responsibility
          vcc->push (vcc, skb);
        }
      }
    }

    // note: writing RX_COUNT clears any interrupt condition
    if (rx_bytes) {
      if (pio_instead) {
        if (irq)
          wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
        rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
      } else {
        wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
      }
      dev->rx_addr += rx_bytes;
    } else {
      if (irq)
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
      // allow another RX thread to start
      YELLOW_LED_ON(dev);
      clear_bit (rx_busy, &dev->flags);
      PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
    }

#ifdef TAILRECURSIONWORKS
    // and we all bless optimised tail calls
    if (pio_instead)
      return rx_schedule (dev, 0);
    return;
#else
    // grrrrrrr!
irq = 0;
  }
  return;
#endif
}

/********** handle RX bus master complete events **********/

/* IRQ sub-handler: continue the RX transfer if one is in flight,
 * otherwise just clear the spurious interrupt condition. */
static void rx_bus_master_complete_handler (hrz_dev * dev) {
  if (test_bit (rx_busy, &dev->flags)) {
    rx_schedule (dev, 1);
  } else {
    PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
    // clear interrupt condition on adapter
    wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
  }
  return;
}

/********** (queue to) become the next TX thread **********/

/* Sleep until this caller wins the tx_busy bit.  Returns 0 once the
 * TX path is owned, -1 if the wait was interrupted by a signal. */
static int tx_hold (hrz_dev * dev) {
  PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
  wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
  PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
  if (signal_pending (current))
    return -1;
  PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
  return 0;
}

/********** allow another TX thread to start **********/

/* Release the TX path and wake the next waiter in tx_hold. */
static inline void tx_release (hrz_dev * dev) {
  clear_bit (tx_busy, &dev->flags);
  PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
  wake_up_interruptible (&dev->tx_queue);
}

/********** schedule TX transfers **********/

/* Drive the next chunk of the in-progress TX transfer for dev->tx_skb,
 * walking dev->tx_iovec regions; uses PIO for small chunks (or always
 * on non-Ultra cards), bus-master DMA otherwise.  When the frame is
 * done the skb is freed and tx_busy released.  `irq` is non-zero when
 * called from the bus master completion handler. */
static void tx_schedule (hrz_dev * const dev, int irq) {
  unsigned int tx_bytes;

  int append_desc = 0;

  int pio_instead = 0;
#ifndef TAILRECURSIONWORKS
  pio_instead = 1;
  while (pio_instead) {
#endif
    // bytes in current region waiting for TX transfer
    tx_bytes = dev->tx_bytes;

#if 0
    spin_count = 0;
    while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
      PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
      if (++spin_count > 10) {
        PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
        tx_release (dev);
        hrz_kfree_skb (dev->tx_skb);
        return;
      }
    }
#endif

    if (tx_bytes) {
      // start next transfer within same region
      if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
        PRINTD (DBG_TX|DBG_BUS, "(pio)");
        pio_instead = 1;
      }
      if (tx_bytes <= MAX_TRANSFER_COUNT) {
        PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
        if (!dev->tx_iovec) {
          // end of last region
          append_desc = 1;
        }
        dev->tx_bytes = 0;
      } else {
        PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
        dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
        tx_bytes = MAX_TRANSFER_COUNT;
      }
    } else {
      // tx_bytes == 0 -- we're between regions

      // regions remaining to transfer
      unsigned int tx_regions = dev->tx_regions;

      if (tx_regions) {
        // start a new region
        dev->tx_addr = dev->tx_iovec->iov_base;
        tx_bytes = dev->tx_iovec->iov_len;
        ++dev->tx_iovec;
        dev->tx_regions = tx_regions - 1;

        if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
          PRINTD (DBG_TX|DBG_BUS, "(pio)");
          pio_instead = 1;
        }
        if (tx_bytes <= MAX_TRANSFER_COUNT) {
          PRINTD (DBG_TX|DBG_BUS, "(full region)");
          dev->tx_bytes = 0;
        } else {
          PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
          dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
          tx_bytes = MAX_TRANSFER_COUNT;
        }
      } else {
        // tx_regions == 0
        // that's all folks - end of frame
        struct sk_buff * skb = dev->tx_skb;
        dev->tx_iovec = NULL;

        // VC layer stats
        atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);

        // free the skb
        hrz_kfree_skb (skb);
      }
    }

    // note: writing TX_COUNT clears any interrupt condition
    if (tx_bytes) {
      if (pio_instead) {
        if (irq)
          wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
        wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
        if (append_desc)
          wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
      } else {
        wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
        if (append_desc)
          wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
                 append_desc
                 ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
                 : tx_bytes);
      }
      dev->tx_addr += tx_bytes;
    } else {
      if (irq)
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
      YELLOW_LED_ON(dev);
      tx_release (dev);
    }

#ifdef TAILRECURSIONWORKS
    // and we all bless optimised tail calls
    if (pio_instead)
      return tx_schedule (dev, 0);
    return;
#else
    // grrrrrrr!
irq = 0; } return; #endif } /********** handle TX bus master complete events **********/ static void tx_bus_master_complete_handler (hrz_dev * dev) { if (test_bit (tx_busy, &dev->flags)) { tx_schedule (dev, 1); } else { PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion"); // clear interrupt condition on adapter wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0); } return; } /********** move RX Q pointer to next item in circular buffer **********/ // called only from IRQ sub-handler static u32 rx_queue_entry_next (hrz_dev * dev) { u32 rx_queue_entry; spin_lock (&dev->mem_lock); rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry); if (dev->rx_q_entry == dev->rx_q_wrap) dev->rx_q_entry = dev->rx_q_reset; else dev->rx_q_entry++; wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset); spin_unlock (&dev->mem_lock); return rx_queue_entry; } /********** handle RX disabled by device **********/ static inline void rx_disabled_handler (hrz_dev * dev) { wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE); // count me please PRINTK (KERN_WARNING, "RX was disabled!"); } /********** handle RX data received by device **********/ // called from IRQ handler static void rx_data_av_handler (hrz_dev * dev) { u32 rx_queue_entry; u32 rx_queue_entry_flags; u16 rx_len; u16 rx_channel; PRINTD (DBG_FLOW, "hrz_data_av_handler"); // try to grab rx lock (not possible during RX bus mastering) if (test_and_set_bit (rx_busy, &dev->flags)) { PRINTD (DBG_RX, "locked out of rx lock"); return; } PRINTD (DBG_RX, "set rx_busy for dev %p", dev); // lock is cleared if we fail now, o/w after bus master completion YELLOW_LED_OFF(dev); rx_queue_entry = rx_queue_entry_next (dev); rx_len = rx_q_entry_to_length (rx_queue_entry); rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry); WAIT_FLUSH_RX_COMPLETE (dev); SELECT_RX_CHANNEL (dev, rx_channel); PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry); rx_queue_entry_flags = rx_queue_entry & 
(RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER); if (!rx_len) { // (at least) bus-mastering breaks if we try to handle a // zero-length frame, besides AAL5 does not support them PRINTK (KERN_ERR, "zero-length frame!"); rx_queue_entry_flags &= ~RX_COMPLETE_FRAME; } if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) { PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!"); } if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) { struct atm_vcc * atm_vcc; PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len); atm_vcc = dev->rxer[rx_channel]; // if no vcc is assigned to this channel, we should drop the frame // (is this what SIMONS etc. was trying to achieve?) if (atm_vcc) { if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { if (rx_len <= atm_vcc->qos.rxtp.max_sdu) { struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC); if (skb) { // remember this so we can push it later dev->rx_skb = skb; // remember this so we can flush it later dev->rx_channel = rx_channel; // prepare socket buffer skb_put (skb, rx_len); ATM_SKB(skb)->vcc = atm_vcc; // simple transfer // dev->rx_regions = 0; // dev->rx_iovec = 0; dev->rx_bytes = rx_len; dev->rx_addr = skb->data; PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)", skb->data, rx_len); // do the business rx_schedule (dev, 0); return; } else { PRINTD (DBG_SKB|DBG_WARN, "failed to get skb"); } } else { PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel); // do we count this? } } else { PRINTK (KERN_WARNING, "dropped over-size frame"); // do we count this? } } else { PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)"); // do we count this? } } else { // Wait update complete ? 
SPONG } // RX was aborted YELLOW_LED_ON(dev); FLUSH_RX_CHANNEL (dev,rx_channel); clear_bit (rx_busy, &dev->flags); return; } /********** interrupt handler **********/ static irqreturn_t interrupt_handler(int irq, void *dev_id) { hrz_dev *dev = dev_id; u32 int_source; unsigned int irq_ok; PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id); // definitely for us irq_ok = 0; while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF) & INTERESTING_INTERRUPTS)) { // In the interests of fairness, the handlers below are // called in sequence and without immediate return to the head of // the while loop. This is only of issue for slow hosts (or when // debugging messages are on). Really slow hosts may find a fast // sender keeps them permanently in the IRQ handler. :( // (only an issue for slow hosts) RX completion goes before // rx_data_av as the former implies rx_busy and so the latter // would just abort. If it reschedules another transfer // (continuing the same frame) then it will not clear rx_busy. 
// (only an issue for slow hosts) TX completion goes before RX // data available as it is a much shorter routine - there is the // chance that any further transfers it schedules will be complete // by the time of the return to the head of the while loop if (int_source & RX_BUS_MASTER_COMPLETE) { ++irq_ok; PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted"); rx_bus_master_complete_handler (dev); } if (int_source & TX_BUS_MASTER_COMPLETE) { ++irq_ok; PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted"); tx_bus_master_complete_handler (dev); } if (int_source & RX_DATA_AV) { ++irq_ok; PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted"); rx_data_av_handler (dev); } } if (irq_ok) { PRINTD (DBG_IRQ, "work done: %u", irq_ok); } else { PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source); } PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id); if (irq_ok) return IRQ_HANDLED; return IRQ_NONE; } /********** housekeeping **********/ static void do_housekeeping (unsigned long arg) { // just stats at the moment hrz_dev * dev = (hrz_dev *) arg; // collect device-specific (not driver/atm-linux) stats here dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF); dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF); dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF); dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF); mod_timer (&dev->housekeeping, jiffies + HZ/10); return; } /********** find an idle channel for TX and set it up **********/ // called with tx_busy set static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) { unsigned short idle_channels; short tx_channel = -1; unsigned int spin_count; PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev); // better would be to fail immediately, the caller can then decide whether // to wait or drop (depending on whether this is UBR etc.) 
spin_count = 0; while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) { PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel"); // delay a bit here if (++spin_count > 100) { PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel"); return -EBUSY; } } // got an idle channel { // tx_idle ensures we look for idle channels in RR order int chan = dev->tx_idle; int keep_going = 1; while (keep_going) { if (idle_channels & (1<<chan)) { tx_channel = chan; keep_going = 0; } ++chan; if (chan == TX_CHANS) chan = 0; } dev->tx_idle = chan; } // set up the channel we found { // Initialise the cell header in the transmit channel descriptor // a.k.a. prepare the channel and remember that we have done so. tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel]; u32 rd_ptr; u32 wr_ptr; u16 channel = vcc->channel; unsigned long flags; spin_lock_irqsave (&dev->mem_lock, flags); // Update the transmit channel record. dev->tx_channel_record[tx_channel] = channel; // xBR channel update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS, vcc->tx_xbr_bits); // Update the PCR counter preload value etc. update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS, vcc->tx_pcr_bits); #if 0 if (vcc->tx_xbr_bits == VBR_RATE_TYPE) { // SCR timer update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS, vcc->tx_scr_bits); // Bucket size... update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS, vcc->tx_bucket_bits); // ... and fullness update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS, vcc->tx_bucket_bits); } #endif // Initialise the read and write buffer pointers rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK; wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK; // idle TX channels should have identical pointers if (rd_ptr != wr_ptr) { PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!"); // spin_unlock... return -E... 
// I wonder if gcc would get rid of one of the pointer aliases } PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.", rd_ptr, wr_ptr); switch (vcc->aal) { case aal0: PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0"); rd_ptr |= CHANNEL_TYPE_RAW_CELLS; wr_ptr |= CHANNEL_TYPE_RAW_CELLS; break; case aal34: PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34"); rd_ptr |= CHANNEL_TYPE_AAL3_4; wr_ptr |= CHANNEL_TYPE_AAL3_4; break; case aal5: rd_ptr |= CHANNEL_TYPE_AAL5; wr_ptr |= CHANNEL_TYPE_AAL5; // Initialise the CRC wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC); break; } wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr); wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr); // Write the Cell Header // Payload Type, CLP and GFC would go here if non-zero wr_mem (dev, &tx_desc->cell_header, channel); spin_unlock_irqrestore (&dev->mem_lock, flags); } return tx_channel; } /********** send a frame **********/ static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { unsigned int spin_count; int free_buffers; hrz_dev * dev = HRZ_DEV(atm_vcc->dev); hrz_vcc * vcc = HRZ_VCC(atm_vcc); u16 channel = vcc->channel; u32 buffers_required; /* signed for error return */ short tx_channel; PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u", channel, skb->data, skb->len); dump_skb (">>>", channel, skb); if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) { PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel); hrz_kfree_skb (skb); return -EIO; } // don't understand this ATM_SKB(skb)->vcc = atm_vcc; if (skb->len > atm_vcc->qos.txtp.max_sdu) { PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping..."); hrz_kfree_skb (skb); return -EIO; } if (!channel) { PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel"); hrz_kfree_skb (skb); return -EIO; } #if 0 { // where would be a better place for this? housekeeping? 
u16 status; pci_read_config_word (dev->pci_dev, PCI_STATUS, &status); if (status & PCI_STATUS_REC_MASTER_ABORT) { PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)"); status &= ~PCI_STATUS_REC_MASTER_ABORT; pci_write_config_word (dev->pci_dev, PCI_STATUS, status); if (test_bit (tx_busy, &dev->flags)) { hrz_kfree_skb (dev->tx_skb); tx_release (dev); } } } #endif #ifdef DEBUG_HORIZON /* wey-hey! */ if (channel == 1023) { unsigned int i; unsigned short d = 0; char * s = skb->data; if (*s++ == 'D') { for (i = 0; i < 4; ++i) d = (d << 4) | hex_to_bin(*s++); PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d); } } #endif // wait until TX is free and grab lock if (tx_hold (dev)) { hrz_kfree_skb (skb); return -ERESTARTSYS; } // Wait for enough space to be available in transmit buffer memory. // should be number of cells needed + 2 (according to hardware docs) // = ((framelen+8)+47) / 48 + 2 // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3; // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry) spin_count = 0; while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) { PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d", free_buffers, buffers_required); // what is the appropriate delay? implement a timeout? (depending on line speed?) // mdelay (1); // what happens if we kill (current_pid, SIGKILL) ? schedule(); if (++spin_count > 1000) { PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d", free_buffers, buffers_required); tx_release (dev); hrz_kfree_skb (skb); return -ERESTARTSYS; } } // Select a channel to transmit the frame on. if (channel == dev->last_vc) { PRINTD (DBG_TX, "last vc hack: hit"); tx_channel = dev->tx_last; } else { PRINTD (DBG_TX, "last vc hack: miss"); // Are we currently transmitting this VC on one of the channels? 
for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel) if (dev->tx_channel_record[tx_channel] == channel) { PRINTD (DBG_TX, "vc already on channel: hit"); break; } if (tx_channel == TX_CHANS) { PRINTD (DBG_TX, "vc already on channel: miss"); // Find and set up an idle channel. tx_channel = setup_idle_tx_channel (dev, vcc); if (tx_channel < 0) { PRINTD (DBG_TX|DBG_ERR, "failed to get channel"); tx_release (dev); return tx_channel; } } PRINTD (DBG_TX, "got channel"); SELECT_TX_CHANNEL(dev, tx_channel); dev->last_vc = channel; dev->tx_last = tx_channel; } PRINTD (DBG_TX, "using channel %u", tx_channel); YELLOW_LED_OFF(dev); // TX start transfer { unsigned int tx_len = skb->len; unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags; // remember this so we can free it later dev->tx_skb = skb; if (tx_iovcnt) { // scatter gather transfer dev->tx_regions = tx_iovcnt; dev->tx_iovec = NULL; /* @@@ needs rewritten */ dev->tx_bytes = 0; PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)", skb->data, tx_len); tx_release (dev); hrz_kfree_skb (skb); return -EIO; } else { // simple transfer dev->tx_regions = 0; dev->tx_iovec = NULL; dev->tx_bytes = tx_len; dev->tx_addr = skb->data; PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)", skb->data, tx_len); } // and do the business tx_schedule (dev, 0); } return 0; } /********** reset a card **********/ static void hrz_reset (const hrz_dev * dev) { u32 control_0_reg = rd_regl (dev, CONTROL_0_REG); // why not set RESET_HORIZON to one and wait for the card to // reassert that bit as zero? 
Like so: control_0_reg = control_0_reg & RESET_HORIZON; wr_regl (dev, CONTROL_0_REG, control_0_reg); while (control_0_reg & RESET_HORIZON) control_0_reg = rd_regl (dev, CONTROL_0_REG); // old reset code retained: wr_regl (dev, CONTROL_0_REG, control_0_reg | RESET_ATM | RESET_RX | RESET_TX | RESET_HOST); // just guessing here udelay (1000); wr_regl (dev, CONTROL_0_REG, control_0_reg); } /********** read the burnt in address **********/ static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl) { wr_regl (dev, CONTROL_0_REG, ctrl); udelay (5); } static void CLOCK_IT (const hrz_dev *dev, u32 ctrl) { // DI must be valid around rising SK edge WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK); WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK); } static u16 __devinit read_bia (const hrz_dev * dev, u16 addr) { u32 ctrl = rd_regl (dev, CONTROL_0_REG); const unsigned int addr_bits = 6; const unsigned int data_bits = 16; unsigned int i; u16 res; ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI); WRITE_IT_WAIT(dev, ctrl); // wake Serial EEPROM and send 110 (READ) command ctrl |= (SEEPROM_CS | SEEPROM_DI); CLOCK_IT(dev, ctrl); ctrl |= SEEPROM_DI; CLOCK_IT(dev, ctrl); ctrl &= ~SEEPROM_DI; CLOCK_IT(dev, ctrl); for (i=0; i<addr_bits; i++) { if (addr & (1 << (addr_bits-1))) ctrl |= SEEPROM_DI; else ctrl &= ~SEEPROM_DI; CLOCK_IT(dev, ctrl); addr = addr << 1; } // we could check that we have DO = 0 here ctrl &= ~SEEPROM_DI; res = 0; for (i=0;i<data_bits;i++) { res = res >> 1; CLOCK_IT(dev, ctrl); if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO) res |= (1 << (data_bits-1)); } ctrl &= ~(SEEPROM_SK | SEEPROM_CS); WRITE_IT_WAIT(dev, ctrl); return res; } /********** initialise a card **********/ static int __devinit hrz_init (hrz_dev * dev) { int onefivefive; u16 chan; int buff_count; HDW * mem; cell_buf * tx_desc; cell_buf * rx_desc; u32 ctrl; ctrl = rd_regl (dev, CONTROL_0_REG); PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl); onefivefive = ctrl & ATM_LAYER_STATUS; if (onefivefive) printk (DEV_LABEL ": Horizon Ultra (at 
155.52 MBps)"); else printk (DEV_LABEL ": Horizon (at 25 MBps)"); printk (":"); // Reset the card to get everything in a known state printk (" reset"); hrz_reset (dev); // Clear all the buffer memory printk (" clearing memory"); for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem) wr_mem (dev, mem, 0); printk (" tx channels"); // All transmit eight channels are set up as AAL5 ABR channels with // a 16us cell spacing. Why? // Channel 0 gets the free buffer at 100h, channel 1 gets the free // buffer at 110h etc. for (chan = 0; chan < TX_CHANS; ++chan) { tx_ch_desc * tx_desc = &memmap->tx_descs[chan]; cell_buf * buf = &memmap->inittxbufs[chan]; // initialise the read and write buffer pointers wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf)); wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf)); // set the status of the initial buffers to empty wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY); } // Use space bufn3 at the moment for tx buffers printk (" tx buffers"); tx_desc = memmap->bufn3; wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY); for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) { wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY); tx_desc++; } wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY); // Initialise the transmit free buffer count wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE); printk (" rx channels"); // Initialise all of the receive channels to be AAL5 disabled with // an interrupt threshold of 0 for (chan = 0; chan < RX_CHANS; ++chan) { rx_ch_desc * rx_desc = &memmap->rx_descs[chan]; wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED); } printk (" rx buffers"); // Use space bufn4 at the moment for rx buffers rx_desc = memmap->bufn4; wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY); for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) { wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | 
BUFF_STATUS_EMPTY); rx_desc++; } wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY); // Initialise the receive free buffer count wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE); // Initialize Horizons registers // TX config wr_regw (dev, TX_CONFIG_OFF, ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE); // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0. wr_regw (dev, RX_CONFIG_OFF, DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits); // RX line config wr_regw (dev, RX_LINE_CONFIG_OFF, LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4); // Set the max AAL5 cell count to be just enough to contain the // largest AAL5 frame that the user wants to receive wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF, DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD)); // Enable receive wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE); printk (" control"); // Drive the OE of the LEDs then turn the green LED on ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED; wr_regl (dev, CONTROL_0_REG, ctrl); // Test for a 155-capable card if (onefivefive) { // Select 155 mode... make this a choice (or: how do we detect // external line speed and switch?) ctrl |= ATM_LAYER_SELECT; wr_regl (dev, CONTROL_0_REG, ctrl); // test SUNI-lite vs SAMBA // Register 0x00 in the SUNI will have some of bits 3-7 set, and // they will always be zero for the SAMBA. Ha! Bloody hardware // engineers. It'll never work. 
if (rd_framer (dev, 0) & 0x00f0) { // SUNI printk (" SUNI"); // Reset, just in case wr_framer (dev, 0x00, 0x0080); wr_framer (dev, 0x00, 0x0000); // Configure transmit FIFO wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002); // Set line timed mode wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001); } else { // SAMBA printk (" SAMBA"); // Reset, just in case wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001); wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001); // Turn off diagnostic loopback and enable line-timed mode wr_framer (dev, 0, 0x0002); // Turn on transmit outputs wr_framer (dev, 2, 0x0B80); } } else { // Select 25 mode ctrl &= ~ATM_LAYER_SELECT; // Madge B154 setup // none required? } printk (" LEDs"); GREEN_LED_ON(dev); YELLOW_LED_ON(dev); printk (" ESI="); { u16 b = 0; int i; u8 * esi = dev->atm_dev->esi; // in the card I have, EEPROM // addresses 0, 1, 2 contain 0 // addresess 5, 6 etc. contain ffff // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order) // the read_bia routine gets the BIA in Ethernet bit order for (i=0; i < ESI_LEN; ++i) { if (i % 2 == 0) b = read_bia (dev, i/2 + 2); else b = b >> 8; esi[i] = b & 0xFF; printk ("%02x", esi[i]); } } // Enable RX_Q and ?X_COMPLETE interrupts only wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS); printk (" IRQ on"); printk (".\n"); return onefivefive; } /********** check max_sdu **********/ static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) { PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu"); switch (aal) { case aal0: if (!(tp->max_sdu)) { PRINTD (DBG_QOS, "defaulting max_sdu"); tp->max_sdu = ATM_AAL0_SDU; } else if (tp->max_sdu != ATM_AAL0_SDU) { PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu"); return -EINVAL; } break; case aal34: if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) { PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? 
"capp" : "default"); tp->max_sdu = ATM_MAX_AAL34_PDU; } break; case aal5: if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) { PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default"); tp->max_sdu = max_frame_size; } break; } return 0; } /********** check pcr **********/ // something like this should be part of ATM Linux static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) { // we are assuming non-UBR, and non-special values of pcr if (tp->min_pcr == ATM_MAX_PCR) PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR"); else if (tp->min_pcr < 0) PRINTD (DBG_QOS, "luser gave negative min_pcr"); else if (tp->min_pcr && tp->min_pcr > pcr) PRINTD (DBG_QOS, "pcr less than min_pcr"); else // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1) // easier to #define ATM_MAX_PCR 0 and have all rates unsigned? // [this would get rid of next two conditionals] if ((0) && tp->max_pcr == ATM_MAX_PCR) PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR"); else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0) PRINTD (DBG_QOS, "luser gave negative max_pcr"); else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr) PRINTD (DBG_QOS, "pcr greater than max_pcr"); else { // each limit unspecified or not violated PRINTD (DBG_QOS, "xBR(pcr) OK"); return 0; } PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d", pcr, tp->min_pcr, tp->pcr, tp->max_pcr); return -EINVAL; } /********** open VC **********/ static int hrz_open (struct atm_vcc *atm_vcc) { int error; u16 channel; struct atm_qos * qos; struct atm_trafprm * txtp; struct atm_trafprm * rxtp; hrz_dev * dev = HRZ_DEV(atm_vcc->dev); hrz_vcc vcc; hrz_vcc * vccp; // allocated late short vpi = atm_vcc->vpi; int vci = atm_vcc->vci; PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci); #ifdef ATM_VPI_UNSPEC // UNSPEC is deprecated, remove this code eventually if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) { PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI 
(deprecated)"); return -EINVAL; } #endif error = vpivci_to_channel (&channel, vpi, vci); if (error) { PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci); return error; } vcc.channel = channel; // max speed for the moment vcc.tx_rate = 0x0; qos = &atm_vcc->qos; // check AAL and remember it switch (qos->aal) { case ATM_AAL0: // we would if it were 48 bytes and not 52! PRINTD (DBG_QOS|DBG_VCC, "AAL0"); vcc.aal = aal0; break; case ATM_AAL34: // we would if I knew how do the SAR! PRINTD (DBG_QOS|DBG_VCC, "AAL3/4"); vcc.aal = aal34; break; case ATM_AAL5: PRINTD (DBG_QOS|DBG_VCC, "AAL5"); vcc.aal = aal5; break; default: PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!"); return -EINVAL; break; } // TX traffic parameters // there are two, interrelated problems here: 1. the reservation of // PCR is not a binary choice, we are given bounds and/or a // desirable value; 2. the device is only capable of certain values, // most of which are not integers. It is almost certainly acceptable // to be off by a maximum of 1 to 10 cps. // Pragmatic choice: always store an integral PCR as that which has // been allocated, even if we allocate a little (or a lot) less, // after rounding. The actual allocation depends on what we can // manage with our rate selection algorithm. The rate selection // algorithm is given an integral PCR and a tolerance and told // whether it should round the value up or down if the tolerance is // exceeded; it returns: a) the actual rate selected (rounded up to // the nearest integer), b) a bit pattern to feed to the timer // register, and c) a failure value if no applicable rate exists. 
// Part of the job is done by atm_pcr_goal which gives us a PCR // specification which says: EITHER grab the maximum available PCR // (and perhaps a lower bound which we musn't pass), OR grab this // amount, rounding down if you have to (and perhaps a lower bound // which we musn't pass) OR grab this amount, rounding up if you // have to (and perhaps an upper bound which we musn't pass). If any // bounds ARE passed we fail. Note that rounding is only rounding to // match device limitations, we do not round down to satisfy // bandwidth availability even if this would not violate any given // lower bound. // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s // (say) so this is not even a binary fixpoint cell rate (but this // device can do it). To avoid this sort of hassle we use a // tolerance parameter (currently fixed at 10 cps). PRINTD (DBG_QOS, "TX:"); txtp = &qos->txtp; // set up defaults for no traffic vcc.tx_rate = 0; // who knows what would actually happen if you try and send on this? vcc.tx_xbr_bits = IDLE_RATE_TYPE; vcc.tx_pcr_bits = CLOCK_DISABLE; #if 0 vcc.tx_scr_bits = CLOCK_DISABLE; vcc.tx_bucket_bits = 0; #endif if (txtp->traffic_class != ATM_NONE) { error = check_max_sdu (vcc.aal, txtp, max_tx_size); if (error) { PRINTD (DBG_QOS, "TX max_sdu check failed"); return error; } switch (txtp->traffic_class) { case ATM_UBR: { // we take "the PCR" as a rate-cap // not reserved vcc.tx_rate = 0; make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL); vcc.tx_xbr_bits = ABR_RATE_TYPE; break; } #if 0 case ATM_ABR: { // reserve min, allow up to max vcc.tx_rate = 0; // ? make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0); vcc.tx_xbr_bits = ABR_RATE_TYPE; break; } #endif case ATM_CBR: { int pcr = atm_pcr_goal (txtp); rounding r; if (!pcr) { // down vs. up, remaining bandwidth vs. unlimited bandwidth!! 
// should really have: once someone gets unlimited bandwidth // that no more non-UBR channels can be opened until the // unlimited one closes?? For the moment, round_down means // greedy people actually get something and not nothing r = round_down; // slight race (no locking) here so we may get -EAGAIN // later; the greedy bastards would deserve it :) PRINTD (DBG_QOS, "snatching all remaining TX bandwidth"); pcr = dev->tx_avail; } else if (pcr < 0) { r = round_down; pcr = -pcr; } else { r = round_up; } error = make_rate_with_tolerance (dev, pcr, r, 10, &vcc.tx_pcr_bits, &vcc.tx_rate); if (error) { PRINTD (DBG_QOS, "could not make rate from TX PCR"); return error; } // not really clear what further checking is needed error = atm_pcr_check (txtp, vcc.tx_rate); if (error) { PRINTD (DBG_QOS, "TX PCR failed consistency check"); return error; } vcc.tx_xbr_bits = CBR_RATE_TYPE; break; } #if 0 case ATM_VBR: { int pcr = atm_pcr_goal (txtp); // int scr = atm_scr_goal (txtp); int scr = pcr/2; // just for fun unsigned int mbs = 60; // just for fun rounding pr; rounding sr; unsigned int bucket; if (!pcr) { pr = round_nearest; pcr = 1<<30; } else if (pcr < 0) { pr = round_down; pcr = -pcr; } else { pr = round_up; } error = make_rate_with_tolerance (dev, pcr, pr, 10, &vcc.tx_pcr_bits, 0); if (!scr) { // see comments for PCR with CBR above sr = round_down; // slight race (no locking) here so we may get -EAGAIN // later; the greedy bastards would deserve it :) PRINTD (DBG_QOS, "snatching all remaining TX bandwidth"); scr = dev->tx_avail; } else if (scr < 0) { sr = round_down; scr = -scr; } else { sr = round_up; } error = make_rate_with_tolerance (dev, scr, sr, 10, &vcc.tx_scr_bits, &vcc.tx_rate); if (error) { PRINTD (DBG_QOS, "could not make rate from TX SCR"); return error; } // not really clear what further checking is needed // error = atm_scr_check (txtp, vcc.tx_rate); if (error) { PRINTD (DBG_QOS, "TX SCR failed consistency check"); return error; } // bucket calculations (from 
a piece of paper...) cell bucket // capacity must be largest integer smaller than m(p-s)/p + 1 // where m = max burst size, p = pcr, s = scr bucket = mbs*(pcr-scr)/pcr; if (bucket*pcr != mbs*(pcr-scr)) bucket += 1; if (bucket > BUCKET_MAX_SIZE) { PRINTD (DBG_QOS, "shrinking bucket from %u to %u", bucket, BUCKET_MAX_SIZE); bucket = BUCKET_MAX_SIZE; } vcc.tx_xbr_bits = VBR_RATE_TYPE; vcc.tx_bucket_bits = bucket; break; } #endif default: { PRINTD (DBG_QOS, "unsupported TX traffic class"); return -EINVAL; break; } } } // RX traffic parameters PRINTD (DBG_QOS, "RX:"); rxtp = &qos->rxtp; // set up defaults for no traffic vcc.rx_rate = 0; if (rxtp->traffic_class != ATM_NONE) { error = check_max_sdu (vcc.aal, rxtp, max_rx_size); if (error) { PRINTD (DBG_QOS, "RX max_sdu check failed"); return error; } switch (rxtp->traffic_class) { case ATM_UBR: { // not reserved break; } #if 0 case ATM_ABR: { // reserve min vcc.rx_rate = 0; // ? break; } #endif case ATM_CBR: { int pcr = atm_pcr_goal (rxtp); if (!pcr) { // slight race (no locking) here so we may get -EAGAIN // later; the greedy bastards would deserve it :) PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); pcr = dev->rx_avail; } else if (pcr < 0) { pcr = -pcr; } vcc.rx_rate = pcr; // not really clear what further checking is needed error = atm_pcr_check (rxtp, vcc.rx_rate); if (error) { PRINTD (DBG_QOS, "RX PCR failed consistency check"); return error; } break; } #if 0 case ATM_VBR: { // int scr = atm_scr_goal (rxtp); int scr = 1<<16; // just for fun if (!scr) { // slight race (no locking) here so we may get -EAGAIN // later; the greedy bastards would deserve it :) PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); scr = dev->rx_avail; } else if (scr < 0) { scr = -scr; } vcc.rx_rate = scr; // not really clear what further checking is needed // error = atm_scr_check (rxtp, vcc.rx_rate); if (error) { PRINTD (DBG_QOS, "RX SCR failed consistency check"); return error; } break; } #endif default: { PRINTD (DBG_QOS, 
"unsupported RX traffic class"); return -EINVAL; break; } } } // late abort useful for diagnostics if (vcc.aal != aal5) { PRINTD (DBG_QOS, "AAL not supported"); return -EINVAL; } // get space for our vcc stuff and copy parameters into it vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL); if (!vccp) { PRINTK (KERN_ERR, "out of memory!"); return -ENOMEM; } *vccp = vcc; // clear error and grab cell rate resource lock error = 0; spin_lock (&dev->rate_lock); if (vcc.tx_rate > dev->tx_avail) { PRINTD (DBG_QOS, "not enough TX PCR left"); error = -EAGAIN; } if (vcc.rx_rate > dev->rx_avail) { PRINTD (DBG_QOS, "not enough RX PCR left"); error = -EAGAIN; } if (!error) { // really consume cell rates dev->tx_avail -= vcc.tx_rate; dev->rx_avail -= vcc.rx_rate; PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR", vcc.tx_rate, vcc.rx_rate); } // release lock and exit on error spin_unlock (&dev->rate_lock); if (error) { PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources"); kfree (vccp); return error; } // this is "immediately before allocating the connection identifier // in hardware" - so long as the next call does not fail :) set_bit(ATM_VF_ADDR,&atm_vcc->flags); // any errors here are very serious and should never occur if (rxtp->traffic_class != ATM_NONE) { if (dev->rxer[channel]) { PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX"); error = -EBUSY; } if (!error) error = hrz_open_rx (dev, channel); if (error) { kfree (vccp); return error; } // this link allows RX frames through dev->rxer[channel] = atm_vcc; } // success, set elements of atm_vcc atm_vcc->dev_data = (void *) vccp; // indicate readiness set_bit(ATM_VF_READY,&atm_vcc->flags); return 0; } /********** close VC **********/ static void hrz_close (struct atm_vcc * atm_vcc) { hrz_dev * dev = HRZ_DEV(atm_vcc->dev); hrz_vcc * vcc = HRZ_VCC(atm_vcc); u16 channel = vcc->channel; PRINTD (DBG_VCC|DBG_FLOW, "hrz_close"); // indicate unreadiness clear_bit(ATM_VF_READY,&atm_vcc->flags); if 
(atm_vcc->qos.txtp.traffic_class != ATM_NONE) { unsigned int i; // let any TX on this channel that has started complete // no restart, just keep trying while (tx_hold (dev)) ; // remove record of any tx_channel having been setup for this channel for (i = 0; i < TX_CHANS; ++i) if (dev->tx_channel_record[i] == channel) { dev->tx_channel_record[i] = -1; break; } if (dev->last_vc == channel) dev->tx_last = -1; tx_release (dev); } if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { // disable RXing - it tries quite hard hrz_close_rx (dev, channel); // forget the vcc - no more skbs will be pushed if (atm_vcc != dev->rxer[channel]) PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p", "arghhh! we're going to die!", atm_vcc, dev->rxer[channel]); dev->rxer[channel] = NULL; } // atomically release our rate reservation spin_lock (&dev->rate_lock); PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR", vcc->tx_rate, vcc->rx_rate); dev->tx_avail += vcc->tx_rate; dev->rx_avail += vcc->rx_rate; spin_unlock (&dev->rate_lock); // free our structure kfree (vcc); // say the VPI/VCI is free again clear_bit(ATM_VF_ADDR,&atm_vcc->flags); } #if 0 static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void *optval, int optlen) { hrz_dev * dev = HRZ_DEV(atm_vcc->dev); PRINTD (DBG_FLOW|DBG_VCC, "hrz_getsockopt"); switch (level) { case SOL_SOCKET: switch (optname) { // case SO_BCTXOPT: // break; // case SO_BCRXOPT: // break; default: return -ENOPROTOOPT; break; }; break; } return -EINVAL; } static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void *optval, unsigned int optlen) { hrz_dev * dev = HRZ_DEV(atm_vcc->dev); PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); switch (level) { case SOL_SOCKET: switch (optname) { // case SO_BCTXOPT: // break; // case SO_BCRXOPT: // break; default: return -ENOPROTOOPT; break; }; break; } return -EINVAL; } #endif #if 0 static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) { hrz_dev * dev 
= HRZ_DEV(atm_dev); PRINTD (DBG_FLOW, "hrz_ioctl"); return -1; } unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) { hrz_dev * dev = HRZ_DEV(atm_dev); PRINTD (DBG_FLOW, "hrz_phy_get"); return 0; } static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value, unsigned long addr) { hrz_dev * dev = HRZ_DEV(atm_dev); PRINTD (DBG_FLOW, "hrz_phy_put"); } static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) { hrz_dev * dev = HRZ_DEV(vcc->dev); PRINTD (DBG_FLOW, "hrz_change_qos"); return -1; } #endif /********** proc file contents **********/ static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) { hrz_dev * dev = HRZ_DEV(atm_dev); int left = *pos; PRINTD (DBG_FLOW, "hrz_proc_read"); /* more diagnostics here? */ #if 0 if (!left--) { unsigned int count = sprintf (page, "vbr buckets:"); unsigned int i; for (i = 0; i < TX_CHANS; ++i) count += sprintf (page, " %u/%u", query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS), query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS)); count += sprintf (page+count, ".\n"); return count; } #endif if (!left--) return sprintf (page, "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n", dev->tx_cell_count, dev->rx_cell_count, dev->hec_error_count, dev->unassigned_cell_count); if (!left--) return sprintf (page, "free cell buffers: TX %hu, RX %hu+%hu.\n", rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF), rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF), dev->noof_spare_buffers); if (!left--) return sprintf (page, "cps remaining: TX %u, RX %u\n", dev->tx_avail, dev->rx_avail); return 0; } static const struct atmdev_ops hrz_ops = { .open = hrz_open, .close = hrz_close, .send = hrz_send, .proc_read = hrz_proc_read, .owner = THIS_MODULE, }; static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { hrz_dev * dev; int err = 0; // adapter slot free, read resources from PCI configuration space u32 iobase = pci_resource_start 
(pci_dev, 0);
  /* NOTE(review): bus_to_virt on a PCI BAR is a legacy construct and is
   * not valid on most modern platforms — membase is only logged below in
   * this fragment; verify actual use before relying on it. */
  u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
  unsigned int irq;
  unsigned char lat;  /* PCI latency timer value, read/frobnicated later */

  PRINTD (DBG_FLOW, "hrz_probe");

  if (pci_enable_device(pci_dev))
    return -EINVAL;

  /* Resource acquisition below uses the classic goto-cleanup ladder:
   * each failure jumps to the label that releases everything acquired
   * so far (out_free_irq -> out_free -> out_release -> out_disable). */

  /* XXX DEV_LABEL is a guess */
  if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
    err = -EINVAL;
    goto out_disable;
  }

  dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
  if (!dev) {
    // perhaps we should be nice: deregister all adapters and abort?
    PRINTD(DBG_ERR, "out of memory");
    err = -ENOMEM;
    goto out_release;
  }

  pci_set_drvdata(pci_dev, dev);

  // grab IRQ and install handler - move this someplace more sensible
  irq = pci_dev->irq;
  if (request_irq(irq,
		  interrupt_handler,
		  IRQF_SHARED, /* irqflags guess */
		  DEV_LABEL, /* name guess */
		  dev)) {
    PRINTD(DBG_WARN, "request IRQ failed!");
    err = -EINVAL;
    goto out_free;
  }

  PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
	 iobase, irq, membase);

  /* Register with the ATM core; dev->atm_dev is the handle callers use. */
  dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
				  NULL);
  if (!(dev->atm_dev)) {
    PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
    err = -EINVAL;
    goto out_free_irq;
  }

  PRINTD(DBG_INFO, "registered Madge ATM adapter (no.
%d) (%p) at %p", dev->atm_dev->number, dev, dev->atm_dev); dev->atm_dev->dev_data = (void *) dev; dev->pci_dev = pci_dev; // enable bus master accesses pci_set_master(pci_dev); // frobnicate latency (upwards, usually) pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat); if (pci_lat) { PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu", "changing", lat, pci_lat); pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); } else if (lat < MIN_PCI_LATENCY) { PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu", "increasing", lat, MIN_PCI_LATENCY); pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY); } dev->iobase = iobase; dev->irq = irq; dev->membase = membase; dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0]; dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1]; // these next three are performance hacks dev->last_vc = -1; dev->tx_last = -1; dev->tx_idle = 0; dev->tx_regions = 0; dev->tx_bytes = 0; dev->tx_skb = NULL; dev->tx_iovec = NULL; dev->tx_cell_count = 0; dev->rx_cell_count = 0; dev->hec_error_count = 0; dev->unassigned_cell_count = 0; dev->noof_spare_buffers = 0; { unsigned int i; for (i = 0; i < TX_CHANS; ++i) dev->tx_channel_record[i] = -1; } dev->flags = 0; // Allocate cell rates and remember ASIC version // Fibre: ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53 // Copper: (WRONG) we want 6 into the above, close to 25Mb/s // Copper: (plagarise!) 25600000/8/270*260/53 - n/53 if (hrz_init(dev)) { // to be really pedantic, this should be ATM_OC3c_PCR dev->tx_avail = ATM_OC3_PCR; dev->rx_avail = ATM_OC3_PCR; set_bit(ultra, &dev->flags); // NOT "|= ultra" ! 
} else { dev->tx_avail = ((25600000/8)*26)/(27*53); dev->rx_avail = ((25600000/8)*26)/(27*53); PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering."); } // rate changes spinlock spin_lock_init(&dev->rate_lock); // on-board memory access spinlock; we want atomic reads and // writes to adapter memory (handles IRQ and SMP) spin_lock_init(&dev->mem_lock); init_waitqueue_head(&dev->tx_queue); // vpi in 0..4, vci in 6..10 dev->atm_dev->ci_range.vpi_bits = vpi_bits; dev->atm_dev->ci_range.vci_bits = 10-vpi_bits; init_timer(&dev->housekeeping); dev->housekeeping.function = do_housekeeping; dev->housekeeping.data = (unsigned long) dev; mod_timer(&dev->housekeeping, jiffies); out: return err; out_free_irq: free_irq(dev->irq, dev); out_free: kfree(dev); out_release: release_region(iobase, HRZ_IO_EXTENT); out_disable: pci_disable_device(pci_dev); goto out; } static void __devexit hrz_remove_one(struct pci_dev *pci_dev) { hrz_dev *dev; dev = pci_get_drvdata(pci_dev); PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev); del_timer_sync(&dev->housekeeping); hrz_reset(dev); atm_dev_deregister(dev->atm_dev); free_irq(dev->irq, dev); release_region(dev->iobase, HRZ_IO_EXTENT); kfree(dev); pci_disable_device(pci_dev); } static void __init hrz_check_args (void) { #ifdef DEBUG_HORIZON PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK); #else if (debug) PRINTK (KERN_NOTICE, "no debug support in this image"); #endif if (vpi_bits > HRZ_MAX_VPI) PRINTK (KERN_ERR, "vpi_bits has been limited to %hu", vpi_bits = HRZ_MAX_VPI); if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT) PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu", max_tx_size = TX_AAL5_LIMIT); if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT) PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu", max_rx_size = RX_AAL5_LIMIT); return; } MODULE_AUTHOR(maintainer_string); MODULE_DESCRIPTION(description_string); MODULE_LICENSE("GPL"); module_param(debug, ushort, 0644); module_param(vpi_bits, 
ushort, 0);
module_param(max_tx_size, int, 0);
module_param(max_rx_size, int, 0);
module_param(pci_lat, byte, 0);
MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");

/* PCI IDs this driver binds to: the Madge Horizon ATM adapter family. */
static struct pci_device_id hrz_pci_tbl[] = {
	{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);

static struct pci_driver hrz_driver = {
	.name =		"horizon",
	.probe =	hrz_probe,
	.remove =	__devexit_p(hrz_remove_one),
	.id_table =	hrz_pci_tbl,
};

/********** module entry **********/

/*
 * Module load: verify the compiled-in adapter memory map size, print the
 * driver version, clamp module parameters to legal ranges, then register
 * the PCI driver (probe runs per matching device).
 */
static int __init hrz_module_init (void) {
  // sanity check - cast is needed since printk does not support %Zu
  if (sizeof(struct MEMMAP) != 128*1024/4) {
    PRINTK (KERN_ERR, "Fix struct MEMMAP (is %lu fakewords).",
	    (unsigned long) sizeof(struct MEMMAP));
    return -ENOMEM;
  }

  show_version();

  // check arguments
  hrz_check_args();

  // get the juice
  return pci_register_driver(&hrz_driver);
}

/********** module exit **********/

static void __exit hrz_module_exit (void) {
  PRINTD (DBG_FLOW, "cleanup_module");

  pci_unregister_driver(&hrz_driver);
}

module_init(hrz_module_init);
module_exit(hrz_module_exit);
gpl-2.0
santod/NuK3rn3l_htc_m7_GPE-5.1
arch/arm/mach-realview/core.c
4796
12930
/* * linux/arch/arm/mach-realview/core.c * * Copyright (C) 1999 - 2003 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/amba/clcd.h> #include <linux/io.h> #include <linux/smsc911x.h> #include <linux/ata_platform.h> #include <linux/amba/mmci.h> #include <linux/gfp.h> #include <linux/clkdev.h> #include <linux/mtd/physmap.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/hardware/arm_timer.h> #include <asm/hardware/icst.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> #include <asm/hardware/gic.h> #include <mach/platform.h> #include <mach/irqs.h> #include <asm/hardware/timer-sp.h> #include <plat/clcd.h> #include <plat/sched_clock.h> #include "core.h" #define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET) static void realview_flash_set_vpp(struct platform_device *pdev, int on) { u32 val; val = __raw_readl(REALVIEW_FLASHCTRL); if (on) val |= REALVIEW_FLASHPROG_FLVPPEN; else val &= ~REALVIEW_FLASHPROG_FLVPPEN; __raw_writel(val, REALVIEW_FLASHCTRL); } static 
struct physmap_flash_data realview_flash_data = { .width = 4, .set_vpp = realview_flash_set_vpp, }; struct platform_device realview_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &realview_flash_data, }, }; int realview_flash_register(struct resource *res, u32 num) { realview_flash_device.resource = res; realview_flash_device.num_resources = num; return platform_device_register(&realview_flash_device); } static struct smsc911x_platform_config smsc911x_config = { .flags = SMSC911X_USE_32BIT, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device realview_eth_device = { .name = "smsc911x", .id = 0, .num_resources = 2, }; int realview_eth_register(const char *name, struct resource *res) { if (name) realview_eth_device.name = name; realview_eth_device.resource = res; if (strcmp(realview_eth_device.name, "smsc911x") == 0) realview_eth_device.dev.platform_data = &smsc911x_config; return platform_device_register(&realview_eth_device); } struct platform_device realview_usb_device = { .name = "isp1760", .num_resources = 2, }; int realview_usb_register(struct resource *res) { realview_usb_device.resource = res; return platform_device_register(&realview_usb_device); } static struct pata_platform_info pata_platform_data = { .ioport_shift = 1, }; static struct resource pata_resources[] = { [0] = { .start = REALVIEW_CF_BASE, .end = REALVIEW_CF_BASE + 0xff, .flags = IORESOURCE_MEM, }, [1] = { .start = REALVIEW_CF_BASE + 0x100, .end = REALVIEW_CF_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device realview_cf_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(pata_resources), .resource = pata_resources, .dev = { .platform_data = &pata_platform_data, }, }; static struct resource realview_i2c_resource = { .start = REALVIEW_I2C_BASE, .end = REALVIEW_I2C_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }; 
struct platform_device realview_i2c_device = { .name = "versatile-i2c", .id = 0, .num_resources = 1, .resource = &realview_i2c_resource, }; static struct i2c_board_info realview_i2c_board_info[] = { { I2C_BOARD_INFO("ds1338", 0xd0 >> 1), }, }; static int __init realview_i2c_init(void) { return i2c_register_board_info(0, realview_i2c_board_info, ARRAY_SIZE(realview_i2c_board_info)); } arch_initcall(realview_i2c_init); #define REALVIEW_SYSMCI (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_MCI_OFFSET) /* * This is only used if GPIOLIB support is disabled */ static unsigned int realview_mmc_status(struct device *dev) { struct amba_device *adev = container_of(dev, struct amba_device, dev); u32 mask; if (machine_is_realview_pb1176()) { static bool inserted = false; /* * The PB1176 does not have the status register, * assume it is inserted at startup, then invert * for each call so card insertion/removal will * be detected anyway. This will not be called if * GPIO on PL061 is active, which is the proper * way to do this on the PB1176. */ inserted = !inserted; return inserted ? 
0 : 1; } if (adev->res.start == REALVIEW_MMCI0_BASE) mask = 1; else mask = 2; return readl(REALVIEW_SYSMCI) & mask; } struct mmci_platform_data realview_mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 17, .gpio_cd = 16, .cd_invert = true, }; struct mmci_platform_data realview_mmc1_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = realview_mmc_status, .gpio_wp = 19, .gpio_cd = 18, .cd_invert = true, }; /* * Clock handling */ static const struct icst_params realview_oscvco_params = { .ref = 24000000, .vco_max = ICST307_VCO_MAX, .vco_min = ICST307_VCO_MIN, .vd_min = 4 + 8, .vd_max = 511 + 8, .rd_min = 1 + 2, .rd_max = 127 + 2, .s2div = icst307_s2div, .idx2s = icst307_idx2s, }; static void realview_oscvco_set(struct clk *clk, struct icst_vco vco) { void __iomem *sys_lock = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LOCK_OFFSET; u32 val; val = readl(clk->vcoreg) & ~0x7ffff; val |= vco.v | (vco.r << 9) | (vco.s << 16); writel(0xa05f, sys_lock); writel(val, clk->vcoreg); writel(0, sys_lock); } static const struct clk_ops oscvco_clk_ops = { .round = icst_clk_round, .set = icst_clk_set, .setvco = realview_oscvco_set, }; static struct clk oscvco_clk = { .ops = &oscvco_clk_ops, .params = &realview_oscvco_params, }; /* * These are fixed clocks. 
*/ static struct clk ref24_clk = { .rate = 24000000, }; static struct clk sp804_clk = { .rate = 1000000, }; static struct clk dummy_apb_pclk; static struct clk_lookup lookups[] = { { /* Bus clock */ .con_id = "apb_pclk", .clk = &dummy_apb_pclk, }, { /* UART0 */ .dev_id = "dev:uart0", .clk = &ref24_clk, }, { /* UART1 */ .dev_id = "dev:uart1", .clk = &ref24_clk, }, { /* UART2 */ .dev_id = "dev:uart2", .clk = &ref24_clk, }, { /* UART3 */ .dev_id = "fpga:uart3", .clk = &ref24_clk, }, { /* UART3 is on the dev chip in PB1176 */ .dev_id = "dev:uart3", .clk = &ref24_clk, }, { /* UART4 only exists in PB1176 */ .dev_id = "fpga:uart4", .clk = &ref24_clk, }, { /* KMI0 */ .dev_id = "fpga:kmi0", .clk = &ref24_clk, }, { /* KMI1 */ .dev_id = "fpga:kmi1", .clk = &ref24_clk, }, { /* MMC0 */ .dev_id = "fpga:mmc0", .clk = &ref24_clk, }, { /* CLCD is in the PB1176 and EB DevChip */ .dev_id = "dev:clcd", .clk = &oscvco_clk, }, { /* PB:CLCD */ .dev_id = "issp:clcd", .clk = &oscvco_clk, }, { /* SSP */ .dev_id = "dev:ssp0", .clk = &ref24_clk, }, { /* SP804 timers */ .dev_id = "sp804", .clk = &sp804_clk, }, }; void __init realview_init_early(void) { void __iomem *sys = __io_address(REALVIEW_SYS_BASE); if (machine_is_realview_pb1176()) oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC0_OFFSET; else oscvco_clk.vcoreg = sys + REALVIEW_SYS_OSC4_OFFSET; clkdev_add_table(lookups, ARRAY_SIZE(lookups)); versatile_sched_clock_init(sys + REALVIEW_SYS_24MHz_OFFSET, 24000000); } /* * CLCD support. */ #define SYS_CLCD_NLCDIOON (1 << 2) #define SYS_CLCD_VDDPOSSWITCH (1 << 3) #define SYS_CLCD_PWR3V5SWITCH (1 << 4) #define SYS_CLCD_ID_MASK (0x1f << 8) #define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8) #define SYS_CLCD_ID_UNKNOWN_8_4 (0x01 << 8) #define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8) #define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8) #define SYS_CLCD_ID_VGA (0x1f << 8) /* * Disable all display connectors on the interface module. 
*/ static void realview_clcd_disable(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); val &= ~SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); } /* * Enable the relevant connector on the interface module. */ static void realview_clcd_enable(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; u32 val; /* * Enable the PSUs */ val = readl(sys_clcd); val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); } /* * Detect which LCD panel is connected, and return the appropriate * clcd_panel structure. Note: we do not have any information on * the required timings for the 8.4in panel, so we presently assume * VGA timings. */ static int realview_clcd_setup(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET; const char *panel_name, *vga_panel_name; unsigned long framesize; u32 val; if (machine_is_realview_eb()) { /* VGA, 16bpp */ framesize = 640 * 480 * 2; vga_panel_name = "VGA"; } else { /* XVGA, 16bpp */ framesize = 1024 * 768 * 2; vga_panel_name = "XVGA"; } val = readl(sys_clcd) & SYS_CLCD_ID_MASK; if (val == SYS_CLCD_ID_SANYO_3_8) panel_name = "Sanyo TM38QV67A02A"; else if (val == SYS_CLCD_ID_SANYO_2_5) panel_name = "Sanyo QVGA Portrait"; else if (val == SYS_CLCD_ID_EPSON_2_2) panel_name = "Epson L2F50113T00"; else if (val == SYS_CLCD_ID_VGA) panel_name = vga_panel_name; else { pr_err("CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val); panel_name = vga_panel_name; } fb->panel = versatile_clcd_get_panel(panel_name); if (!fb->panel) return -EINVAL; return versatile_clcd_setup_dma(fb, framesize); } struct clcd_board clcd_plat_data = { .name = "RealView", .caps = CLCD_CAP_ALL, .check = clcdfb_check, .decode = clcdfb_decode, .disable = realview_clcd_disable, .enable = realview_clcd_enable, .setup = realview_clcd_setup, .mmap = 
versatile_clcd_mmap_dma, .remove = versatile_clcd_remove_dma, }; #ifdef CONFIG_LEDS #define VA_LEDS_BASE (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LED_OFFSET) void realview_leds_event(led_event_t ledevt) { unsigned long flags; u32 val; u32 led = 1 << smp_processor_id(); local_irq_save(flags); val = readl(VA_LEDS_BASE); switch (ledevt) { case led_idle_start: val = val & ~led; break; case led_idle_end: val = val | led; break; case led_timer: val = val ^ REALVIEW_SYS_LED7; break; case led_halted: val = 0; break; default: break; } writel(val, VA_LEDS_BASE); local_irq_restore(flags); } #endif /* CONFIG_LEDS */ /* * Where is the timer (VA)? */ void __iomem *timer0_va_base; void __iomem *timer1_va_base; void __iomem *timer2_va_base; void __iomem *timer3_va_base; /* * Set up the clock source and clock events devices */ void __init realview_timer_init(unsigned int timer_irq) { u32 val; /* * set clock frequency: * REALVIEW_REFCLK is 32KHz * REALVIEW_TIMCLK is 1MHz */ val = readl(__io_address(REALVIEW_SCTL_BASE)); writel((REALVIEW_TIMCLK << REALVIEW_TIMER1_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER2_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER3_EnSel) | (REALVIEW_TIMCLK << REALVIEW_TIMER4_EnSel) | val, __io_address(REALVIEW_SCTL_BASE)); /* * Initialise to a known state (all timers off) */ writel(0, timer0_va_base + TIMER_CTRL); writel(0, timer1_va_base + TIMER_CTRL); writel(0, timer2_va_base + TIMER_CTRL); writel(0, timer3_va_base + TIMER_CTRL); sp804_clocksource_init(timer3_va_base, "timer3"); sp804_clockevents_init(timer0_va_base, timer_irq, "timer0"); } /* * Setup the memory banks. */ void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) { /* * Most RealView platforms have 512MB contiguous RAM at 0x70000000. * Half of this is mirrored at 0. 
*/ #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET meminfo->bank[0].start = 0x70000000; meminfo->bank[0].size = SZ_512M; meminfo->nr_banks = 1; #else meminfo->bank[0].start = 0; meminfo->bank[0].size = SZ_256M; meminfo->nr_banks = 1; #endif }
gpl-2.0
Snuzzo/B14CKB1RD_kernel_m8
drivers/gpu/drm/gma500/intel_opregion.c
5564
2240
/*
 * Copyright 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * FIXME: resolve with the i915 version
 */

#include "psb_drv.h"

/* On-disk layout of the ACPI OpRegion header placed by the video BIOS. */
struct opregion_header {
	u8 signature[16];
	u32 size;
	u32 opregion_ver;
	u8 bios_ver[32];
	u8 vbios_ver[16];
	u8 driver_ver[16];
	u32 mboxes;
	u8 reserved[164];
} __packed;

struct opregion_apci {
	/*FIXME: add it later*/
} __packed;

struct opregion_swsci {
	/*FIXME: add it later*/
} __packed;

struct opregion_acpi {
	/*FIXME: add it later*/
} __packed;

/* Byte offset of the lid-state dword within the mapped opregion. */
#define GMA_OPREGION_LID_OFFSET	0x01ac

/*
 * Map the ACPI opregion advertised by the video BIOS and record the
 * address of the lid-state word plus its current value.
 *
 * Returns 0 on success, -ENOTSUPP if the BIOS published no opregion
 * (ASLS config dword is zero), or -ENOMEM if the mapping fails.
 */
int gma_intel_opregion_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 opregion_phy;
	void *base;
	u32 *lid_state;

	dev_priv->lid_state = NULL;

	/* ASLS: physical opregion address lives at PCI config offset 0xfc. */
	pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
	if (opregion_phy == 0)
		return -ENOTSUPP;

	base = ioremap(opregion_phy, 8*1024);
	if (!base)
		return -ENOMEM;

	lid_state = base + GMA_OPREGION_LID_OFFSET;

	dev_priv->lid_state = lid_state;
	dev_priv->lid_last_state = readl(lid_state);
	return 0;
}

/*
 * Undo gma_intel_opregion_init(): release the opregion mapping.
 *
 * Bug fix: iounmap() must be handed the exact cookie returned by
 * ioremap(), but only the interior lid_state pointer (base + 0x01ac)
 * was stored.  The old code passed that offset pointer straight to
 * iounmap(); recover the mapping base before unmapping instead.
 */
int gma_intel_opregion_exit(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->lid_state)
		iounmap((void *)((char *)dev_priv->lid_state -
				 GMA_OPREGION_LID_OFFSET));
	return 0;
}
gpl-2.0
one-2-z/a830s_kernel
arch/arm/mach-integrator/cpu.c
7868
5220
/* * linux/arch/arm/mach-integrator/cpu.c * * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * CPU support functions */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/cpufreq.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/platform.h> #include <asm/mach-types.h> #include <asm/hardware/icst.h> static struct cpufreq_driver integrator_driver; #define CM_ID IO_ADDRESS(INTEGRATOR_HDR_ID) #define CM_OSC IO_ADDRESS(INTEGRATOR_HDR_OSC) #define CM_STAT IO_ADDRESS(INTEGRATOR_HDR_STAT) #define CM_LOCK IO_ADDRESS(INTEGRATOR_HDR_LOCK) static const struct icst_params lclk_params = { .ref = 24000000, .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, .vd_min = 8, .vd_max = 132, .rd_min = 24, .rd_max = 24, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; static const struct icst_params cclk_params = { .ref = 24000000, .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, .vd_min = 12, .vd_max = 160, .rd_min = 24, .rd_max = 24, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; /* * Validate the speed policy. 
*/ static int integrator_verify_policy(struct cpufreq_policy *policy) { struct icst_vco vco; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); policy->max = icst_hz(&cclk_params, vco) / 1000; vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); policy->min = icst_hz(&cclk_params, vco) / 1000; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); return 0; } static int integrator_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { cpumask_t cpus_allowed; int cpu = policy->cpu; struct icst_vco vco; struct cpufreq_freqs freqs; u_int cm_osc; /* * Save this threads cpus_allowed mask. */ cpus_allowed = current->cpus_allowed; /* * Bind to the specified CPU. When this call returns, * we should be running on the right CPU. */ set_cpus_allowed(current, cpumask_of_cpu(cpu)); BUG_ON(cpu != smp_processor_id()); /* get current setting */ cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; } else if (machine_is_cintegrator()) { vco.s = 1; } vco.v = cm_osc & 255; vco.r = 22; freqs.old = icst_hz(&cclk_params, vco) / 1000; /* icst_hz_to_vco rounds down -- so we need the next * larger freq in case of CPUFREQ_RELATION_L. 
*/ if (relation == CPUFREQ_RELATION_L) target_freq += 999; if (target_freq > policy->max) target_freq = policy->max; vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); freqs.new = icst_hz(&cclk_params, vco) / 1000; freqs.cpu = policy->cpu; if (freqs.old == freqs.new) { set_cpus_allowed(current, cpus_allowed); return 0; } cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { cm_osc &= 0xfffff800; cm_osc |= vco.s << 8; } else if (machine_is_cintegrator()) { cm_osc &= 0xffffff00; } cm_osc |= vco.v; __raw_writel(0xa05f, CM_LOCK); __raw_writel(cm_osc, CM_OSC); __raw_writel(0, CM_LOCK); /* * Restore the CPUs allowed mask. */ set_cpus_allowed(current, cpus_allowed); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; } static unsigned int integrator_get(unsigned int cpu) { cpumask_t cpus_allowed; unsigned int current_freq; u_int cm_osc; struct icst_vco vco; cpus_allowed = current->cpus_allowed; set_cpus_allowed(current, cpumask_of_cpu(cpu)); BUG_ON(cpu != smp_processor_id()); /* detect memory etc. 
*/ cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; } else { vco.s = 1; } vco.v = cm_osc & 255; vco.r = 22; current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ set_cpus_allowed(current, cpus_allowed); return current_freq; } static int integrator_cpufreq_init(struct cpufreq_policy *policy) { /* set default policy and cpuinfo */ policy->cpuinfo.max_freq = 160000; policy->cpuinfo.min_freq = 12000; policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ policy->cur = policy->min = policy->max = integrator_get(policy->cpu); return 0; } static struct cpufreq_driver integrator_driver = { .verify = integrator_verify_policy, .target = integrator_set_target, .get = integrator_get, .init = integrator_cpufreq_init, .name = "integrator", }; static int __init integrator_cpu_init(void) { return cpufreq_register_driver(&integrator_driver); } static void __exit integrator_cpu_exit(void) { cpufreq_unregister_driver(&integrator_driver); } MODULE_AUTHOR ("Russell M. King"); MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); MODULE_LICENSE ("GPL"); module_init(integrator_cpu_init); module_exit(integrator_cpu_exit);
gpl-2.0
BoyGau/linux
arch/powerpc/platforms/83xx/mpc830x_rdb.c
8892
1643
/* * arch/powerpc/platforms/83xx/mpc830x_rdb.c * * Description: MPC830x RDB board specific routines. * This file is based on mpc831x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2009. All rights reserved. * Copyright (C) 2010. Ilya Yanok, Emcraft Systems, yanok@emcraft.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <sysdev/fsl_pci.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" /* * Setup the architecture */ static void __init mpc830x_rdb_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc830x_rdb_setup_arch()", 0); mpc83xx_setup_pci(); mpc831x_usb_cfg(); } static const char *board[] __initdata = { "MPC8308RDB", "fsl,mpc8308rdb", "denx,mpc8308_p1m", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc830x_rdb_probe(void) { return of_flat_dt_match(of_get_flat_dt_root(), board); } machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc830x_rdb) { .name = "MPC830x RDB", .probe = mpc830x_rdb_probe, .setup_arch = mpc830x_rdb_setup_arch, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
Alucard24/SGS4-SAMMY-Kernel
drivers/video/msm/mdp4_dtv.c
189
10585
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <asm/system.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <linux/pm_runtime.h> #include <mach/clk.h> #include "msm_fb.h" #include "mdp4.h" static int dtv_probe(struct platform_device *pdev); static int dtv_remove(struct platform_device *pdev); static int dtv_off(struct platform_device *pdev); static int dtv_on(struct platform_device *pdev); static int dtv_off_sub(void); static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST]; static int pdev_list_cnt; static struct clk *tv_src_clk; static struct clk *hdmi_clk; static struct clk *mdp_tv_clk; static struct platform_device *dtv_pdev; static struct workqueue_struct *dtv_work_queue; static struct work_struct dtv_off_work; static int mdp4_dtv_runtime_suspend(struct device *dev) { dev_dbg(dev, "pm_runtime: suspending...\n"); return 0; } static int mdp4_dtv_runtime_resume(struct device *dev) { dev_dbg(dev, "pm_runtime: resuming...\n"); return 0; } static const struct dev_pm_ops mdp4_dtv_dev_pm_ops = { .runtime_suspend = mdp4_dtv_runtime_suspend, .runtime_resume = mdp4_dtv_runtime_resume, }; static struct platform_driver dtv_driver = { 
.probe = dtv_probe, .remove = dtv_remove, .suspend = NULL, .resume = NULL, .shutdown = NULL, .driver = { .name = "dtv", .pm = &mdp4_dtv_dev_pm_ops, }, }; static struct lcdc_platform_data *dtv_pdata; #ifdef CONFIG_MSM_BUS_SCALING static uint32_t dtv_bus_scale_handle; #if defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR) static uint32_t dtv_bus_scale_override; static uint32_t dtv_current_bus_vector_index; #endif #else static struct clk *ebi1_clk; #endif #if defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR) void dtv_update_camera_vector_override(uint8_t enable) { #ifdef CONFIG_MSM_BUS_SCALING if (dtv_bus_scale_handle <= 0) return; if (enable) { dtv_bus_scale_override = 1; if (dtv_current_bus_vector_index) msm_bus_scale_client_update_request(dtv_bus_scale_handle, 2); } else { dtv_bus_scale_override = 0; if (dtv_current_bus_vector_index) msm_bus_scale_client_update_request(dtv_bus_scale_handle, dtv_current_bus_vector_index); } #endif } #endif static int dtv_off(struct platform_device *pdev) { int ret = 0; struct msm_fb_data_type *mfd = NULL; if (!pdev) { pr_err("%s: FAILED: invalid arg\n", __func__); return -EINVAL; } mfd = platform_get_drvdata(pdev); if (!mfd) { pr_err("%s: FAILED: invalid mfd\n", __func__); return -EINVAL; } dtv_pdev = pdev; /* * If it's a suspend operation then handle the device * power down synchronously. * Otherwise, queue work item to handle power down sequence. * This is needed since we need to wait for the audio engine * to shutdown first before we turn off the DTV device. 
*/ if (!mfd->suspend.op_suspend) { pr_debug("%s: Queuing work to turn off HDMI core\n", __func__); queue_work(dtv_work_queue, &dtv_off_work); } else { pr_debug("%s: turning off HDMI core\n", __func__); ret = dtv_off_sub(); } return ret; } static int dtv_off_sub(void) { int ret = 0; if (!dtv_pdev) { pr_err("%s: FAILED: invalid arg\n", __func__); return -EINVAL; } ret = panel_next_off(dtv_pdev); pr_info("%s\n", __func__); clk_disable_unprepare(hdmi_clk); if (mdp_tv_clk) clk_disable_unprepare(mdp_tv_clk); if (dtv_pdata && dtv_pdata->lcdc_power_save) dtv_pdata->lcdc_power_save(0); if (dtv_pdata && dtv_pdata->lcdc_gpio_config) ret = dtv_pdata->lcdc_gpio_config(0); #ifdef CONFIG_MSM_BUS_SCALING #if defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR) if (dtv_bus_scale_handle > 0) { dtv_current_bus_vector_index = 0; msm_bus_scale_client_update_request(dtv_bus_scale_handle, 0); } #else if (dtv_bus_scale_handle > 0) msm_bus_scale_client_update_request(dtv_bus_scale_handle, 0); #endif #else if (ebi1_clk) clk_disable_unprepare(ebi1_clk); #endif mdp4_extn_disp = 0; return ret; } static void dtv_off_work_func(struct work_struct *work) { dtv_off_sub(); } static int dtv_on(struct platform_device *pdev) { int ret = 0; struct msm_fb_data_type *mfd; unsigned long panel_pixclock_freq , pm_qos_rate; /* If a power down is already underway, wait for it to finish */ flush_work_sync(&dtv_off_work); mfd = platform_get_drvdata(pdev); panel_pixclock_freq = mfd->fbi->var.pixclock; if (panel_pixclock_freq > 58000000) /* pm_qos_rate should be in Khz */ pm_qos_rate = panel_pixclock_freq / 1000 ; else pm_qos_rate = 58000; mdp4_extn_disp = 1; #ifdef CONFIG_MSM_BUS_SCALING #if defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR) if (dtv_bus_scale_handle > 0) { if (dtv_bus_scale_override) msm_bus_scale_client_update_request(dtv_bus_scale_handle, 2); else msm_bus_scale_client_update_request(dtv_bus_scale_handle, 1); dtv_current_bus_vector_index = 1; } #else if 
(dtv_bus_scale_handle > 0) msm_bus_scale_client_update_request(dtv_bus_scale_handle, 1); #endif #else if (ebi1_clk) { clk_set_rate(ebi1_clk, pm_qos_rate * 1000); clk_prepare_enable(ebi1_clk); } #endif if (dtv_pdata && dtv_pdata->lcdc_power_save) dtv_pdata->lcdc_power_save(1); if (dtv_pdata && dtv_pdata->lcdc_gpio_config) ret = dtv_pdata->lcdc_gpio_config(1); mfd = platform_get_drvdata(pdev); ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock); if (ret) { pr_info("%s: clk_set_rate(%d) failed\n", __func__, mfd->fbi->var.pixclock); if (mfd->fbi->var.pixclock == 27030000) mfd->fbi->var.pixclock = 27000000; ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock); } pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__, mfd->fbi->var.pixclock/1000, pm_qos_rate, ret); mfd->panel_info.clk_rate = mfd->fbi->var.pixclock; clk_prepare_enable(hdmi_clk); clk_reset(hdmi_clk, CLK_RESET_ASSERT); udelay(20); clk_reset(hdmi_clk, CLK_RESET_DEASSERT); if (mdp_tv_clk) clk_prepare_enable(mdp_tv_clk); ret = panel_next_on(pdev); return ret; } static int dtv_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct fb_info *fbi; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc; if (pdev->id == 0) { dtv_pdata = pdev->dev.platform_data; #ifdef CONFIG_MSM_BUS_SCALING if (!dtv_bus_scale_handle && dtv_pdata && dtv_pdata->bus_scale_table) { dtv_bus_scale_handle = msm_bus_scale_register_client( dtv_pdata->bus_scale_table); if (!dtv_bus_scale_handle) { pr_err("%s not able to get bus scale\n", __func__); } } #else ebi1_clk = clk_get(&pdev->dev, "mem_clk"); if (IS_ERR(ebi1_clk)) { ebi1_clk = NULL; pr_warning("%s: Couldn't get ebi1 clock\n", __func__); } #endif tv_src_clk = clk_get(&pdev->dev, "src_clk"); if (IS_ERR(tv_src_clk)) { pr_err("error: can't get tv_src_clk!\n"); return IS_ERR(tv_src_clk); } hdmi_clk = clk_get(&pdev->dev, "hdmi_clk"); if (IS_ERR(hdmi_clk)) { pr_err("error: can't get hdmi_clk!\n"); return IS_ERR(hdmi_clk); 
} mdp_tv_clk = clk_get(&pdev->dev, "mdp_clk"); if (IS_ERR(mdp_tv_clk)) mdp_tv_clk = NULL; return 0; } dtv_work_queue = create_singlethread_workqueue("dtv_work"); INIT_WORK(&dtv_off_work, dtv_off_work_func); mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST) return -ENOMEM; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_LCDC; /* * alloc panel device data */ if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct msm_fb_panel_data))) { pr_err("dtv_probe: platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } /* * data chain */ pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data; pdata->on = dtv_on; pdata->off = dtv_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; if (hdmi_prim_display) mfd->fb_imgType = MSMFB_DEFAULT_TYPE; else mfd->fb_imgType = MDP_RGB_565; fbi = mfd->fbi; fbi->var.pixclock = mfd->panel_info.clk_rate; fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch; fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch; fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch; fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch; fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width; fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width; /* * set driver data */ platform_set_drvdata(mdp_dev, mfd); /* * register in mdp driver */ rc = platform_device_add(mdp_dev); if (rc) goto dtv_probe_err; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pdev_list[pdev_list_cnt++] = pdev; return 0; dtv_probe_err: #ifdef CONFIG_MSM_BUS_SCALING if (dtv_pdata && dtv_pdata->bus_scale_table && dtv_bus_scale_handle > 0) msm_bus_scale_unregister_client(dtv_bus_scale_handle); #endif platform_device_put(mdp_dev); return rc; } static int 
dtv_remove(struct platform_device *pdev) { if (dtv_work_queue) destroy_workqueue(dtv_work_queue); #ifdef CONFIG_MSM_BUS_SCALING if (dtv_pdata && dtv_pdata->bus_scale_table && dtv_bus_scale_handle > 0) msm_bus_scale_unregister_client(dtv_bus_scale_handle); #else if (ebi1_clk) clk_put(ebi1_clk); #endif pm_runtime_disable(&pdev->dev); return 0; } static int dtv_register_driver(void) { return platform_driver_register(&dtv_driver); } static int __init dtv_driver_init(void) { return dtv_register_driver(); } module_init(dtv_driver_init);
gpl-2.0
pichina/linux-bcache
drivers/char/mwave/mwavedd.c
189
18926
/* * * mwavedd.c -- mwave device driver * * * Written By: Mike Sullivan IBM Corporation * * Copyright (C) 1999 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. 
* * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * 10/23/2000 - Alpha Release * First release to the public */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/device.h> #include <linux/serial.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/serial_8250.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" #include "tp3780i.h" MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver"); MODULE_AUTHOR("Mike Sullivan and Paul Schroeder"); MODULE_LICENSE("GPL"); /* * These parameters support the setting of MWave resources. Note that no * checks are made against other devices (ie. superio) for conflicts. 
* We'll depend on users using the tpctl utility to do that for now
*/

/* Module parameters: resource overrides for the 3780i DSP and its UART. */
int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
module_param(mwave_debug, int, 0);
module_param(mwave_3780i_irq, int, 0);
module_param(mwave_3780i_io, int, 0);
module_param(mwave_uart_irq, int, 0);
module_param(mwave_uart_io, int, 0);

static int mwave_open(struct inode *inode, struct file *file);
static int mwave_close(struct inode *inode, struct file *file);
static long mwave_ioctl(struct file *filp, unsigned int iocmd,
							unsigned long ioarg);

/* Single global device instance; the driver supports only one adapter. */
MWAVE_DEVICE_DATA mwave_s_mdd;

/* Open is stateless: it only traces the call and cycles the BKL. */
static int mwave_open(struct inode *inode, struct file *file)
{
	unsigned int retval = 0;

	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_open, entry inode %p file %p\n",
		 inode, file);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_open, exit return retval %x\n", retval);

	cycle_kernel_lock();
	return retval;
}

/* Release is likewise stateless; nothing is torn down per-fd. */
static int mwave_close(struct inode *inode, struct file *file)
{
	unsigned int retval = 0;

	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_close, entry inode %p file %p\n",
		 inode, file);
	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
		retval);

	return retval;
}

/*
 * Unlocked ioctl entry point.  Commands are serialized per-call with
 * lock_kernel()/unlock_kernel() around the tp3780I_* hardware accesses.
 * Returns 0 or the tp3780I_* status on success, negative errno on error.
 */
static long mwave_ioctl(struct file *file, unsigned int iocmd,
							unsigned long ioarg)
{
	unsigned int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
	void __user *arg = (void __user *)ioarg;

	PRINTK_4(TRACE_MWAVE,
		"mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
		file, iocmd, (int) ioarg);

	switch (iocmd) {

		case IOCTL_MW_RESET:
			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
				" calling tp3780I_ResetDSP\n");
			lock_kernel();
			retval = tp3780I_ResetDSP(&pDrvData->rBDData);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
				" retval %x from tp3780I_ResetDSP\n",
				retval);
			break;

		case IOCTL_MW_RUN:
			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
				" calling tp3780I_StartDSP\n");
			lock_kernel();
			retval = tp3780I_StartDSP(&pDrvData->rBDData);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
				" retval %x from tp3780I_StartDSP\n",
				retval);
			break;

		case IOCTL_MW_DSP_ABILITIES: {
			MW_ABILITIES rAbilities;

			PRINTK_1(TRACE_MWAVE,
				"mwavedd::mwave_ioctl,"
				" IOCTL_MW_DSP_ABILITIES calling"
				" tp3780I_QueryAbilities\n");
			lock_kernel();
			retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
					&rAbilities);
			unlock_kernel();
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
				" retval %x from tp3780I_QueryAbilities\n",
				retval);
			/* Copy the result out only on success. */
			if (retval == 0) {
				if( copy_to_user(arg, &rAbilities,
							sizeof(MW_ABILITIES)) )
					return -EFAULT;
			}
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
				" exit retval %x\n",
				retval);
		}
			break;

		case IOCTL_MW_READ_DATA:
		case IOCTL_MW_READCLEAR_DATA: {
			MW_READWRITE rReadData;
			unsigned short __user *pusBuffer = NULL;

			if( copy_from_user(&rReadData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			/* pBuf is a user-space pointer carried in the struct;
			 * the tp3780I layer does the copy_to_user. */
			pusBuffer = (unsigned short __user *) (rReadData.pBuf);

			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rReadData.ulDataLength, ioarg, pusBuffer);
			lock_kernel();
			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
					iocmd,
					pusBuffer,
					rReadData.ulDataLength,
					rReadData.usDspAddress);
			unlock_kernel();
		}
			break;

		case IOCTL_MW_READ_INST: {
			MW_READWRITE rReadData;
			unsigned short __user *pusBuffer = NULL;

			if( copy_from_user(&rReadData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer = (unsigned short __user *) (rReadData.pBuf);

			PRINTK_4(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
				" size %lx, ioarg %lx pusBuffer %p\n",
				rReadData.ulDataLength / 2, ioarg,
				pusBuffer);
			lock_kernel();
			/* NOTE(review): despite the INST name this goes
			 * through the DStore path with the byte length
			 * halved to words — confirm against tp3780i.c
			 * before changing. */
			retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
				iocmd, pusBuffer,
				rReadData.ulDataLength / 2,
				rReadData.usDspAddress);
			unlock_kernel();
		}
			break;

		case IOCTL_MW_WRITE_DATA: {
			MW_READWRITE rWriteData;
			unsigned short __user *pusBuffer = NULL;

			if( copy_from_user(&rWriteData, arg,
						sizeof(MW_READWRITE)) )
				return -EFAULT;
			pusBuffer =
(unsigned short __user *) (rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); lock_kernel(); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); unlock_kernel(); } break; case IOCTL_MW_WRITE_INST: { MW_READWRITE rWriteData; unsigned short __user *pusBuffer = NULL; if( copy_from_user(&rWriteData, arg, sizeof(MW_READWRITE)) ) return -EFAULT; pusBuffer = (unsigned short __user *)(rWriteData.pBuf); PRINTK_4(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST," " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); lock_kernel(); retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); unlock_kernel(); } break; case IOCTL_MW_REGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_REGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } lock_kernel(); pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; unlock_kernel(); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x exit\n", ipcnum); } break; case IOCTL_MW_GET_IPC: { unsigned int ipcnum = (unsigned int) ioarg; PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", ipcnum, pDrvData->IPCs[ipcnum].usIntCount); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_GET_IPC: Error:" " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } lock_kernel(); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { DECLARE_WAITQUEUE(wait, 
current); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, thread for" " ipc %x going to sleep\n", ipcnum); add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); pDrvData->IPCs[ipcnum].bIsHere = TRUE; set_current_state(TASK_INTERRUPTIBLE); /* check whether an event was signalled by */ /* the interrupt handler while we were gone */ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " handling first int\n", ipcnum); } else { /* either 1st int has not yet occurred, or we have already handled the first int */ schedule(); if (pDrvData->IPCs[ipcnum].usIntCount == 1) { pDrvData->IPCs[ipcnum].usIntCount = 2; } PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl" " IOCTL_MW_GET_IPC ipcnum %x" " woke up and returning to" " application\n", ipcnum); } pDrvData->IPCs[ipcnum].bIsHere = FALSE; remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait); set_current_state(TASK_RUNNING); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC," " returning thread for ipc %x" " processing\n", ipcnum); } unlock_kernel(); } break; case IOCTL_MW_UNREGISTER_IPC: { unsigned int ipcnum = (unsigned int) ioarg; PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC" " ipcnum %x\n", ipcnum); if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_ioctl:" " IOCTL_MW_UNREGISTER_IPC:" " Error: Invalid ipcnum %x\n", ipcnum); return -EINVAL; } lock_kernel(); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) { wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); } } unlock_kernel(); } break; default: return -ENOTTY; break; } /* switch */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); return retval; } static ssize_t mwave_read(struct file 
*file, char __user *buf,
			   size_t count, loff_t * ppos)
{
	/* Reads are not supported on this device node; use ioctl(). */
	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
		file, buf, count, ppos);

	return -EINVAL;
}

static ssize_t mwave_write(struct file *file, const char __user *buf,
			   size_t count, loff_t * ppos)
{
	/* Writes are not supported either; all I/O goes through ioctl(). */
	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_write entry file %p, buf %p,"
		" count %zx ppos %p\n",
		file, buf, count, ppos);

	return -EINVAL;
}

/*
 * register_serial_portandirq - hand the Mwave UART to the 8250 core.
 *
 * Accepts only the legacy COM port bases (0x3f8/0x2f8/0x3e8/0x2e8) and
 * ISA IRQs 3/4/5/7.  Returns the 8250 line number on success or -1 on
 * an invalid port/irq combination (caller only tests for < 0).
 */
static int register_serial_portandirq(unsigned int port, int irq)
{
	struct uart_port uart;

	switch ( port ) {
		case 0x3f8:
		case 0x2f8:
		case 0x3e8:
		case 0x2e8:
			/* OK */
			break;
		default:
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::register_serial_portandirq:"
					" Error: Illegal port %x\n", port );
			return -1;
	} /* switch */
	/* port is okay */

	switch ( irq ) {
		case 3:
		case 4:
		case 5:
		case 7:
			/* OK */
			break;
		default:
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::register_serial_portandirq:"
					" Error: Illegal irq %x\n", irq );
			return -1;
	} /* switch */
	/* irq is okay */

	memset(&uart, 0, sizeof(struct uart_port));

	uart.uartclk = 1843200;		/* 1.8432 MHz standard UART clock */
	uart.iobase = port;
	uart.irq = irq;
	uart.iotype = UPIO_PORT;
	uart.flags = UPF_SHARE_IRQ;
	return serial8250_register_port(&uart);
}

static const struct file_operations mwave_fops = {
	.owner		= THIS_MODULE,
	.read		= mwave_read,
	.write		= mwave_write,
	.unlocked_ioctl	= mwave_ioctl,
	.open		= mwave_open,
	.release	= mwave_close
};

static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };

#if 0 /* totally b0rked */
/*
 * sysfs support <paulsch@us.ibm.com>
 */

struct device mwave_device;

/* Prevent code redundancy, create a macro for mwave_show_* functions. */
#define mwave_show_function(attr_name, format_string, field)		\
static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	DSP_3780I_CONFIG_SETTINGS *pSettings =				\
		&mwave_s_mdd.rBDData.rDspSettings;			\
	return sprintf(buf, format_string, pSettings->field);		\
}

/* All of our attributes are read attributes.
*/ #define mwave_dev_rd_attr(attr_name, format_string, field) \ mwave_show_function(attr_name, format_string, field) \ static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL) mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma); mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq); mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO); mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq); mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO); static struct device_attribute * const mwave_dev_attrs[] = { &dev_attr_3780i_dma, &dev_attr_3780i_irq, &dev_attr_3780i_io, &dev_attr_uart_irq, &dev_attr_uart_io, }; #endif /* * mwave_init is called on module load * * mwave_exit is called on module unload * mwave_exit is also used to clean up after an aborted mwave_init */ static void mwave_exit(void) { pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n"); #if 0 for (i = 0; i < pDrvData->nr_registered_attrs; i++) device_remove_file(&mwave_device, mwave_dev_attrs[i]); pDrvData->nr_registered_attrs = 0; if (pDrvData->device_registered) { device_unregister(&mwave_device); pDrvData->device_registered = FALSE; } #endif if ( pDrvData->sLine >= 0 ) { serial8250_unregister_port(pDrvData->sLine); } if (pDrvData->bMwaveDevRegistered) { misc_deregister(&mwave_misc_dev); } if (pDrvData->bDSPEnabled) { tp3780I_DisableDSP(&pDrvData->rBDData); } if (pDrvData->bResourcesClaimed) { tp3780I_ReleaseResources(&pDrvData->rBDData); } if (pDrvData->bBDInitialized) { tp3780I_Cleanup(&pDrvData->rBDData); } PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n"); } module_exit(mwave_exit); static int __init mwave_init(void) { int i; int retval = 0; pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd; PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n"); memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA)); pDrvData->bBDInitialized = FALSE; pDrvData->bResourcesClaimed = FALSE; pDrvData->bDSPEnabled = FALSE; pDrvData->bDSPReset = FALSE; pDrvData->bMwaveDevRegistered = FALSE; 
pDrvData->sLine = -1; for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) { pDrvData->IPCs[i].bIsEnabled = FALSE; pDrvData->IPCs[i].bIsHere = FALSE; pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */ init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue); } retval = tp3780I_InitializeBoardData(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_InitializeBoardData" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize board data\n"); goto cleanup_error; } pDrvData->bBDInitialized = TRUE; retval = tp3780I_CalcResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_CalcResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to calculate resources\n"); goto cleanup_error; } retval = tp3780I_ClaimResources(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_ClaimResources" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to claim resources\n"); goto cleanup_error; } pDrvData->bResourcesClaimed = TRUE; retval = tp3780I_EnableDSP(&pDrvData->rBDData); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_init, return from tp3780I_EnableDSP" " retval %x\n", retval); if (retval) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to enable DSP\n"); goto cleanup_error; } pDrvData->bDSPEnabled = TRUE; if (misc_register(&mwave_misc_dev) < 0) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register misc device\n"); goto cleanup_error; } pDrvData->bMwaveDevRegistered = TRUE; pDrvData->sLine = register_serial_portandirq( pDrvData->rBDData.rDspSettings.usUartBaseIO, pDrvData->rBDData.rDspSettings.usUartIrq ); if (pDrvData->sLine < 0) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to register serial driver\n"); goto cleanup_error; } /* uart is 
registered */ #if 0 /* sysfs */ memset(&mwave_device, 0, sizeof (struct device)); dev_set_name(&mwave_device, "mwave"); if (device_register(&mwave_device)) goto cleanup_error; pDrvData->device_registered = TRUE; for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) { if(device_create_file(&mwave_device, mwave_dev_attrs[i])) { PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd:mwave_init: Error:" " Failed to create sysfs file %s\n", mwave_dev_attrs[i]->attr.name); goto cleanup_error; } pDrvData->nr_registered_attrs++; } #endif /* SUCCESS! */ return 0; cleanup_error: PRINTK_ERROR(KERN_ERR_MWAVE "mwavedd::mwave_init: Error:" " Failed to initialize\n"); mwave_exit(); /* clean up */ return -EIO; } module_init(mwave_init);
gpl-2.0
full-of-foo/linux
arch/x86/mm/pat.c
445
25443
/* * Handle caching attributes in page tables (PAT) * * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * Suresh B Siddha <suresh.b.siddha@intel.com> * * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. */ #include <linux/seq_file.h> #include <linux/bootmem.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/rbtree.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include <asm/fcntl.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/pat.h> #include <asm/io.h> #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; static inline void pat_disable(const char *reason) { pat_enabled = 0; printk(KERN_INFO "%s\n", reason); } static int __init nopat(char *str) { pat_disable("PAT support disabled."); return 0; } early_param("nopat", nopat); #else static inline void pat_disable(const char *reason) { (void)reason; } #endif static int debug_enable; static int __init pat_debug_setup(char *str) { debug_enable = 1; return 0; } __setup("debugpat", pat_debug_setup); #define dprintk(fmt, arg...) \ do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0) static u64 __read_mostly boot_pat_state; enum { PAT_UC = 0, /* uncached */ PAT_WC = 1, /* Write combining */ PAT_WT = 4, /* Write Through */ PAT_WP = 5, /* Write Protected */ PAT_WB = 6, /* Write Back (default) */ PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ }; #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) void pat_init(void) { u64 pat; bool boot_cpu = !boot_pat_state; if (!pat_enabled) return; if (!cpu_has_pat) { if (!boot_pat_state) { pat_disable("PAT not supported by CPU."); return; } else { /* * If this happens we are on a secondary CPU, but * switched to PAT on the boot CPU. We have no way to * undo PAT. 
*/ printk(KERN_ERR "PAT enabled, " "but not supported by secondary CPU\n"); BUG(); } } /* Set PWT to Write-Combining. All other bits stay the same */ /* * PTE encoding used in Linux: * PAT * |PCD * ||PWT * ||| * 000 WB _PAGE_CACHE_WB * 001 WC _PAGE_CACHE_WC * 010 UC- _PAGE_CACHE_UC_MINUS * 011 UC _PAGE_CACHE_UC * PAT bit unused */ pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); /* Boot CPU check */ if (!boot_pat_state) rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); wrmsrl(MSR_IA32_CR_PAT, pat); if (boot_cpu) printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", smp_processor_id(), boot_pat_state, pat); } #undef PAT static char *cattr_name(unsigned long flags) { switch (flags & _PAGE_CACHE_MASK) { case _PAGE_CACHE_UC: return "uncached"; case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; case _PAGE_CACHE_WB: return "write-back"; case _PAGE_CACHE_WC: return "write-combining"; default: return "broken"; } } /* * The global memtype list keeps track of memory type for specific * physical memory areas. Conflicting memory types in different * mappings can cause CPU cache corruption. To avoid this we keep track. * * The list is sorted based on starting address and can contain multiple * entries for each address (this allows reference counting for overlapping * areas). All the aliases have the same cache attributes of course. * Zero attributes are represented as holes. * * The data structure is a list that is also organized as an rbtree * sorted on the start address of memtype range. * * memtype_lock protects both the linear list and rbtree. 
*/ struct memtype { u64 start; u64 end; unsigned long type; struct list_head nd; struct rb_node rb; }; static struct rb_root memtype_rbroot = RB_ROOT; static LIST_HEAD(memtype_list); static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ static struct memtype *memtype_rb_search(struct rb_root *root, u64 start) { struct rb_node *node = root->rb_node; struct memtype *last_lower = NULL; while (node) { struct memtype *data = container_of(node, struct memtype, rb); if (data->start < start) { last_lower = data; node = node->rb_right; } else if (data->start > start) { node = node->rb_left; } else return data; } /* Will return NULL if there is no entry with its start <= start */ return last_lower; } static void memtype_rb_insert(struct rb_root *root, struct memtype *data) { struct rb_node **new = &(root->rb_node); struct rb_node *parent = NULL; while (*new) { struct memtype *this = container_of(*new, struct memtype, rb); parent = *new; if (data->start <= this->start) new = &((*new)->rb_left); else if (data->start > this->start) new = &((*new)->rb_right); } rb_link_node(&data->rb, parent, new); rb_insert_color(&data->rb, root); } /* * Does intersection of PAT memory type and MTRR memory type and returns * the resulting memory type as PAT understands it. * (Type in pat and mtrr will not have same value) * The intersection is based on "Effective Memory Type" tables in IA-32 * SDM vol 3a */ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) { /* * Look for MTRR hint to get the effective type in case where PAT * request is for WB. 
*/ if (req_type == _PAGE_CACHE_WB) { u8 mtrr_type; mtrr_type = mtrr_type_lookup(start, end); if (mtrr_type != MTRR_TYPE_WRBACK) return _PAGE_CACHE_UC_MINUS; return _PAGE_CACHE_WB; } return req_type; } static int chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) { if (new->type != entry->type) { if (type) { new->type = entry->type; *type = entry->type; } else goto conflict; } /* check overlaps with more than one entry in the list */ list_for_each_entry_continue(entry, &memtype_list, nd) { if (new->end <= entry->start) break; else if (new->type != entry->type) goto conflict; } return 0; conflict: printk(KERN_INFO "%s:%d conflicting memory types " "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start, new->end, cattr_name(new->type), cattr_name(entry->type)); return -EBUSY; } static int pat_pagerange_is_ram(unsigned long start, unsigned long end) { int ram_page = 0, not_rampage = 0; unsigned long page_nr; for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); ++page_nr) { /* * For legacy reasons, physical address range in the legacy ISA * region is tracked as non-RAM. This will allow users of * /dev/mem to map portions of legacy ISA region, even when * some of those portions are listed(or not even listed) with * different e820 types(RAM/reserved/..) */ if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && page_is_ram(page_nr)) ram_page = 1; else not_rampage = 1; if (ram_page == not_rampage) return -1; } return ram_page; } /* * For RAM pages, we use page flags to mark the pages with appropriate type. * Here we do two pass: * - Find the memtype of all the pages in the range, look for any conflicts * - In case of no conflicts, set the new memtype for pages in the range * * Caller must hold memtype_lock for atomicity. 
*/
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	/*
	 * Pass 1: fail with -EBUSY if any page in the range already has a
	 * tracked memtype (get_page_memtype() != -1), reporting the
	 * conflicting type through *new_type.
	 */
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	/* Pass 2: no conflicts — mark every page with the requested type. */
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

/* Reset the tracked memtype of every page in [start, end) to the
 * untracked default (-1).  Always succeeds. */
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when requester want to inherit
 * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
*/ int reserve_memtype(u64 start, u64 end, unsigned long req_type, unsigned long *new_type) { struct memtype *new, *entry; unsigned long actual_type; struct list_head *where; int is_range_ram; int err = 0; BUG_ON(start >= end); /* end is exclusive */ if (!pat_enabled) { /* This is identical to page table setting without PAT */ if (new_type) { if (req_type == -1) *new_type = _PAGE_CACHE_WB; else if (req_type == _PAGE_CACHE_WC) *new_type = _PAGE_CACHE_UC_MINUS; else *new_type = req_type & _PAGE_CACHE_MASK; } return 0; } /* Low ISA region is always mapped WB in page table. No need to track */ if (is_ISA_range(start, end - 1)) { if (new_type) *new_type = _PAGE_CACHE_WB; return 0; } /* * Call mtrr_lookup to get the type hint. This is an * optimization for /dev/mem mmap'ers into WB memory (BIOS * tools and ACPI tools). Use WB request for WB memory and use * UC_MINUS otherwise. */ actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); if (new_type) *new_type = actual_type; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { spin_lock(&memtype_lock); err = reserve_ram_pages_type(start, end, req_type, new_type); spin_unlock(&memtype_lock); return err; } else if (is_range_ram < 0) { return -EINVAL; } new = kmalloc(sizeof(struct memtype), GFP_KERNEL); if (!new) return -ENOMEM; new->start = start; new->end = end; new->type = actual_type; spin_lock(&memtype_lock); /* Search for existing mapping that overlaps the current range */ where = NULL; list_for_each_entry(entry, &memtype_list, nd) { if (end <= entry->start) { where = entry->nd.prev; break; } else if (start <= entry->start) { /* end > entry->start */ err = chk_conflict(new, entry, new_type); if (!err) { dprintk("Overlap at 0x%Lx-0x%Lx\n", entry->start, entry->end); where = entry->nd.prev; } break; } else if (start < entry->end) { /* start > entry->start */ err = chk_conflict(new, entry, new_type); if (!err) { dprintk("Overlap at 0x%Lx-0x%Lx\n", entry->start, entry->end); /* * Move to 
right position in the linked * list to add this new entry */ list_for_each_entry_continue(entry, &memtype_list, nd) { if (start <= entry->start) { where = entry->nd.prev; break; } } } break; } } if (err) { printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " "track %s, req %s\n", start, end, cattr_name(new->type), cattr_name(req_type)); kfree(new); spin_unlock(&memtype_lock); return err; } if (where) list_add(&new->nd, where); else list_add_tail(&new->nd, &memtype_list); memtype_rb_insert(&memtype_rbroot, new); spin_unlock(&memtype_lock); dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", start, end, cattr_name(new->type), cattr_name(req_type), new_type ? cattr_name(*new_type) : "-"); return err; } int free_memtype(u64 start, u64 end) { struct memtype *entry, *saved_entry; int err = -EINVAL; int is_range_ram; if (!pat_enabled) return 0; /* Low ISA region is always mapped WB. No need to track */ if (is_ISA_range(start, end - 1)) return 0; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { spin_lock(&memtype_lock); err = free_ram_pages_type(start, end); spin_unlock(&memtype_lock); return err; } else if (is_range_ram < 0) { return -EINVAL; } spin_lock(&memtype_lock); entry = memtype_rb_search(&memtype_rbroot, start); if (unlikely(entry == NULL)) goto unlock_ret; /* * Saved entry points to an entry with start same or less than what * we searched for. 
Now go through the list in both directions to look * for the entry that matches with both start and end, with list stored * in sorted start address */ saved_entry = entry; list_for_each_entry_from(entry, &memtype_list, nd) { if (entry->start == start && entry->end == end) { rb_erase(&entry->rb, &memtype_rbroot); list_del(&entry->nd); kfree(entry); err = 0; break; } else if (entry->start > start) { break; } } if (!err) goto unlock_ret; entry = saved_entry; list_for_each_entry_reverse(entry, &memtype_list, nd) { if (entry->start == start && entry->end == end) { rb_erase(&entry->rb, &memtype_rbroot); list_del(&entry->nd); kfree(entry); err = 0; break; } else if (entry->start < start) { break; } } unlock_ret: spin_unlock(&memtype_lock); if (err) { printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", current->comm, current->pid, start, end); } dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); return err; } /** * lookup_memtype - Looksup the memory type for a physical address * @paddr: physical address of which memory type needs to be looked up * * Only to be called when PAT is enabled * * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or * _PAGE_CACHE_UC */ static unsigned long lookup_memtype(u64 paddr) { int rettype = _PAGE_CACHE_WB; struct memtype *entry; if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1)) return rettype; if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { struct page *page; spin_lock(&memtype_lock); page = pfn_to_page(paddr >> PAGE_SHIFT); rettype = get_page_memtype(page); spin_unlock(&memtype_lock); /* * -1 from get_page_memtype() implies RAM page is in its * default state and not reserved, and hence of type WB */ if (rettype == -1) rettype = _PAGE_CACHE_WB; return rettype; } spin_lock(&memtype_lock); entry = memtype_rb_search(&memtype_rbroot, paddr); if (entry != NULL) rettype = entry->type; else rettype = _PAGE_CACHE_UC_MINUS; spin_unlock(&memtype_lock); return rettype; } /** * io_reserve_memtype - Request a memory type 
mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region * @type: A pointer to memtype, with requested type. On success, requested * or any other compatible type that was available for the region is returned * * On success, returns 0 * On failure, returns non-zero */ int io_reserve_memtype(resource_size_t start, resource_size_t end, unsigned long *type) { resource_size_t size = end - start; unsigned long req_type = *type; unsigned long new_type; int ret; WARN_ON_ONCE(iomem_map_sanity_check(start, size)); ret = reserve_memtype(start, end, req_type, &new_type); if (ret) goto out_err; if (!is_new_memtype_allowed(start, size, req_type, new_type)) goto out_free; if (kernel_map_sync_memtype(start, size, new_type) < 0) goto out_free; *type = new_type; return 0; out_free: free_memtype(start, end); ret = -EBUSY; out_err: return ret; } /** * io_free_memtype - Release a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region */ void io_free_memtype(resource_size_t start, resource_size_t end) { free_memtype(start, end); } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { return vma_prot; } #ifdef CONFIG_STRICT_DEVMEM /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #else /* This check is needed to avoid cache aliasing when PAT is enabled */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; if (!pat_enabled) return 1; while (cursor < to) { if (!devmem_is_allowed(pfn)) { printk(KERN_INFO "Program %s tried to access /dev/mem between %Lx->%Lx.\n", current->comm, from, to); return 0; } cursor += PAGE_SIZE; pfn++; } return 1; } #endif /* CONFIG_STRICT_DEVMEM */ int 
phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { unsigned long flags = _PAGE_CACHE_WB; if (!range_is_allowed(pfn, size)) return 0; if (file->f_flags & O_SYNC) { flags = _PAGE_CACHE_UC_MINUS; } #ifdef CONFIG_X86_32 /* * On the PPro and successors, the MTRRs are used to set * memory types for physical addresses outside main memory, * so blindly setting UC or PWT on those pages is wrong. * For Pentiums and earlier, the surround logic should disable * caching for the high addresses through the KEN pin, but * we maintain the tradition of paranoia in this code. */ if (!pat_enabled && !(boot_cpu_has(X86_FEATURE_MTRR) || boot_cpu_has(X86_FEATURE_K6_MTRR) || boot_cpu_has(X86_FEATURE_CYRIX_ARR) || boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && (pfn << PAGE_SHIFT) >= __pa(high_memory)) { flags = _PAGE_CACHE_UC; } #endif *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | flags); return 1; } /* * Change the memory type for the physial address range in kernel identity * mapping space if that range is a part of identity map. */ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) { unsigned long id_sz; if (base >= __pa(high_memory)) return 0; id_sz = (__pa(high_memory) < base + size) ? __pa(high_memory) - base : size; if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " "for %Lx-%Lx\n", current->comm, current->pid, cattr_name(flags), base, (unsigned long long)(base + size)); return -EINVAL; } return 0; } /* * Internal interface to reserve a range of physical memory with prot. * Reserved non RAM regions only and after successful reserve_memtype, * this func also keeps identity mapping (if any) in sync with this new prot. 
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	/* Cache-type bits the caller is asking for, extracted from *vma_prot. */
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		/* Without PAT there is nothing to track for RAM pages. */
		if (!pat_enabled)
			return 0;

		/*
		 * RAM: the effective type is whatever is already recorded
		 * for the first page; if it differs from the request, warn
		 * and rewrite *vma_prot to the recorded type rather than
		 * failing.
		 */
		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	/* Non-RAM: reserve the range; flags may come back downgraded. */
	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		/*
		 * In strict mode, or when the substituted type is not an
		 * allowed replacement, undo the reservation and fail.
		 */
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	/*
	 * Keep the kernel identity mapping consistent with the new type;
	 * on failure roll back the reservation made above.
	 */
	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
*/ static void free_pfn_range(u64 paddr, unsigned long size) { int is_ram; is_ram = pat_pagerange_is_ram(paddr, paddr + size); if (is_ram == 0) free_memtype(paddr, paddr + size); } /* * track_pfn_vma_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). * * If the vma has a linear pfn mapping for the entire range, we get the prot * from pte and reserve the entire vma range with single reserve_pfn_range call. */ int track_pfn_vma_copy(struct vm_area_struct *vma) { resource_size_t paddr; unsigned long prot; unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; if (is_linear_pfn_mapping(vma)) { /* * reserve the whole chunk covered by vma. We need the * starting address and protection from pte. */ if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { WARN_ON_ONCE(1); return -EINVAL; } pgprot = __pgprot(prot); return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } return 0; } /* * track_pfn_vma_new is called when a _new_ pfn mapping is being established * for physical range indicated by pfn and size. * * prot is passed in as a parameter for the new mapping. If the vma has a * linear pfn mapping for the entire range reserve the entire vma range with * single reserve_pfn_range call. */ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { unsigned long flags; resource_size_t paddr; unsigned long vma_size = vma->vm_end - vma->vm_start; if (is_linear_pfn_mapping(vma)) { /* reserve the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; return reserve_pfn_range(paddr, vma_size, prot, 0); } if (!pat_enabled) return 0; /* for vm_insert_pfn and friends, we set prot based on lookup */ flags = lookup_memtype(pfn << PAGE_SHIFT); *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | flags); return 0; } /* * untrack_pfn_vma is called while unmapping a pfnmap for a region. 
* untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case size can be zero). */ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { resource_size_t paddr; unsigned long vma_size = vma->vm_end - vma->vm_start; if (is_linear_pfn_mapping(vma)) { /* free the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; free_pfn_range(paddr, vma_size); return; } } pgprot_t pgprot_writecombine(pgprot_t prot) { if (pat_enabled) return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); else return pgprot_noncached(prot); } EXPORT_SYMBOL_GPL(pgprot_writecombine); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) /* get Nth element of the linked list */ static struct memtype *memtype_get_idx(loff_t pos) { struct memtype *list_node, *print_entry; int i = 1; print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); if (!print_entry) return NULL; spin_lock(&memtype_lock); list_for_each_entry(list_node, &memtype_list, nd) { if (pos == i) { *print_entry = *list_node; spin_unlock(&memtype_lock); return print_entry; } ++i; } spin_unlock(&memtype_lock); kfree(print_entry); return NULL; } static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos == 0) { ++*pos; seq_printf(seq, "PAT memtype list:\n"); } return memtype_get_idx(*pos); } static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { } static int memtype_seq_show(struct seq_file *seq, void *v) { struct memtype *print_entry = (struct memtype *)v; seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), print_entry->start, print_entry->end); kfree(print_entry); return 0; } static const struct seq_operations memtype_seq_ops = { .start = memtype_seq_start, .next = memtype_seq_next, .stop = memtype_seq_stop, .show = memtype_seq_show, }; static int 
memtype_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &memtype_seq_ops); } static const struct file_operations memtype_fops = { .open = memtype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init pat_memtype_list_init(void) { debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, NULL, &memtype_fops); return 0; } late_initcall(pat_memtype_list_init); #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
gpl-2.0
gkoloventzos/hetfs-linux
drivers/leds/leds-ss4200.c
1469
14747
/* * SS4200-E Hardware API * Copyright (c) 2009, Intel Corporation. * Copyright IBM Corporation, 2009 * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Author: Dave Hansen <dave@sr71.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dmi.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> #include <linux/uaccess.h> MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>"); MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver"); MODULE_LICENSE("GPL"); /* * ICH7 LPC/GPIO PCI Config register offsets */ #define PMBASE 0x040 #define GPIO_BASE 0x048 #define GPIO_CTRL 0x04c #define GPIO_EN 0x010 /* * The ICH7 GPIO register block is 64 bytes in size. */ #define ICH7_GPIO_SIZE 64 /* * Define register offsets within the ICH7 register block. */ #define GPIO_USE_SEL 0x000 #define GP_IO_SEL 0x004 #define GP_LVL 0x00c #define GPO_BLINK 0x018 #define GPI_INV 0x030 #define GPIO_USE_SEL2 0x034 #define GP_IO_SEL2 0x038 #define GP_LVL2 0x03c /* * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives. 
 */
/* PCI IDs of the ICH7 LPC bridge variants this driver binds to. */
static const struct pci_device_id ich7_lpc_pci_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
	{ } /* NULL entry */
};

MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id);

/*
 * DMI-match callback: logs which whitelisted model was detected.
 * Returning 1 makes dmi_check_system() count this entry as a match.
 */
static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
{
	pr_info("detected '%s'\n", id->ident);
	return 1;
}

/* Module parameter: force-load on unrecognized hardware (skips DMI check). */
static bool nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");

/*
 * struct nas_led_whitelist - List of known good models
 *
 * Contains the known good models this driver is compatible with.
 * When adding a new model try to be as strict as possible. This
 * makes it possible to keep the false positives (the model is
 * detected as working, but in reality it is not) as low as
 * possible.
 */
static struct dmi_system_id nas_led_whitelist[] __initdata = {
	{
		.callback = ss4200_led_dmi_callback,
		.ident = "Intel SS4200-E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
		}
	},
	{}
};

/*
 * Base I/O address assigned to the Power Management register block
 * (read from the PMBASE PCI config register in the probe routine).
 */
static u32 g_pm_io_base;

/*
 * Base I/O address assigned to the ICH7 GPIO register block
 * (read from the GPIO_BASE PCI config register in the probe routine).
 */
static u32 nas_gpio_io_base;

/*
 * When we successfully register a region, we are returned a resource.
 * We use these to identify which regions we need to release on our way
 * back out.
*/ static struct resource *gp_gpio_resource; struct nasgpio_led { char *name; u32 gpio_bit; struct led_classdev led_cdev; }; /* * gpio_bit(s) are the ICH7 GPIO bit assignments */ static struct nasgpio_led nasgpio_leds[] = { { .name = "hdd1:blue:sata", .gpio_bit = 0 }, { .name = "hdd1:amber:sata", .gpio_bit = 1 }, { .name = "hdd2:blue:sata", .gpio_bit = 2 }, { .name = "hdd2:amber:sata", .gpio_bit = 3 }, { .name = "hdd3:blue:sata", .gpio_bit = 4 }, { .name = "hdd3:amber:sata", .gpio_bit = 5 }, { .name = "hdd4:blue:sata", .gpio_bit = 6 }, { .name = "hdd4:amber:sata", .gpio_bit = 7 }, { .name = "power:blue:power", .gpio_bit = 27}, { .name = "power:amber:power", .gpio_bit = 28}, }; #define NAS_RECOVERY 0x00000400 /* GPIO10 */ static struct nasgpio_led * led_classdev_to_nasgpio_led(struct led_classdev *led_cdev) { return container_of(led_cdev, struct nasgpio_led, led_cdev); } static struct nasgpio_led *get_led_named(char *name) { int i; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) { if (strcmp(nasgpio_leds[i].name, name)) continue; return &nasgpio_leds[i]; } return NULL; } /* * This protects access to the gpio ports. */ static DEFINE_SPINLOCK(nasgpio_gpio_lock); /* * There are two gpio ports, one for blinking and the other * for power. @port tells us if we're doing blinking or * power control. 
* * Caller must hold nasgpio_gpio_lock */ static void __nasgpio_led_set_attr(struct led_classdev *led_cdev, u32 port, u32 value) { struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev); u32 gpio_out; gpio_out = inl(nas_gpio_io_base + port); if (value) gpio_out |= (1<<led->gpio_bit); else gpio_out &= ~(1<<led->gpio_bit); outl(gpio_out, nas_gpio_io_base + port); } static void nasgpio_led_set_attr(struct led_classdev *led_cdev, u32 port, u32 value) { spin_lock(&nasgpio_gpio_lock); __nasgpio_led_set_attr(led_cdev, port, value); spin_unlock(&nasgpio_gpio_lock); } static u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port) { struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev); u32 gpio_in; spin_lock(&nasgpio_gpio_lock); gpio_in = inl(nas_gpio_io_base + port); spin_unlock(&nasgpio_gpio_lock); if (gpio_in & (1<<led->gpio_bit)) return 1; return 0; } /* * There is actual brightness control in the hardware, * but it is via smbus commands and not implemented * in this driver. */ static void nasgpio_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { u32 setting = 0; if (brightness >= LED_HALF) setting = 1; /* * Hold the lock across both operations. This ensures * consistency so that both the "turn off blinking" * and "turn light off" operations complete as a set. */ spin_lock(&nasgpio_gpio_lock); /* * LED class documentation asks that past blink state * be disabled when brightness is turned to zero. */ if (brightness == 0) __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0); __nasgpio_led_set_attr(led_cdev, GP_LVL, setting); spin_unlock(&nasgpio_gpio_lock); } static int nasgpio_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { u32 setting = 1; if (!(*delay_on == 0 && *delay_off == 0) && !(*delay_on == 500 && *delay_off == 500)) return -EINVAL; /* * These are very approximate. 
*/ *delay_on = 500; *delay_off = 500; nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting); return 0; } /* * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have * already taken care of this, but we will do so in a non destructive manner * so that we have what we need whether the BIOS did it or not. */ static int ich7_gpio_init(struct device *dev) { int i; u32 config_data = 0; u32 all_nas_led = 0; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) all_nas_led |= (1<<nasgpio_leds[i].gpio_bit); spin_lock(&nasgpio_gpio_lock); /* * We need to enable all of the GPIO lines used by the NAS box, * so we will read the current Use Selection and add our usage * to it. This should be benign with regard to the original * BIOS configuration. */ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL); dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data); config_data |= all_nas_led + NAS_RECOVERY; outl(config_data, nas_gpio_io_base + GPIO_USE_SEL); config_data = inl(nas_gpio_io_base + GPIO_USE_SEL); dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data); /* * The LED GPIO outputs need to be configured for output, so we * will ensure that all LED lines are cleared for output and the * RECOVERY line ready for input. This too should be benign with * regard to BIOS configuration. */ config_data = inl(nas_gpio_io_base + GP_IO_SEL); dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n", config_data); config_data &= ~all_nas_led; config_data |= NAS_RECOVERY; outl(config_data, nas_gpio_io_base + GP_IO_SEL); config_data = inl(nas_gpio_io_base + GP_IO_SEL); dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data); /* * In our final system, the BIOS will initialize the state of all * of the LEDs. For now, we turn them all off (or Low). */ config_data = inl(nas_gpio_io_base + GP_LVL); dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data); /* * In our final system, the BIOS will initialize the blink state of all * of the LEDs. 
For now, we turn blink off for all of them. */ config_data = inl(nas_gpio_io_base + GPO_BLINK); dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data); /* * At this moment, I am unsure if anything needs to happen with GPI_INV */ config_data = inl(nas_gpio_io_base + GPI_INV); dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data); spin_unlock(&nasgpio_gpio_lock); return 0; } static void ich7_lpc_cleanup(struct device *dev) { /* * If we were given exclusive use of the GPIO * I/O Address range, we must return it. */ if (gp_gpio_resource) { dev_dbg(dev, ": Releasing GPIO I/O addresses\n"); release_region(nas_gpio_io_base, ICH7_GPIO_SIZE); gp_gpio_resource = NULL; } } /* * The OS has determined that the LPC of the Intel ICH7 Southbridge is present * so we can retrive the required operational information and prepare the GPIO. */ static struct pci_dev *nas_gpio_pci_dev; static int ich7_lpc_probe(struct pci_dev *dev, const struct pci_device_id *id) { int status; u32 gc = 0; status = pci_enable_device(dev); if (status) { dev_err(&dev->dev, "pci_enable_device failed\n"); return -EIO; } nas_gpio_pci_dev = dev; status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base); if (status) goto out; g_pm_io_base &= 0x00000ff80; status = pci_read_config_dword(dev, GPIO_CTRL, &gc); if (!(GPIO_EN & gc)) { status = -EEXIST; dev_info(&dev->dev, "ERROR: The LPC GPIO Block has not been enabled.\n"); goto out; } status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base); if (0 > status) { dev_info(&dev->dev, "Unable to read GPIOBASE.\n"); goto out; } dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base); nas_gpio_io_base &= 0x00000ffc0; /* * Insure that we have exclusive access to the GPIO I/O address range. 
*/ gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE, KBUILD_MODNAME); if (NULL == gp_gpio_resource) { dev_info(&dev->dev, "ERROR Unable to register GPIO I/O addresses.\n"); status = -1; goto out; } /* * Initialize the GPIO for NAS/Home Server Use */ ich7_gpio_init(&dev->dev); out: if (status) { ich7_lpc_cleanup(&dev->dev); pci_disable_device(dev); } return status; } static void ich7_lpc_remove(struct pci_dev *dev) { ich7_lpc_cleanup(&dev->dev); pci_disable_device(dev); } /* * pci_driver structure passed to the PCI modules */ static struct pci_driver nas_gpio_pci_driver = { .name = KBUILD_MODNAME, .id_table = ich7_lpc_pci_id, .probe = ich7_lpc_probe, .remove = ich7_lpc_remove, }; static struct led_classdev *get_classdev_for_led_nr(int nr) { struct nasgpio_led *nas_led = &nasgpio_leds[nr]; struct led_classdev *led = &nas_led->led_cdev; return led; } static void set_power_light_amber_noblink(void) { struct nasgpio_led *amber = get_led_named("power:amber:power"); struct nasgpio_led *blue = get_led_named("power:blue:power"); if (!amber || !blue) return; /* * LED_OFF implies disabling future blinking */ pr_debug("setting blue off and amber on\n"); nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF); nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL); } static ssize_t nas_led_blink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led = dev_get_drvdata(dev); int blinking = 0; if (nasgpio_led_get_attr(led, GPO_BLINK)) blinking = 1; return sprintf(buf, "%u\n", blinking); } static ssize_t nas_led_blink_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; struct led_classdev *led = dev_get_drvdata(dev); unsigned long blink_state; ret = kstrtoul(buf, 10, &blink_state); if (ret) return ret; nasgpio_led_set_attr(led, GPO_BLINK, blink_state); return size; } static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store); static struct attribute *nasgpio_led_attrs[] 
= { &dev_attr_blink.attr, NULL }; ATTRIBUTE_GROUPS(nasgpio_led); static int register_nasgpio_led(int led_nr) { int ret; struct nasgpio_led *nas_led = &nasgpio_leds[led_nr]; struct led_classdev *led = get_classdev_for_led_nr(led_nr); led->name = nas_led->name; led->brightness = LED_OFF; if (nasgpio_led_get_attr(led, GP_LVL)) led->brightness = LED_FULL; led->brightness_set = nasgpio_led_set_brightness; led->blink_set = nasgpio_led_set_blink; led->groups = nasgpio_led_groups; ret = led_classdev_register(&nas_gpio_pci_dev->dev, led); if (ret) return ret; return 0; } static void unregister_nasgpio_led(int led_nr) { struct led_classdev *led = get_classdev_for_led_nr(led_nr); led_classdev_unregister(led); } /* * module load/initialization */ static int __init nas_gpio_init(void) { int i; int ret = 0; int nr_devices = 0; nr_devices = dmi_check_system(nas_led_whitelist); if (nodetect) { pr_info("skipping hardware autodetection\n"); pr_info("Please send 'dmidecode' output to dave@sr71.net\n"); nr_devices++; } if (nr_devices <= 0) { pr_info("no LED devices found\n"); return -ENODEV; } pr_info("registering PCI driver\n"); ret = pci_register_driver(&nas_gpio_pci_driver); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) { ret = register_nasgpio_led(i); if (ret) goto out_err; } /* * When the system powers on, the BIOS leaves the power * light blue and blinking. This will turn it solid * amber once the driver is loaded. */ set_power_light_amber_noblink(); return 0; out_err: for (i--; i >= 0; i--) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); return ret; } /* * module unload */ static void __exit nas_gpio_exit(void) { int i; pr_info("Unregistering driver\n"); for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); } module_init(nas_gpio_init); module_exit(nas_gpio_exit);
gpl-2.0
ysat0/linux-ysato
arch/mips/mm/sc-ip22.c
1981
3864
/*
 * sc-ip22.c: Indy cache management functions.
 *
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
 * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bcache.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>

/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;

#undef DEBUG_CACHE

/*
 * Fixed geometry assumed for the Indy secondary cache:
 * 512 KiB total, 32-byte lines.  SC_INDEX() maps an address to its
 * line-aligned index within the cache.
 */
#define SC_SIZE 0x00080000
#define SC_LINE 32
#define CI_MASK (SC_SIZE - SC_LINE)
#define SC_INDEX(n) ((n) & CI_MASK)

/*
 * Wipe the secondary cache lines from index 'first' to 'last'
 * (inclusive, both line-aligned).  The asm saves CP0 Status ($12),
 * temporarily enables 64-bit addressing (per its own "Go 64 bit"
 * comment), stores zero to each line through an uncached XKPHYS-style
 * window formed by ORing in 0x9000000080000000, then restores Status.
 * Runs with $at reserved (noat) while $1 is used as a scratch register.
 */
static inline void indy_sc_wipe(unsigned long first, unsigned long last)
{
	unsigned long tmp;

	__asm__ __volatile__(
	".set\tpush\t\t\t# indy_sc_wipe\n\t"
	".set\tnoreorder\n\t"
	".set\tmips3\n\t"
	".set\tnoat\n\t"
	"mfc0\t%2, $12\n\t"
	"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
	"mtc0\t$1, $12\n\t"
	"dli\t$1, 0x9000000080000000\n\t"
	"or\t%0, $1\t\t\t# first line to flush\n\t"
	"or\t%1, $1\t\t\t# last line to flush\n\t"
	".set\tat\n\t"
	"1:\tsw\t$0, 0(%0)\n\t"
	"bne\t%0, %1, 1b\n\t"
	" daddu\t%0, 32\n\t"
	"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
	"nop; nop; nop; nop;\n\t"
	".set\tpop"
	: "=r" (first), "=r" (last), "=&r" (tmp)
	: "0" (first), "1" (last));
}

/*
 * Write back and invalidate the lines covering [addr, addr + size).
 * Since the cache is virtually indexed by SC_INDEX(), a range that
 * wraps past the end of the cache is flushed in two pieces.
 * Runs with local interrupts disabled around the wipe.
 */
static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
{
	unsigned long first_line, last_line;
	unsigned long flags;

#ifdef DEBUG_CACHE
	printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
#endif

	/* Catch bad driver code */
	BUG_ON(size == 0);

	/* Which lines to flush? */
	first_line = SC_INDEX(addr);
	last_line = SC_INDEX(addr + size - 1);

	local_irq_save(flags);
	if (first_line <= last_line) {
		indy_sc_wipe(first_line, last_line);
		goto out;
	}

	/* Range wraps around the end of the cache: flush in two parts. */
	indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
	indy_sc_wipe(0, last_line);
out:
	local_irq_restore(flags);
}

/*
 * Enable the secondary cache.  The asm builds an uncached 64-bit
 * address (1 << 63 ORed with the 0x9000... segment) and performs a
 * byte store to it; presumably that store is what toggles the cache
 * controller on — the hardware side effect is not visible in this
 * file, only the address construction is.  CP0 Status is saved and
 * restored around the sequence.
 */
static void indy_sc_enable(void)
{
	unsigned long addr, tmp1, tmp2;

	/* This is really cool... */
#ifdef DEBUG_CACHE
	printk("Enabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tnoreorder\n\t"
	".set\tmips3\n\t"
	"mfc0\t%2, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"li\t%1, 0x80\n\t"
	"mtc0\t%1, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"li\t%0, 0x1\n\t"
	"dsll\t%0, 31\n\t"
	"lui\t%1, 0x9000\n\t"
	"dsll32\t%1, 0\n\t"
	"or\t%0, %1, %0\n\t"
	"sb\t$0, 0(%0)\n\t"
	"mtc0\t$0, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"mtc0\t%2, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	".set\tpop"
	: "=r" (tmp1), "=r" (tmp2), "=r" (addr));
}

/*
 * Disable the secondary cache.  Mirror image of indy_sc_enable():
 * same address construction, but a halfword store ("sh") instead of a
 * byte store is what distinguishes the disable sequence.
 */
static void indy_sc_disable(void)
{
	unsigned long tmp1, tmp2, tmp3;

#ifdef DEBUG_CACHE
	printk("Disabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tnoreorder\n\t"
	".set\tmips3\n\t"
	"li\t%0, 0x1\n\t"
	"dsll\t%0, 31\n\t"
	"lui\t%1, 0x9000\n\t"
	"dsll32\t%1, 0\n\t"
	"or\t%0, %1, %0\n\t"
	"mfc0\t%2, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"li\t%1, 0x80\n\t"
	"mtc0\t%1, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"sh\t$0, 0(%0)\n\t"
	"mtc0\t$0, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"mtc0\t%2, $12\n\t"
	"nop; nop; nop; nop\n\t"
	".set\tpop"
	: "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
}

/*
 * Probe for a secondary cache by reading its size (in pages) from
 * EEPROM word 17 on the SGI memory controller.  Returns 1 and records
 * the size in bytes in scache_size if present, 0 otherwise.
 */
static inline int __init indy_sc_probe(void)
{
	unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
	if (size == 0)
		return 0;

	/* EEPROM stores the size in page units; convert to bytes. */
	size <<= PAGE_SHIFT;
	printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
	       size >> 10);
	scache_size = size;

	return 1;
}

/* XXX Check with wje if the Indy caches can differentiate between
   writeback + invalidate and just invalidate.  Both ops currently map
   to the same routine. */
static struct bcache_ops indy_sc_ops = {
	.bc_enable = indy_sc_enable,
	.bc_disable = indy_sc_disable,
	.bc_wback_inv = indy_sc_wback_invalidate,
	.bc_inv = indy_sc_wback_invalidate
};

/*
 * Entry point: if a secondary cache is found, enable it and install
 * our ops as the board-cache operations vector.
 */
void __cpuinit indy_sc_init(void)
{
	if (indy_sc_probe()) {
		indy_sc_enable();
		bcops = &indy_sc_ops;
	}
}
gpl-2.0
clemsyn/Grouper
kernel/time/tick-common.c
2237
9750
/* * linux/kernel/time/tick-common.c * * This file contains the base functions to manage periodic tick * related events. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner * * This code is licenced under the GPL version 2. For details see * kernel-base/COPYING. */ #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> #include <asm/irq_regs.h> #include "tick-internal.h" /* * Tick devices */ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); /* * Tick next event: keeps track of the tick time */ ktime_t tick_next_period; ktime_t tick_period; int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; static DEFINE_RAW_SPINLOCK(tick_device_lock); /* * Debugging: see timer_list.c */ struct tick_device *tick_get_device(int cpu) { return &per_cpu(tick_cpu_device, cpu); } /** * tick_is_oneshot_available - check for a oneshot capable event device */ int tick_is_oneshot_available(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return 0; if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) return 1; return tick_broadcast_oneshot_available(); } /* * Periodic tick */ static void tick_periodic(int cpu) { if (tick_do_timer_cpu == cpu) { write_seqlock(&xtime_lock); /* Keep track of the next tick event */ tick_next_period = ktime_add(tick_next_period, tick_period); do_timer(1); write_sequnlock(&xtime_lock); } update_process_times(user_mode(get_irq_regs())); profile_tick(CPU_PROFILING); } /* * Event handler for periodic ticks */ void tick_handle_periodic(struct clock_event_device *dev) { int cpu = smp_processor_id(); ktime_t next; tick_periodic(cpu); if (dev->mode != CLOCK_EVT_MODE_ONESHOT) return; /* * Setup the next period for devices, which do not have * 
periodic mode: */ next = ktime_add(dev->next_event, tick_period); for (;;) { if (!clockevents_program_event(dev, next, ktime_get())) return; /* * Have to be careful here. If we're in oneshot mode, * before we call tick_periodic() in a loop, we need * to be sure we're using a real hardware clocksource. * Otherwise we could get trapped in an infinite * loop, as the tick_periodic() increments jiffies, * when then will increment time, posibly causing * the loop to trigger again and again. */ if (timekeeping_valid_for_hres()) tick_periodic(cpu); next = ktime_add(next, tick_period); } } /* * Setup the device for a periodic tick */ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) { tick_set_periodic_handler(dev, broadcast); /* Broadcast setup ? */ if (!tick_device_is_functional(dev)) return; if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && !tick_broadcast_oneshot_active()) { clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); } else { unsigned long seq; ktime_t next; do { seq = read_seqbegin(&xtime_lock); next = tick_next_period; } while (read_seqretry(&xtime_lock, seq)); clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); for (;;) { if (!clockevents_program_event(dev, next, ktime_get())) return; next = ktime_add(next, tick_period); } } } /* * Setup the tick device */ static void tick_setup_device(struct tick_device *td, struct clock_event_device *newdev, int cpu, const struct cpumask *cpumask) { ktime_t next_event; void (*handler)(struct clock_event_device *) = NULL; /* * First device setup ? */ if (!td->evtdev) { /* * If no cpu took the do_timer update, assign it to * this cpu: */ if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { tick_do_timer_cpu = cpu; tick_next_period = ktime_get(); tick_period = ktime_set(0, NSEC_PER_SEC / HZ); } /* * Startup in periodic mode first. 
*/ td->mode = TICKDEV_MODE_PERIODIC; } else { handler = td->evtdev->event_handler; next_event = td->evtdev->next_event; td->evtdev->event_handler = clockevents_handle_noop; } td->evtdev = newdev; /* * When the device is not per cpu, pin the interrupt to the * current cpu: */ if (!cpumask_equal(newdev->cpumask, cpumask)) irq_set_affinity(newdev->irq, cpumask); /* * When global broadcasting is active, check if the current * device is registered as a placeholder for broadcast mode. * This allows us to handle this x86 misfeature in a generic * way. */ if (tick_device_uses_broadcast(newdev, cpu)) return; if (td->mode == TICKDEV_MODE_PERIODIC) tick_setup_periodic(newdev, 0); else tick_setup_oneshot(newdev, handler, next_event); } /* * Check, if the new registered device should be used. */ static int tick_check_new_device(struct clock_event_device *newdev) { struct clock_event_device *curdev; struct tick_device *td; int cpu, ret = NOTIFY_OK; unsigned long flags; raw_spin_lock_irqsave(&tick_device_lock, flags); cpu = smp_processor_id(); if (!cpumask_test_cpu(cpu, newdev->cpumask)) goto out_bc; td = &per_cpu(tick_cpu_device, cpu); curdev = td->evtdev; /* cpu local device ? */ if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { /* * If the cpu affinity of the device interrupt can not * be set, ignore it. */ if (!irq_can_set_affinity(newdev->irq)) goto out_bc; /* * If we have a cpu local device already, do not replace it * by a non cpu local device */ if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) goto out_bc; } /* * If we have an active device, then check the rating and the oneshot * feature. */ if (curdev) { /* * Prefer one shot capable devices ! */ if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) && !(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) goto out_bc; /* * Check the rating */ if (curdev->rating >= newdev->rating) goto out_bc; } /* * Replace the eventually existing device by the new * device. 
If the current device is the broadcast device, do * not give it back to the clockevents layer ! */ if (tick_is_broadcast_device(curdev)) { clockevents_shutdown(curdev); curdev = NULL; } clockevents_exchange_device(curdev, newdev); tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) tick_oneshot_notify(); raw_spin_unlock_irqrestore(&tick_device_lock, flags); return NOTIFY_STOP; out_bc: /* * Can the new device be used as a broadcast device ? */ if (tick_check_broadcast_device(newdev)) ret = NOTIFY_STOP; raw_spin_unlock_irqrestore(&tick_device_lock, flags); return ret; } /* * Transfer the do_timer job away from a dying cpu. * * Called with interrupts disabled. */ static void tick_handover_do_timer(int *cpup) { if (*cpup == tick_do_timer_cpu) { int cpu = cpumask_first(cpu_online_mask); tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : TICK_DO_TIMER_NONE; } } /* * Shutdown an event device on a given cpu: * * This is called on a life CPU, when a CPU is dead. So we cannot * access the hardware device itself. * We just set the mode and remove it from the lists. */ static void tick_shutdown(unsigned int *cpup) { struct tick_device *td = &per_cpu(tick_cpu_device, *cpup); struct clock_event_device *dev = td->evtdev; unsigned long flags; raw_spin_lock_irqsave(&tick_device_lock, flags); td->mode = TICKDEV_MODE_PERIODIC; if (dev) { /* * Prevent that the clock events layer tries to call * the set mode function! 
*/ dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); td->evtdev = NULL; } raw_spin_unlock_irqrestore(&tick_device_lock, flags); } static void tick_suspend(void) { struct tick_device *td = &__get_cpu_var(tick_cpu_device); unsigned long flags; raw_spin_lock_irqsave(&tick_device_lock, flags); clockevents_shutdown(td->evtdev); raw_spin_unlock_irqrestore(&tick_device_lock, flags); } static void tick_resume(void) { struct tick_device *td = &__get_cpu_var(tick_cpu_device); unsigned long flags; int broadcast = tick_resume_broadcast(); raw_spin_lock_irqsave(&tick_device_lock, flags); clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); if (!broadcast) { if (td->mode == TICKDEV_MODE_PERIODIC) tick_setup_periodic(td->evtdev, 0); else tick_resume_oneshot(); } raw_spin_unlock_irqrestore(&tick_device_lock, flags); } /* * Notification about clock event devices */ static int tick_notify(struct notifier_block *nb, unsigned long reason, void *dev) { switch (reason) { case CLOCK_EVT_NOTIFY_ADD: return tick_check_new_device(dev); case CLOCK_EVT_NOTIFY_BROADCAST_ON: case CLOCK_EVT_NOTIFY_BROADCAST_OFF: case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: tick_broadcast_on_off(reason, dev); break; case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: tick_broadcast_oneshot_control(reason); break; case CLOCK_EVT_NOTIFY_CPU_DYING: tick_handover_do_timer(dev); break; case CLOCK_EVT_NOTIFY_CPU_DEAD: tick_shutdown_broadcast_oneshot(dev); tick_shutdown_broadcast(dev); tick_shutdown(dev); break; case CLOCK_EVT_NOTIFY_SUSPEND: tick_suspend(); tick_suspend_broadcast(); break; case CLOCK_EVT_NOTIFY_RESUME: tick_resume(); break; default: break; } return NOTIFY_OK; } static struct notifier_block tick_notifier = { .notifier_call = tick_notify, }; /** * tick_init - initialize the tick control * * Register the notifier with the clockevents framework */ void __init tick_init(void) { clockevents_register_notifier(&tick_notifier); }
gpl-2.0
MoKee/android_kernel_htc_endeavoru
drivers/media/video/adv7170.c
3261
9861
/* * adv7170 - adv7170, adv7171 video encoder driver version 0.0.1 * * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Based on adv7176 driver by: * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net> * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * - some corrections for Pinnacle Systems Inc. DC10plus card. * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (1/1/2003) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver"); MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct adv7170 { struct v4l2_subdev sd; unsigned char reg[128]; v4l2_std_id norm; int input; }; static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd) { return container_of(sd, struct adv7170, sd); } static char *inputs[] = { "pass_through", "play_back" }; /* ----------------------------------------------------------------------- */ static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); encoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int adv7170_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); int ret = -1; u8 reg; /* the adv7170 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = encoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, 
block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = adv7170_write(sd, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } /* ----------------------------------------------------------------------- */ #define TR0MODE 0x4c #define TR0RST 0x80 #define TR1CAPT 0x00 #define TR1PLAY 0x00 static const unsigned char init_NTSC[] = { 0x00, 0x10, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0x16, /* Fsc0 */ 0x0a, 0x7c, /* Fsc1 */ 0x0b, 0xf0, /* Fsc2 */ 0x0c, 0x21, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static const unsigned char init_PAL[] = { 0x00, 0x71, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0xcb, /* Fsc0 */ 0x0a, 0x8a, /* Fsc1 */ 0x0b, 0x09, /* Fsc2 */ 0x0c, 0x2a, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 
1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct adv7170 *encoder = to_adv7170(sd); v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else if (std & V4L2_STD_PAL) { adv7170_write_block(sd, init_PAL, sizeof(init_PAL)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else { v4l2_dbg(1, debug, sd, "illegal norm: %llx\n", (unsigned long long)std); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std); encoder->norm = std; return 0; } static int adv7170_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct adv7170 *encoder = to_adv7170(sd); /* RJ: input = 0: input is from decoder input = 1: input is from ZR36060 input = 2: color bar */ v4l2_dbg(1, debug, sd, "set input from %s\n", input == 0 ? 
"decoder" : "ZR36060"); switch (input) { case 0: adv7170_write(sd, 0x01, 0x20); adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */ adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; case 1: adv7170_write(sd, 0x01, 0x00); adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */ adv7170_write(sd, 0x02, 0x08); adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; default: v4l2_dbg(1, debug, sd, "illegal input: %d\n", input); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]); encoder->input = input; return 0; } static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7170, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops adv7170_core_ops = { .g_chip_ident = adv7170_g_chip_ident, }; static const struct v4l2_subdev_video_ops adv7170_video_ops = { .s_std_output = adv7170_s_std_output, .s_routing = adv7170_s_routing, }; static const struct v4l2_subdev_ops adv7170_ops = { .core = &adv7170_core_ops, .video = &adv7170_video_ops, }; /* ----------------------------------------------------------------------- */ static int adv7170_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adv7170 *encoder; struct v4l2_subdev *sd; int i; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &adv7170_ops); encoder->norm = V4L2_STD_NTSC; encoder->input = 0; i = 
adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (i >= 0) { i = adv7170_write(sd, 0x07, TR0MODE | TR0RST); i = adv7170_write(sd, 0x07, TR0MODE); i = adv7170_read(sd, 0x12); v4l2_dbg(1, debug, sd, "revision %d\n", i & 1); } if (i < 0) v4l2_dbg(1, debug, sd, "init error 0x%x\n", i); return 0; } static int adv7170_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_adv7170(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7170_id[] = { { "adv7170", 0 }, { "adv7171", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adv7170_id); static struct i2c_driver adv7170_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7170", }, .probe = adv7170_probe, .remove = adv7170_remove, .id_table = adv7170_id, }; static __init int init_adv7170(void) { return i2c_add_driver(&adv7170_driver); } static __exit void exit_adv7170(void) { i2c_del_driver(&adv7170_driver); } module_init(init_adv7170); module_exit(exit_adv7170);
gpl-2.0
CyanogenMod/android_kernel_sony_msm7x27a
arch/tile/lib/spinlock_32.c
4797
7370
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/spinlock.h> #include <linux/module.h> #include <asm/processor.h> #include <arch/spr_def.h> #include "spinlock_common.h" void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int iterations = 0; int delta; while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1) delay_backoff(iterations++); /* Increment the next ticket number, implicitly releasing tns lock. */ lock->next_ticket = my_ticket + TICKET_QUANTUM; /* Wait until it's our turn. */ while ((delta = my_ticket - lock->current_ticket) != 0) relax((128 / CYCLES_PER_RELAX_LOOP) * delta); } EXPORT_SYMBOL(arch_spin_lock); int arch_spin_trylock(arch_spinlock_t *lock) { /* * Grab a ticket; no need to retry if it's busy, we'll just * treat that the same as "locked", since someone else * will lock it momentarily anyway. */ int my_ticket = __insn_tns((void *)&lock->next_ticket); if (my_ticket == lock->current_ticket) { /* Not currently locked, so lock it by keeping this ticket. */ lock->next_ticket = my_ticket + TICKET_QUANTUM; /* Success! */ return 1; } if (!(my_ticket & 1)) { /* Release next_ticket. */ lock->next_ticket = my_ticket; } return 0; } EXPORT_SYMBOL(arch_spin_trylock); void arch_spin_unlock_wait(arch_spinlock_t *lock) { u32 iterations = 0; while (arch_spin_is_locked(lock)) delay_backoff(iterations++); } EXPORT_SYMBOL(arch_spin_unlock_wait); /* * The low byte is always reserved to be the marker for a "tns" operation * since the low bit is set to "1" by a tns. 
The next seven bits are * zeroes. The next byte holds the "next" writer value, i.e. the ticket * available for the next task that wants to write. The third byte holds * the current writer value, i.e. the writer who holds the current ticket. * If current == next == 0, there are no interested writers. */ #define WR_NEXT_SHIFT _WR_NEXT_SHIFT #define WR_CURR_SHIFT _WR_CURR_SHIFT #define WR_WIDTH _WR_WIDTH #define WR_MASK ((1 << WR_WIDTH) - 1) /* * The last eight bits hold the active reader count. This has to be * zero before a writer can start to write. */ #define RD_COUNT_SHIFT _RD_COUNT_SHIFT #define RD_COUNT_WIDTH _RD_COUNT_WIDTH #define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1) /* * We can get the read lock if everything but the reader bits (which * are in the high part of the word) is zero, i.e. no active or * waiting writers, no tns. * * We guard the tns/store-back with an interrupt critical section to * preserve the semantic that the same read lock can be acquired in an * interrupt context. */ inline int arch_read_trylock(arch_rwlock_t *rwlock) { u32 val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); val = __insn_tns((int *)&rwlock->lock); if (likely((val << _RD_COUNT_WIDTH) == 0)) { val += 1 << RD_COUNT_SHIFT; rwlock->lock = val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); BUG_ON(val == 0); /* we don't expect wraparound */ return 1; } if ((val & 1) == 0) rwlock->lock = val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); return 0; } EXPORT_SYMBOL(arch_read_trylock); /* * Spin doing arch_read_trylock() until we acquire the lock. * ISSUE: This approach can permanently starve readers. A reader who sees * a writer could instead take a ticket lock (just like a writer would), * and atomically enter read mode (with 1 reader) when it gets the ticket. * This way both readers and writers would always make forward progress * in a finite time. 
*/ void arch_read_lock(arch_rwlock_t *rwlock) { u32 iterations = 0; while (unlikely(!arch_read_trylock(rwlock))) delay_backoff(iterations++); } EXPORT_SYMBOL(arch_read_lock); void arch_read_unlock(arch_rwlock_t *rwlock) { u32 val, iterations = 0; mb(); /* guarantee anything modified under the lock is visible */ for (;;) { __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); val = __insn_tns((int *)&rwlock->lock); if (likely((val & 1) == 0)) { rwlock->lock = val - (1 << _RD_COUNT_SHIFT); __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); break; } __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); delay_backoff(iterations++); } } EXPORT_SYMBOL(arch_read_unlock); /* * We don't need an interrupt critical section here (unlike for * arch_read_lock) since we should never use a bare write lock where * it could be interrupted by code that could try to re-acquire it. */ void arch_write_lock(arch_rwlock_t *rwlock) { /* * The trailing underscore on this variable (and curr_ below) * reminds us that the high bits are garbage; we mask them out * when we compare them. */ u32 my_ticket_; u32 iterations = 0; u32 val = __insn_tns((int *)&rwlock->lock); if (likely(val == 0)) { rwlock->lock = 1 << _WR_NEXT_SHIFT; return; } /* * Wait until there are no readers, then bump up the next * field and capture the ticket value. */ for (;;) { if (!(val & 1)) { if ((val >> RD_COUNT_SHIFT) == 0) break; rwlock->lock = val; } delay_backoff(iterations++); val = __insn_tns((int *)&rwlock->lock); } /* Take out the next ticket and extract my ticket value. */ rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); my_ticket_ = val >> WR_NEXT_SHIFT; /* Wait until the "current" field matches our ticket. */ for (;;) { u32 curr_ = val >> WR_CURR_SHIFT; u32 delta = ((my_ticket_ - curr_) & WR_MASK); if (likely(delta == 0)) break; /* Delay based on how many lock-holders are still out there. */ relax((256 / CYCLES_PER_RELAX_LOOP) * delta); /* * Get a non-tns value to check; we don't need to tns * it ourselves. 
Since we're not tns'ing, we retry * more rapidly to get a valid value. */ while ((val = rwlock->lock) & 1) relax(4); } } EXPORT_SYMBOL(arch_write_lock); int arch_write_trylock(arch_rwlock_t *rwlock) { u32 val = __insn_tns((int *)&rwlock->lock); /* * If a tns is in progress, or there's a waiting or active locker, * or active readers, we can't take the lock, so give up. */ if (unlikely(val != 0)) { if (!(val & 1)) rwlock->lock = val; return 0; } /* Set the "next" field to mark it locked. */ rwlock->lock = 1 << _WR_NEXT_SHIFT; return 1; } EXPORT_SYMBOL(arch_write_trylock); void arch_write_unlock(arch_rwlock_t *rwlock) { u32 val, eq, mask; mb(); /* guarantee anything modified under the lock is visible */ val = __insn_tns((int *)&rwlock->lock); if (likely(val == (1 << _WR_NEXT_SHIFT))) { rwlock->lock = 0; return; } while (unlikely(val & 1)) { /* Limited backoff since we are the highest-priority task. */ relax(4); val = __insn_tns((int *)&rwlock->lock); } mask = 1 << WR_CURR_SHIFT; val = __insn_addb(val, mask); eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT)); val = __insn_mz(eq & mask, val); rwlock->lock = val; } EXPORT_SYMBOL(arch_write_unlock);
gpl-2.0
major91/Zeta_Chromium-L
arch/arm/mach-gemini/irq.c
4797
3066
/* * Interrupt routines for Gemini * * Copyright (C) 2001-2006 Storlink, Corp. * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/stddef.h> #include <linux/list.h> #include <linux/sched.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #define IRQ_SOURCE(base_addr) (base_addr + 0x00) #define IRQ_MASK(base_addr) (base_addr + 0x04) #define IRQ_CLEAR(base_addr) (base_addr + 0x08) #define IRQ_TMODE(base_addr) (base_addr + 0x0C) #define IRQ_TLEVEL(base_addr) (base_addr + 0x10) #define IRQ_STATUS(base_addr) (base_addr + 0x14) #define FIQ_SOURCE(base_addr) (base_addr + 0x20) #define FIQ_MASK(base_addr) (base_addr + 0x24) #define FIQ_CLEAR(base_addr) (base_addr + 0x28) #define FIQ_TMODE(base_addr) (base_addr + 0x2C) #define FIQ_LEVEL(base_addr) (base_addr + 0x30) #define FIQ_STATUS(base_addr) (base_addr + 0x34) static void gemini_ack_irq(struct irq_data *d) { __raw_writel(1 << d->irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); } static void gemini_mask_irq(struct irq_data *d) { unsigned int mask; mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); mask &= ~(1 << d->irq); __raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); } static void gemini_unmask_irq(struct irq_data *d) { unsigned int mask; mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); mask |= (1 << d->irq); __raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); } static struct irq_chip gemini_irq_chip = { .name = "INTC", .irq_ack = gemini_ack_irq, .irq_mask = gemini_mask_irq, .irq_unmask = gemini_unmask_irq, }; static struct resource irq_resource = { .name = "irq_handler", 
.start = IO_ADDRESS(GEMINI_INTERRUPT_BASE), .end = IO_ADDRESS(FIQ_STATUS(GEMINI_INTERRUPT_BASE)) + 4, }; void __init gemini_init_irq(void) { unsigned int i, mode = 0, level = 0; /* * Disable the idle handler by default since it is buggy * For more info see arch/arm/mach-gemini/idle.c */ disable_hlt(); request_resource(&iomem_resource, &irq_resource); for (i = 0; i < NR_IRQS; i++) { irq_set_chip(i, &gemini_irq_chip); if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) { irq_set_handler(i, handle_edge_irq); mode |= 1 << i; level |= 1 << i; } else { irq_set_handler(i, handle_level_irq); } set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } /* Disable all interrupts */ __raw_writel(0, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); __raw_writel(0, FIQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); /* Set interrupt mode */ __raw_writel(mode, IRQ_TMODE(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); __raw_writel(level, IRQ_TLEVEL(IO_ADDRESS(GEMINI_INTERRUPT_BASE))); }
gpl-2.0
tadeas482/kernel-old
arch/tile/lib/spinlock_32.c
4797
7370
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/spinlock.h> #include <linux/module.h> #include <asm/processor.h> #include <arch/spr_def.h> #include "spinlock_common.h" void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int iterations = 0; int delta; while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1) delay_backoff(iterations++); /* Increment the next ticket number, implicitly releasing tns lock. */ lock->next_ticket = my_ticket + TICKET_QUANTUM; /* Wait until it's our turn. */ while ((delta = my_ticket - lock->current_ticket) != 0) relax((128 / CYCLES_PER_RELAX_LOOP) * delta); } EXPORT_SYMBOL(arch_spin_lock); int arch_spin_trylock(arch_spinlock_t *lock) { /* * Grab a ticket; no need to retry if it's busy, we'll just * treat that the same as "locked", since someone else * will lock it momentarily anyway. */ int my_ticket = __insn_tns((void *)&lock->next_ticket); if (my_ticket == lock->current_ticket) { /* Not currently locked, so lock it by keeping this ticket. */ lock->next_ticket = my_ticket + TICKET_QUANTUM; /* Success! */ return 1; } if (!(my_ticket & 1)) { /* Release next_ticket. */ lock->next_ticket = my_ticket; } return 0; } EXPORT_SYMBOL(arch_spin_trylock); void arch_spin_unlock_wait(arch_spinlock_t *lock) { u32 iterations = 0; while (arch_spin_is_locked(lock)) delay_backoff(iterations++); } EXPORT_SYMBOL(arch_spin_unlock_wait); /* * The low byte is always reserved to be the marker for a "tns" operation * since the low bit is set to "1" by a tns. 
The next seven bits are * zeroes. The next byte holds the "next" writer value, i.e. the ticket * available for the next task that wants to write. The third byte holds * the current writer value, i.e. the writer who holds the current ticket. * If current == next == 0, there are no interested writers. */ #define WR_NEXT_SHIFT _WR_NEXT_SHIFT #define WR_CURR_SHIFT _WR_CURR_SHIFT #define WR_WIDTH _WR_WIDTH #define WR_MASK ((1 << WR_WIDTH) - 1) /* * The last eight bits hold the active reader count. This has to be * zero before a writer can start to write. */ #define RD_COUNT_SHIFT _RD_COUNT_SHIFT #define RD_COUNT_WIDTH _RD_COUNT_WIDTH #define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1) /* * We can get the read lock if everything but the reader bits (which * are in the high part of the word) is zero, i.e. no active or * waiting writers, no tns. * * We guard the tns/store-back with an interrupt critical section to * preserve the semantic that the same read lock can be acquired in an * interrupt context. */ inline int arch_read_trylock(arch_rwlock_t *rwlock) { u32 val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); val = __insn_tns((int *)&rwlock->lock); if (likely((val << _RD_COUNT_WIDTH) == 0)) { val += 1 << RD_COUNT_SHIFT; rwlock->lock = val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); BUG_ON(val == 0); /* we don't expect wraparound */ return 1; } if ((val & 1) == 0) rwlock->lock = val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); return 0; } EXPORT_SYMBOL(arch_read_trylock); /* * Spin doing arch_read_trylock() until we acquire the lock. * ISSUE: This approach can permanently starve readers. A reader who sees * a writer could instead take a ticket lock (just like a writer would), * and atomically enter read mode (with 1 reader) when it gets the ticket. * This way both readers and writers would always make forward progress * in a finite time. 
*/ void arch_read_lock(arch_rwlock_t *rwlock) { u32 iterations = 0; while (unlikely(!arch_read_trylock(rwlock))) delay_backoff(iterations++); } EXPORT_SYMBOL(arch_read_lock); void arch_read_unlock(arch_rwlock_t *rwlock) { u32 val, iterations = 0; mb(); /* guarantee anything modified under the lock is visible */ for (;;) { __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); val = __insn_tns((int *)&rwlock->lock); if (likely((val & 1) == 0)) { rwlock->lock = val - (1 << _RD_COUNT_SHIFT); __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); break; } __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); delay_backoff(iterations++); } } EXPORT_SYMBOL(arch_read_unlock); /* * We don't need an interrupt critical section here (unlike for * arch_read_lock) since we should never use a bare write lock where * it could be interrupted by code that could try to re-acquire it. */ void arch_write_lock(arch_rwlock_t *rwlock) { /* * The trailing underscore on this variable (and curr_ below) * reminds us that the high bits are garbage; we mask them out * when we compare them. */ u32 my_ticket_; u32 iterations = 0; u32 val = __insn_tns((int *)&rwlock->lock); if (likely(val == 0)) { rwlock->lock = 1 << _WR_NEXT_SHIFT; return; } /* * Wait until there are no readers, then bump up the next * field and capture the ticket value. */ for (;;) { if (!(val & 1)) { if ((val >> RD_COUNT_SHIFT) == 0) break; rwlock->lock = val; } delay_backoff(iterations++); val = __insn_tns((int *)&rwlock->lock); } /* Take out the next ticket and extract my ticket value. */ rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); my_ticket_ = val >> WR_NEXT_SHIFT; /* Wait until the "current" field matches our ticket. */ for (;;) { u32 curr_ = val >> WR_CURR_SHIFT; u32 delta = ((my_ticket_ - curr_) & WR_MASK); if (likely(delta == 0)) break; /* Delay based on how many lock-holders are still out there. */ relax((256 / CYCLES_PER_RELAX_LOOP) * delta); /* * Get a non-tns value to check; we don't need to tns * it ourselves. 
Since we're not tns'ing, we retry * more rapidly to get a valid value. */ while ((val = rwlock->lock) & 1) relax(4); } } EXPORT_SYMBOL(arch_write_lock); int arch_write_trylock(arch_rwlock_t *rwlock) { u32 val = __insn_tns((int *)&rwlock->lock); /* * If a tns is in progress, or there's a waiting or active locker, * or active readers, we can't take the lock, so give up. */ if (unlikely(val != 0)) { if (!(val & 1)) rwlock->lock = val; return 0; } /* Set the "next" field to mark it locked. */ rwlock->lock = 1 << _WR_NEXT_SHIFT; return 1; } EXPORT_SYMBOL(arch_write_trylock); void arch_write_unlock(arch_rwlock_t *rwlock) { u32 val, eq, mask; mb(); /* guarantee anything modified under the lock is visible */ val = __insn_tns((int *)&rwlock->lock); if (likely(val == (1 << _WR_NEXT_SHIFT))) { rwlock->lock = 0; return; } while (unlikely(val & 1)) { /* Limited backoff since we are the highest-priority task. */ relax(4); val = __insn_tns((int *)&rwlock->lock); } mask = 1 << WR_CURR_SHIFT; val = __insn_addb(val, mask); eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT)); val = __insn_mz(eq & mask, val); rwlock->lock = val; } EXPORT_SYMBOL(arch_write_unlock);
gpl-2.0
szezso/android_kernel_samsung_expressltexx
net/decnet/dn_dev.c
4797
32808
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Device Layer * * Authors: Steve Whitehouse <SteveW@ACM.org> * Eduardo Marcelo Serrat <emserrat@geocities.com> * * Changes: * Steve Whitehouse : Devices now see incoming frames so they * can mark on who it came from. * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour * can now have a device specific setup func. * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/ * Steve Whitehouse : Fixed bug which sometimes killed timer * Steve Whitehouse : Multiple ifaddr support * Steve Whitehouse : SIOCGIFCONF is now a compile time option * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding * Steve Whitehouse : Removed timer1 - it's a user space issue now * Patrick Caulfield : Fixed router hello message format * Steve Whitehouse : Got rid of constant sizes for blksize for * devices. All mtu based now. 
*/ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/if_addr.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/sysctl.h> #include <linux/notifier.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/net_namespace.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/flow.h> #include <net/fib_rules.h> #include <net/netlink.h> #include <net/dn.h> #include <net/dn_dev.h> #include <net/dn_route.h> #include <net/dn_neigh.h> #include <net/dn_fib.h> #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00}; static unsigned char dn_eco_version[3] = {0x02,0x00,0x00}; extern struct neigh_table dn_neigh_table; /* * decnet_address is kept in network order. 
*/ __le16 decnet_address = 0; static DEFINE_SPINLOCK(dndev_lock); static struct net_device *decnet_default_device; static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); static void dn_dev_delete(struct net_device *dev); static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); static int dn_eth_up(struct net_device *); static void dn_eth_down(struct net_device *); static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa); static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa); static struct dn_dev_parms dn_dev_list[] = { { .type = ARPHRD_ETHER, /* Ethernet */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ethernet", .up = dn_eth_up, .down = dn_eth_down, .timer3 = dn_send_brd_hello, }, { .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ipgre", .timer3 = dn_send_brd_hello, }, #if 0 { .type = ARPHRD_X25, /* Bog standard X.25 */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "x25", .timer3 = dn_send_ptp_hello, }, #endif #if 0 { .type = ARPHRD_PPP, /* DECnet over PPP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ppp", .timer3 = dn_send_brd_hello, }, #endif { .type = ARPHRD_DDCMP, /* DECnet over DDCMP */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "ddcmp", .timer3 = dn_send_ptp_hello, }, { .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "loopback", .timer3 = dn_send_brd_hello, } }; #define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) #define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x) #ifdef CONFIG_SYSCTL static int min_t2[] = { 1 }; static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */ static int min_t3[] = { 1 }; static int max_t3[] = { 8191 }; /* Must fit in 
16 bits when multiplied by BCT3MULT or T3MULT */ static int min_priority[1]; static int max_priority[] = { 127 }; /* From DECnet spec */ static int dn_forwarding_proc(ctl_table *, int, void __user *, size_t *, loff_t *); static struct dn_dev_sysctl_table { struct ctl_table_header *sysctl_header; ctl_table dn_dev_vars[5]; } dn_dev_sysctl = { NULL, { { .procname = "forwarding", .data = (void *)DN_DEV_PARMS_OFFSET(forwarding), .maxlen = sizeof(int), .mode = 0644, .proc_handler = dn_forwarding_proc, }, { .procname = "priority", .data = (void *)DN_DEV_PARMS_OFFSET(priority), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_priority, .extra2 = &max_priority }, { .procname = "t2", .data = (void *)DN_DEV_PARMS_OFFSET(t2), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t2, .extra2 = &max_t2 }, { .procname = "t3", .data = (void *)DN_DEV_PARMS_OFFSET(t3), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t3, .extra2 = &max_t3 }, {0} }, }; static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { struct dn_dev_sysctl_table *t; int i; #define DN_CTL_PATH_DEV 3 struct ctl_path dn_ctl_path[] = { { .procname = "net", }, { .procname = "decnet", }, { .procname = "conf", }, { /* to be set */ }, { }, }; t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) return; for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { long offset = (long)t->dn_dev_vars[i].data; t->dn_dev_vars[i].data = ((char *)parms) + offset; } if (dev) { dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; } else { dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name; } t->dn_dev_vars[0].extra1 = (void *)dev; t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else parms->sysctl = t; } static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { if (parms->sysctl) { struct 
dn_dev_sysctl_table *t = parms->sysctl; parms->sysctl = NULL; unregister_sysctl_table(t->sysctl_header); kfree(t); } } static int dn_forwarding_proc(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { #ifdef CONFIG_DECNET_ROUTER struct net_device *dev = table->extra1; struct dn_dev *dn_db; int err; int tmp, old; if (table->extra1 == NULL) return -EINVAL; dn_db = rcu_dereference_raw(dev->dn_ptr); old = dn_db->parms.forwarding; err = proc_dointvec(table, write, buffer, lenp, ppos); if ((err >= 0) && write) { if (dn_db->parms.forwarding < 0) dn_db->parms.forwarding = 0; if (dn_db->parms.forwarding > 2) dn_db->parms.forwarding = 2; /* * What an ugly hack this is... its works, just. It * would be nice if sysctl/proc were just that little * bit more flexible so I don't have to write a special * routine, or suffer hacks like this - SJW */ tmp = dn_db->parms.forwarding; dn_db->parms.forwarding = old; if (dn_db->parms.down) dn_db->parms.down(dev); dn_db->parms.forwarding = tmp; if (dn_db->parms.up) dn_db->parms.up(dev); } return err; #else return -EINVAL; #endif } #else /* CONFIG_SYSCTL */ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { } static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { } #endif /* CONFIG_SYSCTL */ static inline __u16 mtu2blksize(struct net_device *dev) { u32 blksize = dev->mtu; if (blksize > 0xffff) blksize = 0xffff; if (dev->type == ARPHRD_ETHER || dev->type == ARPHRD_PPP || dev->type == ARPHRD_IPGRE || dev->type == ARPHRD_LOOPBACK) blksize -= 2; return (__u16)blksize; } static struct dn_ifaddr *dn_dev_alloc_ifa(void) { struct dn_ifaddr *ifa; ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); return ifa; } static void dn_dev_free_ifa(struct dn_ifaddr *ifa) { kfree_rcu(ifa, rcu); } static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) { struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap); unsigned char mac_addr[6]; struct net_device *dev = 
dn_db->dev; ASSERT_RTNL(); *ifap = ifa1->ifa_next; if (dn_db->dev->type == ARPHRD_ETHER) { if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa1->ifa_local); dev_mc_del(dev, mac_addr); } } dn_ifaddr_notify(RTM_DELADDR, ifa1); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); if (destroy) { dn_dev_free_ifa(ifa1); if (dn_db->ifa_list == NULL) dn_dev_delete(dn_db->dev); } } static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) { struct net_device *dev = dn_db->dev; struct dn_ifaddr *ifa1; unsigned char mac_addr[6]; ASSERT_RTNL(); /* Check for duplicates */ for (ifa1 = rtnl_dereference(dn_db->ifa_list); ifa1 != NULL; ifa1 = rtnl_dereference(ifa1->ifa_next)) { if (ifa1->ifa_local == ifa->ifa_local) return -EEXIST; } if (dev->type == ARPHRD_ETHER) { if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa->ifa_local); dev_mc_add(dev, mac_addr); } } ifa->ifa_next = dn_db->ifa_list; rcu_assign_pointer(dn_db->ifa_list, ifa); dn_ifaddr_notify(RTM_NEWADDR, ifa); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); return 0; } static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); int rv; if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return err; } ifa->ifa_dev = dn_db; if (dev->flags & IFF_LOOPBACK) ifa->ifa_scope = RT_SCOPE_HOST; rv = dn_dev_insert_ifa(dn_db, ifa); if (rv) dn_dev_free_ifa(ifa); return rv; } int dn_dev_ioctl(unsigned int cmd, void __user *arg) { char buffer[DN_IFREQ_SIZE]; struct ifreq *ifr = (struct ifreq *)buffer; struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; struct dn_dev *dn_db; struct net_device *dev; struct dn_ifaddr *ifa = NULL; struct dn_ifaddr __rcu **ifap = NULL; int ret = 0; if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) return -EFAULT; ifr->ifr_name[IFNAMSIZ-1] = 0; dev_load(&init_net, ifr->ifr_name); switch (cmd) { case SIOCGIFADDR: break; 
case SIOCSIFADDR: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (sdn->sdn_family != AF_DECnet) return -EINVAL; break; default: return -EINVAL; } rtnl_lock(); if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) { ret = -ENODEV; goto done; } if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) { for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) break; } if (ifa == NULL && cmd != SIOCSIFADDR) { ret = -EADDRNOTAVAIL; goto done; } switch (cmd) { case SIOCGIFADDR: *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local; goto rarok; case SIOCSIFADDR: if (!ifa) { if ((ifa = dn_dev_alloc_ifa()) == NULL) { ret = -ENOBUFS; break; } memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); } else { if (ifa->ifa_local == dn_saddr2dn(sdn)) break; dn_dev_del_ifa(dn_db, ifap, 0); } ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn); ret = dn_dev_set_ifa(dev, ifa); } done: rtnl_unlock(); return ret; rarok: if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) ret = -EFAULT; goto done; } struct net_device *dn_dev_get_default(void) { struct net_device *dev; spin_lock(&dndev_lock); dev = decnet_default_device; if (dev) { if (dev->dn_ptr) dev_hold(dev); else dev = NULL; } spin_unlock(&dndev_lock); return dev; } int dn_dev_set_default(struct net_device *dev, int force) { struct net_device *old = NULL; int rv = -EBUSY; if (!dev->dn_ptr) return -ENODEV; spin_lock(&dndev_lock); if (force || decnet_default_device == NULL) { old = decnet_default_device; decnet_default_device = dev; rv = 0; } spin_unlock(&dndev_lock); if (old) dev_put(old); return rv; } static void dn_dev_check_default(struct net_device *dev) { spin_lock(&dndev_lock); if (dev == decnet_default_device) { decnet_default_device = NULL; } else { dev = NULL; } spin_unlock(&dndev_lock); if (dev) dev_put(dev); } /* * Called with RTNL */ static struct dn_dev *dn_dev_by_index(int ifindex) { struct net_device *dev; struct dn_dev *dn_dev = NULL; dev = 
__dev_get_by_index(&init_net, ifindex); if (dev) dn_dev = rtnl_dereference(dev->dn_ptr); return dn_dev; } static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = { [IFA_ADDRESS] = { .type = NLA_U16 }, [IFA_LOCAL] = { .type = NLA_U16 }, [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, }; static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; struct dn_ifaddr __rcu **ifap; int err = -EINVAL; if (!net_eq(net, &init_net)) goto errout; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) goto errout; err = -ENODEV; ifm = nlmsg_data(nlh); if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) goto errout; err = -EADDRNOTAVAIL; for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) continue; if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue; dn_dev_del_ifa(dn_db, ifap, 1); return 0; } errout: return err; } static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct net_device *dev; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; int err; if (!net_eq(net, &init_net)) return -EINVAL; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) return err; if (tb[IFA_LOCAL] == NULL) return -EINVAL; ifm = nlmsg_data(nlh); if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) return -ENODEV; if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) { dn_db = dn_dev_create(dev, &err); if (!dn_db) return err; } if ((ifa = dn_dev_alloc_ifa()) == NULL) return -ENOBUFS; if (tb[IFA_ADDRESS] == NULL) tb[IFA_ADDRESS] = tb[IFA_LOCAL]; ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); ifa->ifa_address = 
nla_get_le16(tb[IFA_ADDRESS]); ifa->ifa_flags = ifm->ifa_flags; ifa->ifa_scope = ifm->ifa_scope; ifa->ifa_dev = dn_db; if (tb[IFA_LABEL]) nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); else memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); err = dn_dev_insert_ifa(dn_db, ifa); if (err) dn_dev_free_ifa(ifa); return err; } static inline size_t dn_ifaddr_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(2) /* IFA_ADDRESS */ + nla_total_size(2); /* IFA_LOCAL */ } static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, u32 pid, u32 seq, int event, unsigned int flags) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_DECnet; ifm->ifa_prefixlen = 16; ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; if (ifa->ifa_address) NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); if (ifa->ifa_local) NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); if (ifa->ifa_label[0]) NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) { struct sk_buff *skb; int err = -ENOBUFS; skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL); if (skb == NULL) goto errout; err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err); } static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx, dn_idx = 0, skip_ndevs, skip_naddr; 
struct net_device *dev; struct dn_dev *dn_db; struct dn_ifaddr *ifa; if (!net_eq(net, &init_net)) return 0; skip_ndevs = cb->args[0]; skip_naddr = cb->args[1]; idx = 0; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if (idx < skip_ndevs) goto cont; else if (idx > skip_ndevs) { /* Only skip over addresses for first dev dumped * in this iteration (idx == skip_ndevs) */ skip_naddr = 0; } if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL) goto cont; for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa; ifa = rcu_dereference(ifa->ifa_next), dn_idx++) { if (dn_idx < skip_naddr) continue; if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) < 0) goto done; } cont: idx++; } done: rcu_read_unlock(); cb->args[0] = idx; cb->args[1] = dn_idx; return skb->len; } static int dn_dev_get_first(struct net_device *dev, __le16 *addr) { struct dn_dev *dn_db; struct dn_ifaddr *ifa; int rv = -ENODEV; rcu_read_lock(); dn_db = rcu_dereference(dev->dn_ptr); if (dn_db == NULL) goto out; ifa = rcu_dereference(dn_db->ifa_list); if (ifa != NULL) { *addr = ifa->ifa_local; rv = 0; } out: rcu_read_unlock(); return rv; } /* * Find a default address to bind to. * * This is one of those areas where the initial VMS concepts don't really * map onto the Linux concepts, and since we introduced multiple addresses * per interface we have to cope with slightly odd ways of finding out what * "our address" really is. Mostly it's not a problem; for this we just guess * a sensible default. Eventually the routing code will take care of all the * nasties for us I hope. 
*/ int dn_dev_bind_default(__le16 *addr) { struct net_device *dev; int rv; dev = dn_dev_get_default(); last_chance: if (dev) { rv = dn_dev_get_first(dev, addr); dev_put(dev); if (rv == 0 || dev == init_net.loopback_dev) return rv; } dev = init_net.loopback_dev; dev_hold(dev); goto last_chance; } static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct endnode_hello_message *msg; struct sk_buff *skb = NULL; __le16 *pktlen; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) return; skb->dev = dev; msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg)); msg->msgflg = 0x0D; memcpy(msg->tiver, dn_eco_version, 3); dn_dn2eth(msg->id, ifa->ifa_local); msg->iinfo = DN_RT_INFO_ENDN; msg->blksize = cpu_to_le16(mtu2blksize(dev)); msg->area = 0x00; memset(msg->seed, 0, 8); memcpy(msg->neighbor, dn_hiord, ETH_ALEN); if (dn_db->router) { struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; dn_dn2eth(msg->neighbor, dn->addr); } msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3); msg->mpd = 0x00; msg->datalen = 0x02; memset(msg->data, 0xAA, 2); pktlen = (__le16 *)skb_push(skb,2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); } #define DRDELAY (5 * HZ) static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) { /* First check time since device went up */ if ((jiffies - dn_db->uptime) < DRDELAY) return 0; /* If there is no router, then yes... */ if (!dn_db->router) return 1; /* otherwise only if we have a higher priority or.. 
*/ if (dn->priority < dn_db->parms.priority) return 1; /* if we have equal priority and a higher node number */ if (dn->priority != dn_db->parms.priority) return 0; if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local)) return 1; return 0; } static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) { int n; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; struct sk_buff *skb; size_t size; unsigned char *ptr; unsigned char *i1, *i2; __le16 *pktlen; char *src; if (mtu2blksize(dev) < (26 + 7)) return; n = mtu2blksize(dev) - 26; n /= 7; if (n > 32) n = 32; size = 2 + 26 + 7 * n; if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL) return; skb->dev = dev; ptr = skb_put(skb, size); *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH; *ptr++ = 2; /* ECO */ *ptr++ = 0; *ptr++ = 0; dn_dn2eth(ptr, ifa->ifa_local); src = ptr; ptr += ETH_ALEN; *ptr++ = dn_db->parms.forwarding == 1 ? DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev)); ptr += 2; *ptr++ = dn_db->parms.priority; /* Priority */ *ptr++ = 0; /* Area: Reserved */ *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3); ptr += 2; *ptr++ = 0; /* MPD: Reserved */ i1 = ptr++; memset(ptr, 0, 7); /* Name: Reserved */ ptr += 7; i2 = ptr++; n = dn_neigh_elist(dev, ptr, n); *i2 = 7 * n; *i1 = 8 + *i2; skb_trim(skb, (27 + *i2)); pktlen = (__le16 *)skb_push(skb, 2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); if (dn_am_i_a_router(dn, dn_db, ifa)) { struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); if (skb2) { dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src); } } dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); } static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dn_send_endnode_hello(dev, ifa); else dn_send_router_hello(dev, ifa); } static void 
dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa) { int tdlen = 16; int size = dev->hard_header_len + 2 + 4 + tdlen; struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC); int i; unsigned char *ptr; char src[ETH_ALEN]; if (skb == NULL) return ; skb->dev = dev; skb_push(skb, dev->hard_header_len); ptr = skb_put(skb, 2 + 4 + tdlen); *ptr++ = DN_RT_PKT_HELO; *((__le16 *)ptr) = ifa->ifa_local; ptr += 2; *ptr++ = tdlen; for(i = 0; i < tdlen; i++) *ptr++ = 0252; dn_dn2eth(src, ifa->ifa_local); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); } static int dn_eth_up(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dev_mc_add(dev, dn_rt_all_end_mcast); else dev_mc_add(dev, dn_rt_all_rt_mcast); dn_db->use_long = 1; return 0; } static void dn_eth_down(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dev_mc_del(dev, dn_rt_all_end_mcast); else dev_mc_del(dev, dn_rt_all_rt_mcast); } static void dn_dev_set_timer(struct net_device *dev); static void dn_dev_timer_func(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; struct dn_dev *dn_db; struct dn_ifaddr *ifa; rcu_read_lock(); dn_db = rcu_dereference(dev->dn_ptr); if (dn_db->t3 <= dn_db->parms.t2) { if (dn_db->parms.timer3) { for (ifa = rcu_dereference(dn_db->ifa_list); ifa; ifa = rcu_dereference(ifa->ifa_next)) { if (!(ifa->ifa_flags & IFA_F_SECONDARY)) dn_db->parms.timer3(dev, ifa); } } dn_db->t3 = dn_db->parms.t3; } else { dn_db->t3 -= dn_db->parms.t2; } rcu_read_unlock(); dn_dev_set_timer(dev); } static void dn_dev_set_timer(struct net_device *dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.t2 > dn_db->parms.t3) dn_db->parms.t2 = dn_db->parms.t3; dn_db->timer.data = (unsigned long)dev; dn_db->timer.function = dn_dev_timer_func; dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ); add_timer(&dn_db->timer); } static 
struct dn_dev *dn_dev_create(struct net_device *dev, int *err) { int i; struct dn_dev_parms *p = dn_dev_list; struct dn_dev *dn_db; for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) { if (p->type == dev->type) break; } *err = -ENODEV; if (i == DN_DEV_LIST_SIZE) return NULL; *err = -ENOBUFS; if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) return NULL; memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); rcu_assign_pointer(dev->dn_ptr, dn_db); dn_db->dev = dev; init_timer(&dn_db->timer); dn_db->uptime = jiffies; dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); if (!dn_db->neigh_parms) { RCU_INIT_POINTER(dev->dn_ptr, NULL); kfree(dn_db); return NULL; } if (dn_db->parms.up) { if (dn_db->parms.up(dev) < 0) { neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); dev->dn_ptr = NULL; kfree(dn_db); return NULL; } } dn_dev_sysctl_register(dev, &dn_db->parms); dn_dev_set_timer(dev); *err = 0; return dn_db; } /* * This processes a device up event. We only start up * the loopback device & ethernet devices with correct * MAC addresses automatically. Others must be started * specifically. * * FIXME: How should we configure the loopback address ? If we could dispense * with using decnet_address here and for autobind, it will be one less thing * for users to worry about setting up. */ void dn_dev_up(struct net_device *dev) { struct dn_ifaddr *ifa; __le16 addr = decnet_address; int maybe_default = 0; struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) return; /* * Need to ensure that loopback device has a dn_db attached to it * to allow creation of neighbours against it, even though it might * not have a local address of its own. Might as well do the same for * all autoconfigured interfaces. 
*/ if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return; } if (dev->type == ARPHRD_ETHER) { if (memcmp(dev->dev_addr, dn_hiord, 4) != 0) return; addr = dn_eth2dn(dev->dev_addr); maybe_default = 1; } if (addr == 0) return; if ((ifa = dn_dev_alloc_ifa()) == NULL) return; ifa->ifa_local = ifa->ifa_address = addr; ifa->ifa_flags = 0; ifa->ifa_scope = RT_SCOPE_UNIVERSE; strcpy(ifa->ifa_label, dev->name); dn_dev_set_ifa(dev, ifa); /* * Automagically set the default device to the first automatically * configured ethernet card in the system. */ if (maybe_default) { dev_hold(dev); if (dn_dev_set_default(dev, 0)) dev_put(dev); } } static void dn_dev_delete(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if (dn_db == NULL) return; del_timer_sync(&dn_db->timer); dn_dev_sysctl_unregister(&dn_db->parms); dn_dev_check_default(dev); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->parms.down) dn_db->parms.down(dev); dev->dn_ptr = NULL; neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->router) neigh_release(dn_db->router); if (dn_db->peer) neigh_release(dn_db->peer); kfree(dn_db); } void dn_dev_down(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); struct dn_ifaddr *ifa; if (dn_db == NULL) return; while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) { dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); dn_dev_free_ifa(ifa); } dn_dev_delete(dev); } void dn_dev_init_pkt(struct sk_buff *skb) { } void dn_dev_veri_pkt(struct sk_buff *skb) { } void dn_dev_hello(struct sk_buff *skb) { } void dn_dev_devices_off(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) dn_dev_down(dev); rtnl_unlock(); } void dn_dev_devices_on(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) { if (dev->flags & IFF_UP) dn_dev_up(dev); } rtnl_unlock(); } int register_dnaddr_notifier(struct notifier_block *nb) { return 
blocking_notifier_chain_register(&dnaddr_chain, nb); } int unregister_dnaddr_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&dnaddr_chain, nb); } #ifdef CONFIG_PROC_FS static inline int is_dn_dev(struct net_device *dev) { return dev->dn_ptr != NULL; } static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { int i; struct net_device *dev; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; i = 1; for_each_netdev_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; if (i++ == *pos) return dev; } return NULL; } static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; ++*pos; dev = v; if (v == SEQ_START_TOKEN) dev = net_device_entry(&init_net.dev_base_head); for_each_netdev_continue_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; return dev; } return NULL; } static void dn_dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static char *dn_type2asc(char type) { switch (type) { case DN_DEV_BCAST: return "B"; case DN_DEV_UCAST: return "U"; case DN_DEV_MPOINT: return "M"; } return "?"; } static int dn_dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); else { struct net_device *dev = v; char peer_buf[DN_ASCBUF_LEN]; char router_buf[DN_ASCBUF_LEN]; struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr); seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" " %04hu %03d %02x %-10s %-7s %-7s\n", dev->name ? dev->name : "???", dn_type2asc(dn_db->parms.mode), 0, 0, dn_db->t3, dn_db->parms.t3, mtu2blksize(dev), dn_db->parms.priority, dn_db->parms.state, dn_db->parms.name, dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "", dn_db->peer ? 
dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : ""); } return 0; } static const struct seq_operations dn_dev_seq_ops = { .start = dn_dev_seq_start, .next = dn_dev_seq_next, .stop = dn_dev_seq_stop, .show = dn_dev_seq_show, }; static int dn_dev_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &dn_dev_seq_ops); } static const struct file_operations dn_dev_seq_fops = { .owner = THIS_MODULE, .open = dn_dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static int addr[2]; module_param_array(addr, int, NULL, 0444); MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); void __init dn_dev_init(void) { if (addr[0] > 63 || addr[0] < 0) { printk(KERN_ERR "DECnet: Area must be between 0 and 63"); return; } if (addr[1] > 1023 || addr[1] < 0) { printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); return; } decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]); dn_dev_devices_on(); rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL); rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_register(NULL, &dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ } void __exit dn_dev_cleanup(void) { #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_unregister(&dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ proc_net_remove(&init_net, "decnet_dev"); dn_dev_devices_off(); }
gpl-2.0
ztemt/Z5mini_H112_kernel
drivers/tty/serial/jsm/jsm_driver.c
5053
7035
/************************************************************************ * Copyright 2003 Digi International (www.digi.com) * * Copyright (C) 2004 IBM Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 * Temple Place - Suite 330, Boston, * MA 02111-1307, USA. * * Contact Information: * Scott H Kilau <Scott_Kilau@digi.com> * Wendy Xiong <wendyx@us.ibm.com> * * ***********************************************************************/ #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include "jsm.h" MODULE_AUTHOR("Digi International, http://www.digi.com"); MODULE_DESCRIPTION("Driver for the Digi International " "Neo PCI based product line"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("jsm"); #define JSM_DRIVER_NAME "jsm" #define NR_PORTS 32 #define JSM_MINOR_START 0 struct uart_driver jsm_uart_driver = { .owner = THIS_MODULE, .driver_name = JSM_DRIVER_NAME, .dev_name = "ttyn", .major = 0, .minor = JSM_MINOR_START, .nr = NR_PORTS, }; static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state); static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev); static void jsm_io_resume(struct pci_dev *pdev); static struct pci_error_handlers jsm_err_handler = { .error_detected = jsm_io_error_detected, .slot_reset = jsm_io_slot_reset, .resume = jsm_io_resume, }; int jsm_debug; 
module_param(jsm_debug, int, 0); MODULE_PARM_DESC(jsm_debug, "Driver debugging level"); static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc = 0; struct jsm_board *brd; static int adapter_count = 0; rc = pci_enable_device(pdev); if (rc) { dev_err(&pdev->dev, "Device enable FAILED\n"); goto out; } rc = pci_request_regions(pdev, "jsm"); if (rc) { dev_err(&pdev->dev, "pci_request_region FAILED\n"); goto out_disable_device; } brd = kzalloc(sizeof(struct jsm_board), GFP_KERNEL); if (!brd) { dev_err(&pdev->dev, "memory allocation for board structure failed\n"); rc = -ENOMEM; goto out_release_regions; } /* store the info for the board we've found */ brd->boardnum = adapter_count++; brd->pci_dev = pdev; if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM) brd->maxports = 4; else if (pdev->device == PCI_DEVICE_ID_DIGI_NEO_8) brd->maxports = 8; else brd->maxports = 2; spin_lock_init(&brd->bd_intr_lock); /* store which revision we have */ brd->rev = pdev->revision; brd->irq = pdev->irq; jsm_printk(INIT, INFO, &brd->pci_dev, "jsm_found_board - NEO adapter\n"); /* get the PCI Base Address Registers */ brd->membase = pci_resource_start(pdev, 0); brd->membase_end = pci_resource_end(pdev, 0); if (brd->membase & 1) brd->membase &= ~3; else brd->membase &= ~15; /* Assign the board_ops struct */ brd->bd_ops = &jsm_neo_ops; brd->bd_uart_offset = 0x200; brd->bd_dividend = 921600; brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0)); if (!brd->re_map_membase) { dev_err(&pdev->dev, "card has no PCI Memory resources, " "failing board.\n"); rc = -ENOMEM; goto out_kfree_brd; } rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd); if (rc) { printk(KERN_WARNING "Failed to hook IRQ %d\n",brd->irq); goto out_iounmap; } rc = jsm_tty_init(brd); if (rc < 0) { dev_err(&pdev->dev, "Can't init tty devices (%d)\n", rc); rc = -ENXIO; goto out_free_irq; } rc = jsm_uart_port_init(brd); if (rc < 0) { /* XXX: leaking all resources 
from jsm_tty_init here! */ dev_err(&pdev->dev, "Can't init uart port (%d)\n", rc); rc = -ENXIO; goto out_free_irq; } /* Log the information about the board */ dev_info(&pdev->dev, "board %d: Digi Neo (rev %d), irq %d\n", adapter_count, brd->rev, brd->irq); pci_set_drvdata(pdev, brd); pci_save_state(pdev); return 0; out_free_irq: jsm_remove_uart_port(brd); free_irq(brd->irq, brd); out_iounmap: iounmap(brd->re_map_membase); out_kfree_brd: kfree(brd); out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: return rc; } static void __devexit jsm_remove_one(struct pci_dev *pdev) { struct jsm_board *brd = pci_get_drvdata(pdev); int i = 0; jsm_remove_uart_port(brd); free_irq(brd->irq, brd); iounmap(brd->re_map_membase); /* Free all allocated channels structs */ for (i = 0; i < brd->maxports; i++) { if (brd->channels[i]) { kfree(brd->channels[i]->ch_rqueue); kfree(brd->channels[i]->ch_equeue); kfree(brd->channels[i]); } } pci_release_regions(pdev); pci_disable_device(pdev); kfree(brd); } static struct pci_device_id jsm_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9), 0, 0, 0 }, { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 }, { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 }, { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 }, { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 }, { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_NEO_8), 0, 0, 5 }, { 0, } }; MODULE_DEVICE_TABLE(pci, jsm_pci_tbl); static struct pci_driver jsm_driver = { .name = "jsm", .id_table = jsm_pci_tbl, .probe = jsm_probe_one, .remove = __devexit_p(jsm_remove_one), .err_handler = &jsm_err_handler, }; static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct jsm_board *brd = pci_get_drvdata(pdev); jsm_remove_uart_port(brd); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t jsm_io_slot_reset(struct 
pci_dev *pdev) { int rc; rc = pci_enable_device(pdev); if (rc) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); return PCI_ERS_RESULT_RECOVERED; } static void jsm_io_resume(struct pci_dev *pdev) { struct jsm_board *brd = pci_get_drvdata(pdev); pci_restore_state(pdev); pci_save_state(pdev); jsm_uart_port_init(brd); } static int __init jsm_init_module(void) { int rc; rc = uart_register_driver(&jsm_uart_driver); if (!rc) { rc = pci_register_driver(&jsm_driver); if (rc) uart_unregister_driver(&jsm_uart_driver); } return rc; } static void __exit jsm_exit_module(void) { pci_unregister_driver(&jsm_driver); uart_unregister_driver(&jsm_uart_driver); } module_init(jsm_init_module); module_exit(jsm_exit_module);
gpl-2.0
perillamint/android_kernel_casio_gzone
net/netfilter/ipvs/ip_vs_pe.c
8381
3074
#define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <asm/string.h> #include <linux/kmod.h> #include <linux/sysctl.h> #include <net/ip_vs.h> /* IPVS pe list */ static LIST_HEAD(ip_vs_pe); /* lock for service table */ static DEFINE_SPINLOCK(ip_vs_pe_lock); /* Bind a service with a pe */ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe) { svc->pe = pe; } /* Unbind a service from its pe */ void ip_vs_unbind_pe(struct ip_vs_service *svc) { svc->pe = NULL; } /* Get pe in the pe list by name */ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name) { struct ip_vs_pe *pe; IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__, pe_name); spin_lock_bh(&ip_vs_pe_lock); list_for_each_entry(pe, &ip_vs_pe, n_list) { /* Test and get the modules atomically */ if (pe->module && !try_module_get(pe->module)) { /* This pe is just deleted */ continue; } if (strcmp(pe_name, pe->name)==0) { /* HIT */ spin_unlock_bh(&ip_vs_pe_lock); return pe; } if (pe->module) module_put(pe->module); } spin_unlock_bh(&ip_vs_pe_lock); return NULL; } /* Lookup pe and try to load it if it doesn't exist */ struct ip_vs_pe *ip_vs_pe_getbyname(const char *name) { struct ip_vs_pe *pe; /* Search for the pe by name */ pe = __ip_vs_pe_getbyname(name); /* If pe not found, load the module and search again */ if (!pe) { request_module("ip_vs_pe_%s", name); pe = __ip_vs_pe_getbyname(name); } return pe; } /* Register a pe in the pe list */ int register_ip_vs_pe(struct ip_vs_pe *pe) { struct ip_vs_pe *tmp; /* increase the module use count */ ip_vs_use_count_inc(); spin_lock_bh(&ip_vs_pe_lock); if (!list_empty(&pe->n_list)) { spin_unlock_bh(&ip_vs_pe_lock); ip_vs_use_count_dec(); pr_err("%s(): [%s] pe already linked\n", __func__, pe->name); return -EINVAL; } /* Make sure that the pe with this name doesn't exist * in the pe list. 
*/ list_for_each_entry(tmp, &ip_vs_pe, n_list) { if (strcmp(tmp->name, pe->name) == 0) { spin_unlock_bh(&ip_vs_pe_lock); ip_vs_use_count_dec(); pr_err("%s(): [%s] pe already existed " "in the system\n", __func__, pe->name); return -EINVAL; } } /* Add it into the d-linked pe list */ list_add(&pe->n_list, &ip_vs_pe); spin_unlock_bh(&ip_vs_pe_lock); pr_info("[%s] pe registered.\n", pe->name); return 0; } EXPORT_SYMBOL_GPL(register_ip_vs_pe); /* Unregister a pe from the pe list */ int unregister_ip_vs_pe(struct ip_vs_pe *pe) { spin_lock_bh(&ip_vs_pe_lock); if (list_empty(&pe->n_list)) { spin_unlock_bh(&ip_vs_pe_lock); pr_err("%s(): [%s] pe is not in the list. failed\n", __func__, pe->name); return -EINVAL; } /* Remove it from the d-linked pe list */ list_del(&pe->n_list); spin_unlock_bh(&ip_vs_pe_lock); /* decrease the module use count */ ip_vs_use_count_dec(); pr_info("[%s] pe unregistered.\n", pe->name); return 0; } EXPORT_SYMBOL_GPL(unregister_ip_vs_pe);
gpl-2.0
laufersteppenwolf/lge-kernel-p880
arch/avr32/boards/mimc200/flash.c
13757
3150
/* * MIMC200 board-specific flash initialization * * Copyright (C) 2008 Mercury IMC Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <mach/smc.h> static struct smc_timing flash_timing __initdata = { .ncs_read_setup = 0, .nrd_setup = 15, .ncs_write_setup = 0, .nwe_setup = 0, .ncs_read_pulse = 115, .nrd_pulse = 110, .ncs_write_pulse = 60, .nwe_pulse = 60, .read_cycle = 115, .write_cycle = 100, }; static struct smc_config flash_config __initdata = { .bus_width = 2, .nrd_controlled = 1, .nwe_controlled = 1, .byte_write = 1, }; /* system flash definition */ static struct mtd_partition flash_parts_system[] = { { .name = "u-boot", .offset = 0x00000000, .size = 0x00020000, /* 128 KiB */ .mask_flags = MTD_WRITEABLE, }, { .name = "root", .offset = 0x00020000, .size = 0x007c0000, }, { .name = "splash", .offset = 0x007e0000, .size = 0x00010000, /* 64KiB */ }, { .name = "env", .offset = 0x007f0000, .size = 0x00010000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data flash_system = { .width = 2, .nr_parts = ARRAY_SIZE(flash_parts_system), .parts = flash_parts_system, }; static struct resource flash_resource_system = { .start = 0x00000000, .end = 0x007fffff, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device_system = { .name = "physmap-flash", .id = 0, .resource = &flash_resource_system, .num_resources = 1, .dev = { .platform_data = &flash_system, }, }; /* data flash definition */ static struct mtd_partition flash_parts_data[] = { { .name = "data", .offset = 0x00000000, .size = 0x00800000, }, }; static struct physmap_flash_data flash_data = { .width = 2, .nr_parts = ARRAY_SIZE(flash_parts_data), .parts = flash_parts_data, }; static struct 
resource flash_resource_data = { .start = 0x08000000, .end = 0x087fffff, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device_data = { .name = "physmap-flash", .id = 1, .resource = &flash_resource_data, .num_resources = 1, .dev = { .platform_data = &flash_data, }, }; /* This needs to be called after the SMC has been initialized */ static int __init mimc200_flash_init(void) { int ret; smc_set_timing(&flash_config, &flash_timing); ret = smc_set_configuration(0, &flash_config); if (ret < 0) { printk(KERN_ERR "mimc200: failed to set 'System' NOR flash timing\n"); return ret; } ret = smc_set_configuration(1, &flash_config); if (ret < 0) { printk(KERN_ERR "mimc200: failed to set 'Data' NOR flash timing\n"); return ret; } platform_device_register(&flash_device_system); platform_device_register(&flash_device_data); return 0; } device_initcall(mimc200_flash_init);
gpl-2.0
DarkminecrafterHD/android_kernel_samsung_jf
arch/avr32/boards/mimc200/flash.c
13757
3150
/* * MIMC200 board-specific flash initialization * * Copyright (C) 2008 Mercury IMC Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <mach/smc.h> static struct smc_timing flash_timing __initdata = { .ncs_read_setup = 0, .nrd_setup = 15, .ncs_write_setup = 0, .nwe_setup = 0, .ncs_read_pulse = 115, .nrd_pulse = 110, .ncs_write_pulse = 60, .nwe_pulse = 60, .read_cycle = 115, .write_cycle = 100, }; static struct smc_config flash_config __initdata = { .bus_width = 2, .nrd_controlled = 1, .nwe_controlled = 1, .byte_write = 1, }; /* system flash definition */ static struct mtd_partition flash_parts_system[] = { { .name = "u-boot", .offset = 0x00000000, .size = 0x00020000, /* 128 KiB */ .mask_flags = MTD_WRITEABLE, }, { .name = "root", .offset = 0x00020000, .size = 0x007c0000, }, { .name = "splash", .offset = 0x007e0000, .size = 0x00010000, /* 64KiB */ }, { .name = "env", .offset = 0x007f0000, .size = 0x00010000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data flash_system = { .width = 2, .nr_parts = ARRAY_SIZE(flash_parts_system), .parts = flash_parts_system, }; static struct resource flash_resource_system = { .start = 0x00000000, .end = 0x007fffff, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device_system = { .name = "physmap-flash", .id = 0, .resource = &flash_resource_system, .num_resources = 1, .dev = { .platform_data = &flash_system, }, }; /* data flash definition */ static struct mtd_partition flash_parts_data[] = { { .name = "data", .offset = 0x00000000, .size = 0x00800000, }, }; static struct physmap_flash_data flash_data = { .width = 2, .nr_parts = ARRAY_SIZE(flash_parts_data), .parts = flash_parts_data, }; static struct 
resource flash_resource_data = { .start = 0x08000000, .end = 0x087fffff, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device_data = { .name = "physmap-flash", .id = 1, .resource = &flash_resource_data, .num_resources = 1, .dev = { .platform_data = &flash_data, }, }; /* This needs to be called after the SMC has been initialized */ static int __init mimc200_flash_init(void) { int ret; smc_set_timing(&flash_config, &flash_timing); ret = smc_set_configuration(0, &flash_config); if (ret < 0) { printk(KERN_ERR "mimc200: failed to set 'System' NOR flash timing\n"); return ret; } ret = smc_set_configuration(1, &flash_config); if (ret < 0) { printk(KERN_ERR "mimc200: failed to set 'Data' NOR flash timing\n"); return ret; } platform_device_register(&flash_device_system); platform_device_register(&flash_device_data); return 0; } device_initcall(mimc200_flash_init);
gpl-2.0
dalingrin/NookColor_Kernel
sound/ppc/burgundy.c
190
25006
/* * PMac Burgundy lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * code based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <sound/core.h> #include "pmac.h" #include "burgundy.h" /* Waits for busy flag to clear */ static inline void snd_pmac_burgundy_busy_wait(struct snd_pmac *chip) { int timeout = 50; while ((in_le32(&chip->awacs->codec_ctrl) & MASK_NEWECMD) && timeout--) udelay(1); if (! timeout) printk(KERN_DEBUG "burgundy_busy_wait: timeout\n"); } static inline void snd_pmac_burgundy_extend_wait(struct snd_pmac *chip) { int timeout; timeout = 50; while (!(in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) udelay(1); if (! timeout) printk(KERN_DEBUG "burgundy_extend_wait: timeout #1\n"); timeout = 50; while ((in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) udelay(1); if (! 
timeout) printk(KERN_DEBUG "burgundy_extend_wait: timeout #2\n"); } static void snd_pmac_burgundy_wcw(struct snd_pmac *chip, unsigned addr, unsigned val) { out_le32(&chip->awacs->codec_ctrl, addr + 0x200c00 + (val & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 0x200d00 +((val>>8) & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 0x200e00 +((val>>16) & 0xff)); snd_pmac_burgundy_busy_wait(chip); out_le32(&chip->awacs->codec_ctrl, addr + 0x200f00 +((val>>24) & 0xff)); snd_pmac_burgundy_busy_wait(chip); } static unsigned snd_pmac_burgundy_rcw(struct snd_pmac *chip, unsigned addr) { unsigned val = 0; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); out_le32(&chip->awacs->codec_ctrl, addr + 0x100000); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += (in_le32(&chip->awacs->codec_stat) >> 4) & 0xff; out_le32(&chip->awacs->codec_ctrl, addr + 0x100100); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<8; out_le32(&chip->awacs->codec_ctrl, addr + 0x100200); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<16; out_le32(&chip->awacs->codec_ctrl, addr + 0x100300); snd_pmac_burgundy_busy_wait(chip); snd_pmac_burgundy_extend_wait(chip); val += ((in_le32(&chip->awacs->codec_stat)>>4) & 0xff) <<24; spin_unlock_irqrestore(&chip->reg_lock, flags); return val; } static void snd_pmac_burgundy_wcb(struct snd_pmac *chip, unsigned int addr, unsigned int val) { out_le32(&chip->awacs->codec_ctrl, addr + 0x300000 + (val & 0xff)); snd_pmac_burgundy_busy_wait(chip); } static unsigned snd_pmac_burgundy_rcb(struct snd_pmac *chip, unsigned int addr) { unsigned val = 0; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); out_le32(&chip->awacs->codec_ctrl, addr + 0x100000); snd_pmac_burgundy_busy_wait(chip); 
snd_pmac_burgundy_extend_wait(chip); val += (in_le32(&chip->awacs->codec_stat) >> 4) & 0xff; spin_unlock_irqrestore(&chip->reg_lock, flags); return val; } #define BASE2ADDR(base) ((base) << 12) #define ADDR2BASE(addr) ((addr) >> 12) /* * Burgundy volume: 0 - 100, stereo, word reg */ static void snd_pmac_burgundy_write_volume(struct snd_pmac *chip, unsigned int address, long *volume, int shift) { int hardvolume, lvolume, rvolume; if (volume[0] < 0 || volume[0] > 100 || volume[1] < 0 || volume[1] > 100) return; /* -EINVAL */ lvolume = volume[0] ? volume[0] + BURGUNDY_VOLUME_OFFSET : 0; rvolume = volume[1] ? volume[1] + BURGUNDY_VOLUME_OFFSET : 0; hardvolume = lvolume + (rvolume << shift); if (shift == 8) hardvolume |= hardvolume << 16; snd_pmac_burgundy_wcw(chip, address, hardvolume); } static void snd_pmac_burgundy_read_volume(struct snd_pmac *chip, unsigned int address, long *volume, int shift) { int wvolume; wvolume = snd_pmac_burgundy_rcw(chip, address); volume[0] = wvolume & 0xff; if (volume[0] >= BURGUNDY_VOLUME_OFFSET) volume[0] -= BURGUNDY_VOLUME_OFFSET; else volume[0] = 0; volume[1] = (wvolume >> shift) & 0xff; if (volume[1] >= BURGUNDY_VOLUME_OFFSET) volume[1] -= BURGUNDY_VOLUME_OFFSET; else volume[1] = 0; } static int snd_pmac_burgundy_info_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int snd_pmac_burgundy_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int shift = (kcontrol->private_value >> 8) & 0xff; snd_pmac_burgundy_read_volume(chip, addr, ucontrol->value.integer.value, shift); return 0; } static int snd_pmac_burgundy_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = 
snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int shift = (kcontrol->private_value >> 8) & 0xff; long nvoices[2]; snd_pmac_burgundy_write_volume(chip, addr, ucontrol->value.integer.value, shift); snd_pmac_burgundy_read_volume(chip, addr, nvoices, shift); return (nvoices[0] != ucontrol->value.integer.value[0] || nvoices[1] != ucontrol->value.integer.value[1]); } #define BURGUNDY_VOLUME_W(xname, xindex, addr, shift) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_volume,\ .get = snd_pmac_burgundy_get_volume,\ .put = snd_pmac_burgundy_put_volume,\ .private_value = ((ADDR2BASE(addr) & 0xff) | ((shift) << 8)) } /* * Burgundy volume: 0 - 100, stereo, 2-byte reg */ static void snd_pmac_burgundy_write_volume_2b(struct snd_pmac *chip, unsigned int address, long *volume, int off) { int lvolume, rvolume; off |= off << 2; lvolume = volume[0] ? volume[0] + BURGUNDY_VOLUME_OFFSET : 0; rvolume = volume[1] ? 
volume[1] + BURGUNDY_VOLUME_OFFSET : 0; snd_pmac_burgundy_wcb(chip, address + off, lvolume); snd_pmac_burgundy_wcb(chip, address + off + 0x500, rvolume); } static void snd_pmac_burgundy_read_volume_2b(struct snd_pmac *chip, unsigned int address, long *volume, int off) { volume[0] = snd_pmac_burgundy_rcb(chip, address + off); if (volume[0] >= BURGUNDY_VOLUME_OFFSET) volume[0] -= BURGUNDY_VOLUME_OFFSET; else volume[0] = 0; volume[1] = snd_pmac_burgundy_rcb(chip, address + off + 0x100); if (volume[1] >= BURGUNDY_VOLUME_OFFSET) volume[1] -= BURGUNDY_VOLUME_OFFSET; else volume[1] = 0; } static int snd_pmac_burgundy_info_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 100; return 0; } static int snd_pmac_burgundy_get_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int off = kcontrol->private_value & 0x300; snd_pmac_burgundy_read_volume_2b(chip, addr, ucontrol->value.integer.value, off); return 0; } static int snd_pmac_burgundy_put_volume_2b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int off = kcontrol->private_value & 0x300; long nvoices[2]; snd_pmac_burgundy_write_volume_2b(chip, addr, ucontrol->value.integer.value, off); snd_pmac_burgundy_read_volume_2b(chip, addr, nvoices, off); return (nvoices[0] != ucontrol->value.integer.value[0] || nvoices[1] != ucontrol->value.integer.value[1]); } #define BURGUNDY_VOLUME_2B(xname, xindex, addr, off) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_volume_2b,\ .get = snd_pmac_burgundy_get_volume_2b,\ .put = snd_pmac_burgundy_put_volume_2b,\ 
.private_value = ((ADDR2BASE(addr) & 0xff) | ((off) << 8)) } /* * Burgundy gain/attenuation: 0 - 15, mono/stereo, byte reg */ static int snd_pmac_burgundy_info_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 15; return 0; } static int snd_pmac_burgundy_get_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int atten = (kcontrol->private_value >> 25) & 1; int oval; oval = snd_pmac_burgundy_rcb(chip, addr); if (atten) oval = ~oval & 0xff; ucontrol->value.integer.value[0] = oval & 0xf; if (stereo) ucontrol->value.integer.value[1] = (oval >> 4) & 0xf; return 0; } static int snd_pmac_burgundy_put_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR(kcontrol->private_value & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int atten = (kcontrol->private_value >> 25) & 1; int oval, val; oval = snd_pmac_burgundy_rcb(chip, addr); if (atten) oval = ~oval & 0xff; val = ucontrol->value.integer.value[0]; if (stereo) val |= ucontrol->value.integer.value[1] << 4; else val |= ucontrol->value.integer.value[0] << 4; if (atten) val = ~val & 0xff; snd_pmac_burgundy_wcb(chip, addr, val); return val != oval; } #define BURGUNDY_VOLUME_B(xname, xindex, addr, stereo, atten) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_gain,\ .get = snd_pmac_burgundy_get_gain,\ .put = snd_pmac_burgundy_put_gain,\ .private_value = (ADDR2BASE(addr) | ((stereo) << 24) | ((atten) << 25)) } /* * Burgundy switch: 0/1, mono/stereo, word reg */ static int 
snd_pmac_burgundy_info_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int snd_pmac_burgundy_get_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = 1 << (kcontrol->private_value & 0xff); int rmask = 1 << ((kcontrol->private_value >> 8) & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int val = snd_pmac_burgundy_rcw(chip, addr); ucontrol->value.integer.value[0] = (val & lmask) ? 1 : 0; if (stereo) ucontrol->value.integer.value[1] = (val & rmask) ? 1 : 0; return 0; } static int snd_pmac_burgundy_put_switch_w(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = 1 << (kcontrol->private_value & 0xff); int rmask = 1 << ((kcontrol->private_value >> 8) & 0xff); int stereo = (kcontrol->private_value >> 24) & 1; int val, oval; oval = snd_pmac_burgundy_rcw(chip, addr); val = oval & ~(lmask | (stereo ? 
rmask : 0)); if (ucontrol->value.integer.value[0]) val |= lmask; if (stereo && ucontrol->value.integer.value[1]) val |= rmask; snd_pmac_burgundy_wcw(chip, addr, val); return val != oval; } #define BURGUNDY_SWITCH_W(xname, xindex, addr, lbit, rbit, stereo) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_switch_w,\ .get = snd_pmac_burgundy_get_switch_w,\ .put = snd_pmac_burgundy_put_switch_w,\ .private_value = ((lbit) | ((rbit) << 8)\ | (ADDR2BASE(addr) << 16) | ((stereo) << 24)) } /* * Burgundy switch: 0/1, mono/stereo, byte reg, bit mask */ static int snd_pmac_burgundy_info_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int stereo = (kcontrol->private_value >> 24) & 1; uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = stereo + 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } static int snd_pmac_burgundy_get_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = kcontrol->private_value & 0xff; int rmask = (kcontrol->private_value >> 8) & 0xff; int stereo = (kcontrol->private_value >> 24) & 1; int val = snd_pmac_burgundy_rcb(chip, addr); ucontrol->value.integer.value[0] = (val & lmask) ? 1 : 0; if (stereo) ucontrol->value.integer.value[1] = (val & rmask) ? 
1 : 0; return 0; } static int snd_pmac_burgundy_put_switch_b(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); unsigned int addr = BASE2ADDR((kcontrol->private_value >> 16) & 0xff); int lmask = kcontrol->private_value & 0xff; int rmask = (kcontrol->private_value >> 8) & 0xff; int stereo = (kcontrol->private_value >> 24) & 1; int val, oval; oval = snd_pmac_burgundy_rcb(chip, addr); val = oval & ~(lmask | rmask); if (ucontrol->value.integer.value[0]) val |= lmask; if (stereo && ucontrol->value.integer.value[1]) val |= rmask; snd_pmac_burgundy_wcb(chip, addr, val); return val != oval; } #define BURGUNDY_SWITCH_B(xname, xindex, addr, lmask, rmask, stereo) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex,\ .info = snd_pmac_burgundy_info_switch_b,\ .get = snd_pmac_burgundy_get_switch_b,\ .put = snd_pmac_burgundy_put_switch_b,\ .private_value = ((lmask) | ((rmask) << 8)\ | (ADDR2BASE(addr) << 16) | ((stereo) << 24)) } /* * Burgundy mixers */ static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __initdata = { BURGUNDY_VOLUME_W("Master Playback Volume", 0, MASK_ADDR_BURGUNDY_MASTER_VOLUME, 8), BURGUNDY_VOLUME_W("CD Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLCD, 16), BURGUNDY_VOLUME_2B("Input Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIX01, 2), BURGUNDY_VOLUME_2B("Mixer Playback Volume", 0, MASK_ADDR_BURGUNDY_VOLMIX23, 0), BURGUNDY_VOLUME_B("CD Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINCD, 1, 0), BURGUNDY_SWITCH_W("Master Capture Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTENABLES, 24, 0, 0), BURGUNDY_SWITCH_W("CD Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 0, 16, 1), BURGUNDY_SWITCH_W("CD Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 0, 16, 1), /* BURGUNDY_SWITCH_W("Loop Capture Switch", 0, * MASK_ADDR_BURGUNDY_CAPTURESELECTS, 8, 24, 1), * BURGUNDY_SWITCH_B("Mixer out Capture Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFAD, 0x02, 0, 0), * BURGUNDY_SWITCH_B("Mixer 
Capture Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFAD, 0x01, 0, 0), * BURGUNDY_SWITCH_B("PCM out Capture Switch", 0, * MASK_ADDR_BURGUNDY_HOSTIFEH, 0x02, 0, 0), */ BURGUNDY_SWITCH_B("PCM Capture Switch", 0, MASK_ADDR_BURGUNDY_HOSTIFEH, 0x01, 0, 0) }; static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __initdata = { BURGUNDY_VOLUME_W("Line in Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLLINE, 16), BURGUNDY_VOLUME_W("Mic Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIC, 16), BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINLINE, 1, 0), BURGUNDY_VOLUME_B("Mic Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINMIC, 1, 0), BURGUNDY_VOLUME_B("PC Speaker Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENSPEAKER, 1, 1), BURGUNDY_VOLUME_B("Line out Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENLINEOUT, 1, 1), BURGUNDY_VOLUME_B("Headphone Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENHP, 1, 1), BURGUNDY_SWITCH_W("Line in Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 1, 17, 1), BURGUNDY_SWITCH_W("Mic Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 2, 18, 1), BURGUNDY_SWITCH_W("Line in Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 1, 17, 1), BURGUNDY_SWITCH_W("Mic Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 2, 18, 1), BURGUNDY_SWITCH_B("Mic Boost Capture Switch", 0, MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) }; static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __initdata = { BURGUNDY_VOLUME_W("Line in Capture Volume", 0, MASK_ADDR_BURGUNDY_VOLMIC, 16), BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0, MASK_ADDR_BURGUNDY_GAINMIC, 1, 0), BURGUNDY_VOLUME_B("PC Speaker Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENMONO, 0, 1), BURGUNDY_VOLUME_B("Line out Playback Volume", 0, MASK_ADDR_BURGUNDY_ATTENSPEAKER, 1, 1), BURGUNDY_SWITCH_W("Line in Capture Switch", 0, MASK_ADDR_BURGUNDY_CAPTURESELECTS, 2, 18, 1), BURGUNDY_SWITCH_W("Line in Playback Switch", 0, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, 2, 18, 1), /* 
BURGUNDY_SWITCH_B("Line in Boost Capture Switch", 0, * MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) */ }; static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac __initdata = BURGUNDY_SWITCH_B("Master Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT | BURGUNDY_LINEOUT_LEFT | BURGUNDY_HP_LEFT, BURGUNDY_OUTPUT_RIGHT | BURGUNDY_LINEOUT_RIGHT | BURGUNDY_HP_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac __initdata = BURGUNDY_SWITCH_B("Master Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_INTERN | BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac __initdata = BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac __initdata = BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_INTERN, 0, 0); static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac __initdata = BURGUNDY_SWITCH_B("Line out Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_LINEOUT_LEFT, BURGUNDY_LINEOUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac __initdata = BURGUNDY_SWITCH_B("Line out Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac __initdata = BURGUNDY_SWITCH_B("Headphone Playback Switch", 0, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, BURGUNDY_HP_LEFT, BURGUNDY_HP_RIGHT, 1); #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute stuffs */ static int snd_pmac_burgundy_detect_headphone(struct snd_pmac *chip) { return (in_le32(&chip->awacs->codec_stat) & chip->hp_stat_mask) ? 
1 : 0; } static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_notify) { if (chip->auto_mute) { int imac = machine_is_compatible("iMac"); int reg, oreg; reg = oreg = snd_pmac_burgundy_rcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES); reg &= imac ? ~(BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT | BURGUNDY_HP_LEFT | BURGUNDY_HP_RIGHT) : ~(BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT | BURGUNDY_OUTPUT_INTERN); if (snd_pmac_burgundy_detect_headphone(chip)) reg |= imac ? (BURGUNDY_HP_LEFT | BURGUNDY_HP_RIGHT) : (BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT); else reg |= imac ? (BURGUNDY_OUTPUT_LEFT | BURGUNDY_OUTPUT_RIGHT) : (BURGUNDY_OUTPUT_INTERN); if (do_notify && reg == oreg) return; snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, reg); if (do_notify) { snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_sw_ctl->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->speaker_sw_ctl->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hp_detect_ctl->id); } } } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* * initialize burgundy */ int __init snd_pmac_burgundy_init(struct snd_pmac *chip) { int imac = machine_is_compatible("iMac"); int i, err; /* Checks to see the chip is alive and kicking */ if ((in_le32(&chip->awacs->codec_ctrl) & MASK_ERRCODE) == 0xf0000) { printk(KERN_WARNING "pmac burgundy: disabled by MacOS :-(\n"); return 1; } snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_OUTPUTENABLES, DEF_BURGUNDY_OUTPUTENABLES); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, DEF_BURGUNDY_MORE_OUTPUTENABLES); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_OUTPUTSELECTS, DEF_BURGUNDY_OUTPUTSELECTS); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_INPSEL21, DEF_BURGUNDY_INPSEL21); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_INPSEL3, imac ? 
DEF_BURGUNDY_INPSEL3_IMAC : DEF_BURGUNDY_INPSEL3_PMAC); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINCD, DEF_BURGUNDY_GAINCD); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINLINE, DEF_BURGUNDY_GAINLINE); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINMIC, DEF_BURGUNDY_GAINMIC); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_GAINMODEM, DEF_BURGUNDY_GAINMODEM); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENSPEAKER, DEF_BURGUNDY_ATTENSPEAKER); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENLINEOUT, DEF_BURGUNDY_ATTENLINEOUT); snd_pmac_burgundy_wcb(chip, MASK_ADDR_BURGUNDY_ATTENHP, DEF_BURGUNDY_ATTENHP); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_MASTER_VOLUME, DEF_BURGUNDY_MASTER_VOLUME); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLCD, DEF_BURGUNDY_VOLCD); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLLINE, DEF_BURGUNDY_VOLLINE); snd_pmac_burgundy_wcw(chip, MASK_ADDR_BURGUNDY_VOLMIC, DEF_BURGUNDY_VOLMIC); if (chip->hp_stat_mask == 0) { /* set headphone-jack detection bit */ if (imac) chip->hp_stat_mask = BURGUNDY_HPDETECT_IMAC_UPPER | BURGUNDY_HPDETECT_IMAC_LOWER | BURGUNDY_HPDETECT_IMAC_SIDE; else chip->hp_stat_mask = BURGUNDY_HPDETECT_PMAC_BACK; } /* * build burgundy mixers */ strcpy(chip->card->mixername, "PowerMac Burgundy"); for (i = 0; i < ARRAY_SIZE(snd_pmac_burgundy_mixers); i++) { err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_pmac_burgundy_mixers[i], chip)); if (err < 0) return err; } for (i = 0; i < (imac ? ARRAY_SIZE(snd_pmac_burgundy_mixers_imac) : ARRAY_SIZE(snd_pmac_burgundy_mixers_pmac)); i++) { err = snd_ctl_add(chip->card, snd_ctl_new1(imac ? &snd_pmac_burgundy_mixers_imac[i] : &snd_pmac_burgundy_mixers_pmac[i], chip)); if (err < 0) return err; } chip->master_sw_ctl = snd_ctl_new1(imac ? &snd_pmac_burgundy_master_sw_imac : &snd_pmac_burgundy_master_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; chip->master_sw_ctl = snd_ctl_new1(imac ? 
&snd_pmac_burgundy_line_sw_imac : &snd_pmac_burgundy_line_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; if (imac) { chip->master_sw_ctl = snd_ctl_new1( &snd_pmac_burgundy_hp_sw_imac, chip); err = snd_ctl_add(chip->card, chip->master_sw_ctl); if (err < 0) return err; } chip->speaker_sw_ctl = snd_ctl_new1(imac ? &snd_pmac_burgundy_speaker_sw_imac : &snd_pmac_burgundy_speaker_sw_pmac, chip); err = snd_ctl_add(chip->card, chip->speaker_sw_ctl); if (err < 0) return err; #ifdef PMAC_SUPPORT_AUTOMUTE err = snd_pmac_add_automute(chip); if (err < 0) return err; chip->detect_headphone = snd_pmac_burgundy_detect_headphone; chip->update_automute = snd_pmac_burgundy_update_automute; snd_pmac_burgundy_update_automute(chip, 0); /* update the status only */ #endif return 0; }
gpl-2.0
j-r0dd/motus_kernel
net/irda/irsysctl.c
190
8232
/********************************************************************* * * Filename: irsysctl.c * Version: 1.0 * Description: Sysctl interface for IrDA * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun May 24 22:12:06 1998 * Modified at: Fri Jun 4 02:50:15 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/mm.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/init.h> #include <net/irda/irda.h> /* irda_debug */ #include <net/irda/irlmp.h> #include <net/irda/timer.h> #include <net/irda/irias_object.h> extern int sysctl_discovery; extern int sysctl_discovery_slots; extern int sysctl_discovery_timeout; extern int sysctl_slot_timeout; extern int sysctl_fast_poll_increase; extern char sysctl_devname[]; extern int sysctl_max_baud_rate; extern int sysctl_min_tx_turn_time; extern int sysctl_max_tx_data_size; extern int sysctl_max_tx_window; extern int sysctl_max_noreply_time; extern int sysctl_warn_noreply_time; extern int sysctl_lap_keepalive_time; extern struct irlmp_cb *irlmp; /* this is needed for the proc_dointvec_minmax - Jean II */ static int max_discovery_slots = 16; /* ??? */ static int min_discovery_slots = 1; /* IrLAP 6.13.2 says 25ms to 10+70ms - allow higher since some devices * seems to require it. 
(from Dag's comment) */ static int max_slot_timeout = 160; static int min_slot_timeout = 20; static int max_max_baud_rate = 16000000; /* See qos.c - IrLAP spec */ static int min_max_baud_rate = 2400; static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */ static int min_min_tx_turn_time; static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */ static int min_max_tx_data_size = 64; static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */ static int min_max_tx_window = 1; static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */ static int min_max_noreply_time = 3; static int max_warn_noreply_time = 3; /* 3s == standard */ static int min_warn_noreply_time = 1; /* 1s == min WD_TIMER */ static int max_lap_keepalive_time = 10000; /* 10s */ static int min_lap_keepalive_time = 100; /* 100us */ /* For other sysctl, I've no idea of the range. Maybe Dag could help * us on that - Jean II */ static int do_devname(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dostring(table, write, filp, buffer, lenp, ppos); if (ret == 0 && write) { struct ias_value *val; val = irias_new_string_value(sysctl_devname); if (val) irias_object_change_attribute("Device", "DeviceName", val); } return ret; } static int do_discovery(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); if (ret) return ret; if (irlmp == NULL) return -ENODEV; if (sysctl_discovery) irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout*HZ); else del_timer_sync(&irlmp->discovery_timer); return ret; } /* One file */ static ctl_table irda_table[] = { { .ctl_name = NET_IRDA_DISCOVERY, .procname = "discovery", .data = &sysctl_discovery, .maxlen = sizeof(int), .mode = 0644, .proc_handler = do_discovery, .strategy = sysctl_intvec }, { .ctl_name = NET_IRDA_DEVNAME, .procname = "devname", .data = 
sysctl_devname, .maxlen = 65, .mode = 0644, .proc_handler = do_devname, .strategy = sysctl_string }, #ifdef CONFIG_IRDA_DEBUG { .ctl_name = NET_IRDA_DEBUG, .procname = "debug", .data = &irda_debug, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #ifdef CONFIG_IRDA_FAST_RR { .ctl_name = NET_IRDA_FAST_POLL, .procname = "fast_poll_increase", .data = &sysctl_fast_poll_increase, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .ctl_name = NET_IRDA_DISCOVERY_SLOTS, .procname = "discovery_slots", .data = &sysctl_discovery_slots, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_discovery_slots, .extra2 = &max_discovery_slots }, { .ctl_name = NET_IRDA_DISCOVERY_TIMEOUT, .procname = "discovery_timeout", .data = &sysctl_discovery_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IRDA_SLOT_TIMEOUT, .procname = "slot_timeout", .data = &sysctl_slot_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_slot_timeout, .extra2 = &max_slot_timeout }, { .ctl_name = NET_IRDA_MAX_BAUD_RATE, .procname = "max_baud_rate", .data = &sysctl_max_baud_rate, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_max_baud_rate, .extra2 = &max_max_baud_rate }, { .ctl_name = NET_IRDA_MIN_TX_TURN_TIME, .procname = "min_tx_turn_time", .data = &sysctl_min_tx_turn_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_min_tx_turn_time, .extra2 = &max_min_tx_turn_time }, { .ctl_name = NET_IRDA_MAX_TX_DATA_SIZE, .procname = "max_tx_data_size", .data = &sysctl_max_tx_data_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_max_tx_data_size, .extra2 = 
&max_max_tx_data_size }, { .ctl_name = NET_IRDA_MAX_TX_WINDOW, .procname = "max_tx_window", .data = &sysctl_max_tx_window, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_max_tx_window, .extra2 = &max_max_tx_window }, { .ctl_name = NET_IRDA_MAX_NOREPLY_TIME, .procname = "max_noreply_time", .data = &sysctl_max_noreply_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_max_noreply_time, .extra2 = &max_max_noreply_time }, { .ctl_name = NET_IRDA_WARN_NOREPLY_TIME, .procname = "warn_noreply_time", .data = &sysctl_warn_noreply_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_warn_noreply_time, .extra2 = &max_warn_noreply_time }, { .ctl_name = NET_IRDA_LAP_KEEPALIVE_TIME, .procname = "lap_keepalive_time", .data = &sysctl_lap_keepalive_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &min_lap_keepalive_time, .extra2 = &max_lap_keepalive_time }, { .ctl_name = 0 } }; static struct ctl_path irda_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "irda", .ctl_name = NET_IRDA, }, { } }; static struct ctl_table_header *irda_table_header; /* * Function irda_sysctl_register (void) * * Register our sysctl interface * */ int __init irda_sysctl_register(void) { irda_table_header = register_sysctl_paths(irda_path, irda_table); if (!irda_table_header) return -ENOMEM; return 0; } /* * Function irda_sysctl_unregister (void) * * Unregister our sysctl interface * */ void irda_sysctl_unregister(void) { unregister_sysctl_table(irda_table_header); }
gpl-2.0
dan82840/Netgear-RBR50
git_home/linux.git/fs/xfs/xfs_qm_bhv.c
446
4421
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_error.h" #include "xfs_trans.h" #include "xfs_qm.h" STATIC void xfs_fill_statvfs_from_dquot( struct kstatfs *statp, struct xfs_dquot *dqp) { __uint64_t limit; limit = dqp->q_core.d_blk_softlimit ? be64_to_cpu(dqp->q_core.d_blk_softlimit) : be64_to_cpu(dqp->q_core.d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; statp->f_bfree = statp->f_bavail = (statp->f_blocks > dqp->q_res_bcount) ? (statp->f_blocks - dqp->q_res_bcount) : 0; } limit = dqp->q_core.d_ino_softlimit ? be64_to_cpu(dqp->q_core.d_ino_softlimit) : be64_to_cpu(dqp->q_core.d_ino_hardlimit); if (limit && statp->f_files > limit) { statp->f_files = limit; statp->f_ffree = (statp->f_files > dqp->q_res_icount) ? (statp->f_ffree - dqp->q_res_icount) : 0; } } /* * Directory tree accounting is implemented using project quotas, where * the project identifier is inherited from parent directories. * A statvfs (df, etc.) of a directory that is using project quota should * return a statvfs of the project, not the entire filesystem. 
* This makes such trees appear as if they are filesystems in themselves. */ void xfs_qm_statvfs( xfs_inode_t *ip, struct kstatfs *statp) { xfs_mount_t *mp = ip->i_mount; xfs_dquot_t *dqp; if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { xfs_fill_statvfs_from_dquot(statp, dqp); xfs_qm_dqput(dqp); } } int xfs_qm_newmount( xfs_mount_t *mp, uint *needquotamount, uint *quotaflags) { uint quotaondisk; uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); if (quotaondisk) { uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT; gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; } /* * If the device itself is read-only, we can't allow * the user to change the state of quota on the mount - * this would generate a transaction on the ro device, * which would lead to an I/O error and shutdown */ if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || (!gquotaondisk && XFS_IS_GQUOTA_ON(mp)) || (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) || (!pquotaondisk && XFS_IS_PQUOTA_ON(mp))) && xfs_dev_is_read_only(mp, "changing quota state")) { xfs_warn(mp, "please mount with%s%s%s%s.", (!quotaondisk ? "out quota" : ""), (uquotaondisk ? " usrquota" : ""), (gquotaondisk ? " grpquota" : ""), (pquotaondisk ? " prjquota" : "")); return XFS_ERROR(EPERM); } if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { /* * Call mount_quotas at this point only if we won't have to do * a quotacheck. */ if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { /* * If an error occurred, qm_mount_quotas code * has already disabled quotas. So, just finish * mounting, and get on with the boring life * without disk quotas. */ xfs_qm_mount_quotas(mp); } else { /* * Clear the quota flags, but remember them. This * is so that the quota code doesn't get invoked * before we're ready. 
This can happen when an * inode goes inactive and wants to free blocks, * or via xfs_log_mount_finish. */ *needquotamount = true; *quotaflags = mp->m_qflags; mp->m_qflags = 0; } } return 0; }
gpl-2.0
weimingtom/uve
UVEngineNative/oggvorbis/smallft.c
446
22249
/******************************************************************** * * * THIS FILE IS PART OF THE OggVorbis SOFTWARE CODEC SOURCE CODE. * * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS * * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE * * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. * * * * THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2009 * * by the Xiph.Org Foundation http://www.xiph.org/ * * * ******************************************************************** function: *unnormalized* fft transform last mod: $Id: smallft.c 16227 2009-07-08 06:58:46Z xiphmont $ ********************************************************************/ /* FFT implementation from OggSquish, minus cosine transforms, * minus all but radix 2/4 case. In Vorbis we only need this * cut-down version. * * To do more than just power-of-two sized vectors, see the full * version I wrote for NetLib. * * Note that the packing is a little strange; rather than the FFT r/i * packing following R_0, I_n, R_1, I_1, R_2, I_2 ... R_n-1, I_n-1, * it follows R_0, R_1, I_1, R_2, I_2 ... 
R_n-1, I_n-1, I_n like the * FORTRAN version */ #include <stdlib.h> #include <string.h> #include <math.h> #include "smallft.h" #include "os.h" #include "misc.h" static void drfti1(int n, float *wa, int *ifac){ static int ntryh[4] = { 4,2,3,5 }; static float tpi = 6.28318530717958648f; float arg,argh,argld,fi; int ntry=0,i,j=-1; int k1, l1, l2, ib; int ld, ii, ip, is, nq, nr; int ido, ipm, nfm1; int nl=n; int nf=0; L101: j++; if (j < 4) ntry=ntryh[j]; else ntry+=2; L104: nq=nl/ntry; nr=nl-ntry*nq; if (nr!=0) goto L101; nf++; ifac[nf+1]=ntry; nl=nq; if(ntry!=2)goto L107; if(nf==1)goto L107; for (i=1;i<nf;i++){ ib=nf-i+1; ifac[ib+1]=ifac[ib]; } ifac[2] = 2; L107: if(nl!=1)goto L104; ifac[0]=n; ifac[1]=nf; argh=tpi/n; is=0; nfm1=nf-1; l1=1; if(nfm1==0)return; for (k1=0;k1<nfm1;k1++){ ip=ifac[k1+2]; ld=0; l2=l1*ip; ido=n/l2; ipm=ip-1; for (j=0;j<ipm;j++){ ld+=l1; i=is; argld=(float)ld*argh; fi=0.f; for (ii=2;ii<ido;ii+=2){ fi+=1.f; arg=fi*argld; wa[i++]=cos(arg); wa[i++]=sin(arg); } is+=ido; } l1=l2; } } static void fdrffti(int n, float *wsave, int *ifac){ if (n == 1) return; drfti1(n, wsave+n, ifac); } static void dradf2(int ido,int l1,float *cc,float *ch,float *wa1){ int i,k; float ti2,tr2; int t0,t1,t2,t3,t4,t5,t6; t1=0; t0=(t2=l1*ido); t3=ido<<1; for(k=0;k<l1;k++){ ch[t1<<1]=cc[t1]+cc[t2]; ch[(t1<<1)+t3-1]=cc[t1]-cc[t2]; t1+=ido; t2+=ido; } if(ido<2)return; if(ido==2)goto L105; t1=0; t2=t0; for(k=0;k<l1;k++){ t3=t2; t4=(t1<<1)+(ido<<1); t5=t1; t6=t1+t1; for(i=2;i<ido;i+=2){ t3+=2; t4-=2; t5+=2; t6+=2; tr2=wa1[i-2]*cc[t3-1]+wa1[i-1]*cc[t3]; ti2=wa1[i-2]*cc[t3]-wa1[i-1]*cc[t3-1]; ch[t6]=cc[t5]+ti2; ch[t4]=ti2-cc[t5]; ch[t6-1]=cc[t5-1]+tr2; ch[t4-1]=cc[t5-1]-tr2; } t1+=ido; t2+=ido; } if(ido%2==1)return; L105: t3=(t2=(t1=ido)-1); t2+=t0; for(k=0;k<l1;k++){ ch[t1]=-cc[t2]; ch[t1-1]=cc[t3]; t1+=ido<<1; t2+=ido; t3+=ido; } } static void dradf4(int ido,int l1,float *cc,float *ch,float *wa1, float *wa2,float *wa3){ static float hsqt2 = .70710678118654752f; int 
i,k,t0,t1,t2,t3,t4,t5,t6; float ci2,ci3,ci4,cr2,cr3,cr4,ti1,ti2,ti3,ti4,tr1,tr2,tr3,tr4; t0=l1*ido; t1=t0; t4=t1<<1; t2=t1+(t1<<1); t3=0; for(k=0;k<l1;k++){ tr1=cc[t1]+cc[t2]; tr2=cc[t3]+cc[t4]; ch[t5=t3<<2]=tr1+tr2; ch[(ido<<2)+t5-1]=tr2-tr1; ch[(t5+=(ido<<1))-1]=cc[t3]-cc[t4]; ch[t5]=cc[t2]-cc[t1]; t1+=ido; t2+=ido; t3+=ido; t4+=ido; } if(ido<2)return; if(ido==2)goto L105; t1=0; for(k=0;k<l1;k++){ t2=t1; t4=t1<<2; t5=(t6=ido<<1)+t4; for(i=2;i<ido;i+=2){ t3=(t2+=2); t4+=2; t5-=2; t3+=t0; cr2=wa1[i-2]*cc[t3-1]+wa1[i-1]*cc[t3]; ci2=wa1[i-2]*cc[t3]-wa1[i-1]*cc[t3-1]; t3+=t0; cr3=wa2[i-2]*cc[t3-1]+wa2[i-1]*cc[t3]; ci3=wa2[i-2]*cc[t3]-wa2[i-1]*cc[t3-1]; t3+=t0; cr4=wa3[i-2]*cc[t3-1]+wa3[i-1]*cc[t3]; ci4=wa3[i-2]*cc[t3]-wa3[i-1]*cc[t3-1]; tr1=cr2+cr4; tr4=cr4-cr2; ti1=ci2+ci4; ti4=ci2-ci4; ti2=cc[t2]+ci3; ti3=cc[t2]-ci3; tr2=cc[t2-1]+cr3; tr3=cc[t2-1]-cr3; ch[t4-1]=tr1+tr2; ch[t4]=ti1+ti2; ch[t5-1]=tr3-ti4; ch[t5]=tr4-ti3; ch[t4+t6-1]=ti4+tr3; ch[t4+t6]=tr4+ti3; ch[t5+t6-1]=tr2-tr1; ch[t5+t6]=ti1-ti2; } t1+=ido; } if(ido&1)return; L105: t2=(t1=t0+ido-1)+(t0<<1); t3=ido<<2; t4=ido; t5=ido<<1; t6=ido; for(k=0;k<l1;k++){ ti1=-hsqt2*(cc[t1]+cc[t2]); tr1=hsqt2*(cc[t1]-cc[t2]); ch[t4-1]=tr1+cc[t6-1]; ch[t4+t5-1]=cc[t6-1]-tr1; ch[t4]=ti1-cc[t1+t0]; ch[t4+t5]=ti1+cc[t1+t0]; t1+=ido; t2+=ido; t4+=t3; t6+=ido; } } static void dradfg(int ido,int ip,int l1,int idl1,float *cc,float *c1, float *c2,float *ch,float *ch2,float *wa){ static float tpi=6.283185307179586f; int idij,ipph,i,j,k,l,ic,ik,is; int t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; float dc2,ai1,ai2,ar1,ar2,ds2; int nbd; float dcp,arg,dsp,ar1h,ar2h; int idp2,ipp2; arg=tpi/(float)ip; dcp=cos(arg); dsp=sin(arg); ipph=(ip+1)>>1; ipp2=ip; idp2=ido; nbd=(ido-1)>>1; t0=l1*ido; t10=ip*ido; if(ido==1)goto L119; for(ik=0;ik<idl1;ik++)ch2[ik]=c2[ik]; t1=0; for(j=1;j<ip;j++){ t1+=t0; t2=t1; for(k=0;k<l1;k++){ ch[t2]=c1[t2]; t2+=ido; } } is=-ido; t1=0; if(nbd>l1){ for(j=1;j<ip;j++){ t1+=t0; is+=ido; t2= -ido+t1; for(k=0;k<l1;k++){ idij=is-1; 
t2+=ido; t3=t2; for(i=2;i<ido;i+=2){ idij+=2; t3+=2; ch[t3-1]=wa[idij-1]*c1[t3-1]+wa[idij]*c1[t3]; ch[t3]=wa[idij-1]*c1[t3]-wa[idij]*c1[t3-1]; } } } }else{ for(j=1;j<ip;j++){ is+=ido; idij=is-1; t1+=t0; t2=t1; for(i=2;i<ido;i+=2){ idij+=2; t2+=2; t3=t2; for(k=0;k<l1;k++){ ch[t3-1]=wa[idij-1]*c1[t3-1]+wa[idij]*c1[t3]; ch[t3]=wa[idij-1]*c1[t3]-wa[idij]*c1[t3-1]; t3+=ido; } } } } t1=0; t2=ipp2*t0; if(nbd<l1){ for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; for(i=2;i<ido;i+=2){ t3+=2; t4+=2; t5=t3-ido; t6=t4-ido; for(k=0;k<l1;k++){ t5+=ido; t6+=ido; c1[t5-1]=ch[t5-1]+ch[t6-1]; c1[t6-1]=ch[t5]-ch[t6]; c1[t5]=ch[t5]+ch[t6]; c1[t6]=ch[t6-1]-ch[t5-1]; } } } }else{ for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; for(k=0;k<l1;k++){ t5=t3; t6=t4; for(i=2;i<ido;i+=2){ t5+=2; t6+=2; c1[t5-1]=ch[t5-1]+ch[t6-1]; c1[t6-1]=ch[t5]-ch[t6]; c1[t5]=ch[t5]+ch[t6]; c1[t6]=ch[t6-1]-ch[t5-1]; } t3+=ido; t4+=ido; } } } L119: for(ik=0;ik<idl1;ik++)c2[ik]=ch2[ik]; t1=0; t2=ipp2*idl1; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1-ido; t4=t2-ido; for(k=0;k<l1;k++){ t3+=ido; t4+=ido; c1[t3]=ch[t3]+ch[t4]; c1[t4]=ch[t4]-ch[t3]; } } ar1=1.f; ai1=0.f; t1=0; t2=ipp2*idl1; t3=(ip-1)*idl1; for(l=1;l<ipph;l++){ t1+=idl1; t2-=idl1; ar1h=dcp*ar1-dsp*ai1; ai1=dcp*ai1+dsp*ar1; ar1=ar1h; t4=t1; t5=t2; t6=t3; t7=idl1; for(ik=0;ik<idl1;ik++){ ch2[t4++]=c2[ik]+ar1*c2[t7++]; ch2[t5++]=ai1*c2[t6++]; } dc2=ar1; ds2=ai1; ar2=ar1; ai2=ai1; t4=idl1; t5=(ipp2-1)*idl1; for(j=2;j<ipph;j++){ t4+=idl1; t5-=idl1; ar2h=dc2*ar2-ds2*ai2; ai2=dc2*ai2+ds2*ar2; ar2=ar2h; t6=t1; t7=t2; t8=t4; t9=t5; for(ik=0;ik<idl1;ik++){ ch2[t6++]+=ar2*c2[t8++]; ch2[t7++]+=ai2*c2[t9++]; } } } t1=0; for(j=1;j<ipph;j++){ t1+=idl1; t2=t1; for(ik=0;ik<idl1;ik++)ch2[ik]+=c2[t2++]; } if(ido<l1)goto L132; t1=0; t2=0; for(k=0;k<l1;k++){ t3=t1; t4=t2; for(i=0;i<ido;i++)cc[t4++]=ch[t3++]; t1+=ido; t2+=t10; } goto L135; L132: for(i=0;i<ido;i++){ t1=i; t2=i; for(k=0;k<l1;k++){ cc[t2]=ch[t1]; t1+=ido; t2+=t10; } } L135: t1=0; t2=ido<<1; t3=0; 
t4=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t2; t3+=t0; t4-=t0; t5=t1; t6=t3; t7=t4; for(k=0;k<l1;k++){ cc[t5-1]=ch[t6]; cc[t5]=ch[t7]; t5+=t10; t6+=ido; t7+=ido; } } if(ido==1)return; if(nbd<l1)goto L141; t1=-ido; t3=0; t4=0; t5=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t2; t3+=t2; t4+=t0; t5-=t0; t6=t1; t7=t3; t8=t4; t9=t5; for(k=0;k<l1;k++){ for(i=2;i<ido;i+=2){ ic=idp2-i; cc[i+t7-1]=ch[i+t8-1]+ch[i+t9-1]; cc[ic+t6-1]=ch[i+t8-1]-ch[i+t9-1]; cc[i+t7]=ch[i+t8]+ch[i+t9]; cc[ic+t6]=ch[i+t9]-ch[i+t8]; } t6+=t10; t7+=t10; t8+=ido; t9+=ido; } } return; L141: t1=-ido; t3=0; t4=0; t5=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t2; t3+=t2; t4+=t0; t5-=t0; for(i=2;i<ido;i+=2){ t6=idp2+t1-i; t7=i+t3; t8=i+t4; t9=i+t5; for(k=0;k<l1;k++){ cc[t7-1]=ch[t8-1]+ch[t9-1]; cc[t6-1]=ch[t8-1]-ch[t9-1]; cc[t7]=ch[t8]+ch[t9]; cc[t6]=ch[t9]-ch[t8]; t6+=t10; t7+=t10; t8+=ido; t9+=ido; } } } } static void drftf1(int n,float *c,float *ch,float *wa,int *ifac){ int i,k1,l1,l2; int na,kh,nf; int ip,iw,ido,idl1,ix2,ix3; nf=ifac[1]; na=1; l2=n; iw=n; for(k1=0;k1<nf;k1++){ kh=nf-k1; ip=ifac[kh+1]; l1=l2/ip; ido=n/l2; idl1=ido*l1; iw-=(ip-1)*ido; na=1-na; if(ip!=4)goto L102; ix2=iw+ido; ix3=ix2+ido; if(na!=0) dradf4(ido,l1,ch,c,wa+iw-1,wa+ix2-1,wa+ix3-1); else dradf4(ido,l1,c,ch,wa+iw-1,wa+ix2-1,wa+ix3-1); goto L110; L102: if(ip!=2)goto L104; if(na!=0)goto L103; dradf2(ido,l1,c,ch,wa+iw-1); goto L110; L103: dradf2(ido,l1,ch,c,wa+iw-1); goto L110; L104: if(ido==1)na=1-na; if(na!=0)goto L109; dradfg(ido,ip,l1,idl1,c,c,c,ch,ch,wa+iw-1); na=1; goto L110; L109: dradfg(ido,ip,l1,idl1,ch,ch,ch,c,c,wa+iw-1); na=0; L110: l2=l1; } if(na==1)return; for(i=0;i<n;i++)c[i]=ch[i]; } static void dradb2(int ido,int l1,float *cc,float *ch,float *wa1){ int i,k,t0,t1,t2,t3,t4,t5,t6; float ti2,tr2; t0=l1*ido; t1=0; t2=0; t3=(ido<<1)-1; for(k=0;k<l1;k++){ ch[t1]=cc[t2]+cc[t3+t2]; ch[t1+t0]=cc[t2]-cc[t3+t2]; t2=(t1+=ido)<<1; } if(ido<2)return; if(ido==2)goto L105; t1=0; t2=0; for(k=0;k<l1;k++){ t3=t1; t5=(t4=t2)+(ido<<1); t6=t0+t1; 
for(i=2;i<ido;i+=2){ t3+=2; t4+=2; t5-=2; t6+=2; ch[t3-1]=cc[t4-1]+cc[t5-1]; tr2=cc[t4-1]-cc[t5-1]; ch[t3]=cc[t4]-cc[t5]; ti2=cc[t4]+cc[t5]; ch[t6-1]=wa1[i-2]*tr2-wa1[i-1]*ti2; ch[t6]=wa1[i-2]*ti2+wa1[i-1]*tr2; } t2=(t1+=ido)<<1; } if(ido%2==1)return; L105: t1=ido-1; t2=ido-1; for(k=0;k<l1;k++){ ch[t1]=cc[t2]+cc[t2]; ch[t1+t0]=-(cc[t2+1]+cc[t2+1]); t1+=ido; t2+=ido<<1; } } static void dradb3(int ido,int l1,float *cc,float *ch,float *wa1, float *wa2){ static float taur = -.5f; static float taui = .8660254037844386f; int i,k,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; float ci2,ci3,di2,di3,cr2,cr3,dr2,dr3,ti2,tr2; t0=l1*ido; t1=0; t2=t0<<1; t3=ido<<1; t4=ido+(ido<<1); t5=0; for(k=0;k<l1;k++){ tr2=cc[t3-1]+cc[t3-1]; cr2=cc[t5]+(taur*tr2); ch[t1]=cc[t5]+tr2; ci3=taui*(cc[t3]+cc[t3]); ch[t1+t0]=cr2-ci3; ch[t1+t2]=cr2+ci3; t1+=ido; t3+=t4; t5+=t4; } if(ido==1)return; t1=0; t3=ido<<1; for(k=0;k<l1;k++){ t7=t1+(t1<<1); t6=(t5=t7+t3); t8=t1; t10=(t9=t1+t0)+t0; for(i=2;i<ido;i+=2){ t5+=2; t6-=2; t7+=2; t8+=2; t9+=2; t10+=2; tr2=cc[t5-1]+cc[t6-1]; cr2=cc[t7-1]+(taur*tr2); ch[t8-1]=cc[t7-1]+tr2; ti2=cc[t5]-cc[t6]; ci2=cc[t7]+(taur*ti2); ch[t8]=cc[t7]+ti2; cr3=taui*(cc[t5-1]-cc[t6-1]); ci3=taui*(cc[t5]+cc[t6]); dr2=cr2-ci3; dr3=cr2+ci3; di2=ci2+cr3; di3=ci2-cr3; ch[t9-1]=wa1[i-2]*dr2-wa1[i-1]*di2; ch[t9]=wa1[i-2]*di2+wa1[i-1]*dr2; ch[t10-1]=wa2[i-2]*dr3-wa2[i-1]*di3; ch[t10]=wa2[i-2]*di3+wa2[i-1]*dr3; } t1+=ido; } } static void dradb4(int ido,int l1,float *cc,float *ch,float *wa1, float *wa2,float *wa3){ static float sqrt2=1.414213562373095f; int i,k,t0,t1,t2,t3,t4,t5,t6,t7,t8; float ci2,ci3,ci4,cr2,cr3,cr4,ti1,ti2,ti3,ti4,tr1,tr2,tr3,tr4; t0=l1*ido; t1=0; t2=ido<<2; t3=0; t6=ido<<1; for(k=0;k<l1;k++){ t4=t3+t6; t5=t1; tr3=cc[t4-1]+cc[t4-1]; tr4=cc[t4]+cc[t4]; tr1=cc[t3]-cc[(t4+=t6)-1]; tr2=cc[t3]+cc[t4-1]; ch[t5]=tr2+tr3; ch[t5+=t0]=tr1-tr4; ch[t5+=t0]=tr2-tr3; ch[t5+=t0]=tr1+tr4; t1+=ido; t3+=t2; } if(ido<2)return; if(ido==2)goto L105; t1=0; for(k=0;k<l1;k++){ 
t5=(t4=(t3=(t2=t1<<2)+t6))+t6; t7=t1; for(i=2;i<ido;i+=2){ t2+=2; t3+=2; t4-=2; t5-=2; t7+=2; ti1=cc[t2]+cc[t5]; ti2=cc[t2]-cc[t5]; ti3=cc[t3]-cc[t4]; tr4=cc[t3]+cc[t4]; tr1=cc[t2-1]-cc[t5-1]; tr2=cc[t2-1]+cc[t5-1]; ti4=cc[t3-1]-cc[t4-1]; tr3=cc[t3-1]+cc[t4-1]; ch[t7-1]=tr2+tr3; cr3=tr2-tr3; ch[t7]=ti2+ti3; ci3=ti2-ti3; cr2=tr1-tr4; cr4=tr1+tr4; ci2=ti1+ti4; ci4=ti1-ti4; ch[(t8=t7+t0)-1]=wa1[i-2]*cr2-wa1[i-1]*ci2; ch[t8]=wa1[i-2]*ci2+wa1[i-1]*cr2; ch[(t8+=t0)-1]=wa2[i-2]*cr3-wa2[i-1]*ci3; ch[t8]=wa2[i-2]*ci3+wa2[i-1]*cr3; ch[(t8+=t0)-1]=wa3[i-2]*cr4-wa3[i-1]*ci4; ch[t8]=wa3[i-2]*ci4+wa3[i-1]*cr4; } t1+=ido; } if(ido%2 == 1)return; L105: t1=ido; t2=ido<<2; t3=ido-1; t4=ido+(ido<<1); for(k=0;k<l1;k++){ t5=t3; ti1=cc[t1]+cc[t4]; ti2=cc[t4]-cc[t1]; tr1=cc[t1-1]-cc[t4-1]; tr2=cc[t1-1]+cc[t4-1]; ch[t5]=tr2+tr2; ch[t5+=t0]=sqrt2*(tr1-ti1); ch[t5+=t0]=ti2+ti2; ch[t5+=t0]=-sqrt2*(tr1+ti1); t3+=ido; t1+=t2; t4+=t2; } } static void dradbg(int ido,int ip,int l1,int idl1,float *cc,float *c1, float *c2,float *ch,float *ch2,float *wa){ static float tpi=6.283185307179586f; int idij,ipph,i,j,k,l,ik,is,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10, t11,t12; float dc2,ai1,ai2,ar1,ar2,ds2; int nbd; float dcp,arg,dsp,ar1h,ar2h; int ipp2; t10=ip*ido; t0=l1*ido; arg=tpi/(float)ip; dcp=cos(arg); dsp=sin(arg); nbd=(ido-1)>>1; ipp2=ip; ipph=(ip+1)>>1; if(ido<l1)goto L103; t1=0; t2=0; for(k=0;k<l1;k++){ t3=t1; t4=t2; for(i=0;i<ido;i++){ ch[t3]=cc[t4]; t3++; t4++; } t1+=ido; t2+=t10; } goto L106; L103: t1=0; for(i=0;i<ido;i++){ t2=t1; t3=t1; for(k=0;k<l1;k++){ ch[t2]=cc[t3]; t2+=ido; t3+=t10; } t1++; } L106: t1=0; t2=ipp2*t0; t7=(t5=ido<<1); for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; t6=t5; for(k=0;k<l1;k++){ ch[t3]=cc[t6-1]+cc[t6-1]; ch[t4]=cc[t6]+cc[t6]; t3+=ido; t4+=ido; t6+=t10; } t5+=t7; } if (ido == 1)goto L116; if(nbd<l1)goto L112; t1=0; t2=ipp2*t0; t7=0; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; t7+=(ido<<1); t8=t7; for(k=0;k<l1;k++){ t5=t3; t6=t4; t9=t8; t11=t8; 
for(i=2;i<ido;i+=2){ t5+=2; t6+=2; t9+=2; t11-=2; ch[t5-1]=cc[t9-1]+cc[t11-1]; ch[t6-1]=cc[t9-1]-cc[t11-1]; ch[t5]=cc[t9]-cc[t11]; ch[t6]=cc[t9]+cc[t11]; } t3+=ido; t4+=ido; t8+=t10; } } goto L116; L112: t1=0; t2=ipp2*t0; t7=0; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; t7+=(ido<<1); t8=t7; t9=t7; for(i=2;i<ido;i+=2){ t3+=2; t4+=2; t8+=2; t9-=2; t5=t3; t6=t4; t11=t8; t12=t9; for(k=0;k<l1;k++){ ch[t5-1]=cc[t11-1]+cc[t12-1]; ch[t6-1]=cc[t11-1]-cc[t12-1]; ch[t5]=cc[t11]-cc[t12]; ch[t6]=cc[t11]+cc[t12]; t5+=ido; t6+=ido; t11+=t10; t12+=t10; } } } L116: ar1=1.f; ai1=0.f; t1=0; t9=(t2=ipp2*idl1); t3=(ip-1)*idl1; for(l=1;l<ipph;l++){ t1+=idl1; t2-=idl1; ar1h=dcp*ar1-dsp*ai1; ai1=dcp*ai1+dsp*ar1; ar1=ar1h; t4=t1; t5=t2; t6=0; t7=idl1; t8=t3; for(ik=0;ik<idl1;ik++){ c2[t4++]=ch2[t6++]+ar1*ch2[t7++]; c2[t5++]=ai1*ch2[t8++]; } dc2=ar1; ds2=ai1; ar2=ar1; ai2=ai1; t6=idl1; t7=t9-idl1; for(j=2;j<ipph;j++){ t6+=idl1; t7-=idl1; ar2h=dc2*ar2-ds2*ai2; ai2=dc2*ai2+ds2*ar2; ar2=ar2h; t4=t1; t5=t2; t11=t6; t12=t7; for(ik=0;ik<idl1;ik++){ c2[t4++]+=ar2*ch2[t11++]; c2[t5++]+=ai2*ch2[t12++]; } } } t1=0; for(j=1;j<ipph;j++){ t1+=idl1; t2=t1; for(ik=0;ik<idl1;ik++)ch2[ik]+=ch2[t2++]; } t1=0; t2=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; for(k=0;k<l1;k++){ ch[t3]=c1[t3]-c1[t4]; ch[t4]=c1[t3]+c1[t4]; t3+=ido; t4+=ido; } } if(ido==1)goto L132; if(nbd<l1)goto L128; t1=0; t2=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; for(k=0;k<l1;k++){ t5=t3; t6=t4; for(i=2;i<ido;i+=2){ t5+=2; t6+=2; ch[t5-1]=c1[t5-1]-c1[t6]; ch[t6-1]=c1[t5-1]+c1[t6]; ch[t5]=c1[t5]+c1[t6-1]; ch[t6]=c1[t5]-c1[t6-1]; } t3+=ido; t4+=ido; } } goto L132; L128: t1=0; t2=ipp2*t0; for(j=1;j<ipph;j++){ t1+=t0; t2-=t0; t3=t1; t4=t2; for(i=2;i<ido;i+=2){ t3+=2; t4+=2; t5=t3; t6=t4; for(k=0;k<l1;k++){ ch[t5-1]=c1[t5-1]-c1[t6]; ch[t6-1]=c1[t5-1]+c1[t6]; ch[t5]=c1[t5]+c1[t6-1]; ch[t6]=c1[t5]-c1[t6-1]; t5+=ido; t6+=ido; } } } L132: if(ido==1)return; for(ik=0;ik<idl1;ik++)c2[ik]=ch2[ik]; t1=0; 
for(j=1;j<ip;j++){ t2=(t1+=t0); for(k=0;k<l1;k++){ c1[t2]=ch[t2]; t2+=ido; } } if(nbd>l1)goto L139; is= -ido-1; t1=0; for(j=1;j<ip;j++){ is+=ido; t1+=t0; idij=is; t2=t1; for(i=2;i<ido;i+=2){ t2+=2; idij+=2; t3=t2; for(k=0;k<l1;k++){ c1[t3-1]=wa[idij-1]*ch[t3-1]-wa[idij]*ch[t3]; c1[t3]=wa[idij-1]*ch[t3]+wa[idij]*ch[t3-1]; t3+=ido; } } } return; L139: is= -ido-1; t1=0; for(j=1;j<ip;j++){ is+=ido; t1+=t0; t2=t1; for(k=0;k<l1;k++){ idij=is; t3=t2; for(i=2;i<ido;i+=2){ idij+=2; t3+=2; c1[t3-1]=wa[idij-1]*ch[t3-1]-wa[idij]*ch[t3]; c1[t3]=wa[idij-1]*ch[t3]+wa[idij]*ch[t3-1]; } t2+=ido; } } } static void drftb1(int n, float *c, float *ch, float *wa, int *ifac){ int i,k1,l1,l2; int na; int nf,ip,iw,ix2,ix3,ido,idl1; nf=ifac[1]; na=0; l1=1; iw=1; for(k1=0;k1<nf;k1++){ ip=ifac[k1 + 2]; l2=ip*l1; ido=n/l2; idl1=ido*l1; if(ip!=4)goto L103; ix2=iw+ido; ix3=ix2+ido; if(na!=0) dradb4(ido,l1,ch,c,wa+iw-1,wa+ix2-1,wa+ix3-1); else dradb4(ido,l1,c,ch,wa+iw-1,wa+ix2-1,wa+ix3-1); na=1-na; goto L115; L103: if(ip!=2)goto L106; if(na!=0) dradb2(ido,l1,ch,c,wa+iw-1); else dradb2(ido,l1,c,ch,wa+iw-1); na=1-na; goto L115; L106: if(ip!=3)goto L109; ix2=iw+ido; if(na!=0) dradb3(ido,l1,ch,c,wa+iw-1,wa+ix2-1); else dradb3(ido,l1,c,ch,wa+iw-1,wa+ix2-1); na=1-na; goto L115; L109: /* The radix five case can be translated later..... 
*/ /* if(ip!=5)goto L112; ix2=iw+ido; ix3=ix2+ido; ix4=ix3+ido; if(na!=0) dradb5(ido,l1,ch,c,wa+iw-1,wa+ix2-1,wa+ix3-1,wa+ix4-1); else dradb5(ido,l1,c,ch,wa+iw-1,wa+ix2-1,wa+ix3-1,wa+ix4-1); na=1-na; goto L115; L112:*/ if(na!=0) dradbg(ido,ip,l1,idl1,ch,ch,ch,c,c,wa+iw-1); else dradbg(ido,ip,l1,idl1,c,c,c,ch,ch,wa+iw-1); if(ido==1)na=1-na; L115: l1=l2; iw+=(ip-1)*ido; } if(na==0)return; for(i=0;i<n;i++)c[i]=ch[i]; } void drft_forward(drft_lookup *l,float *data){ if(l->n==1)return; drftf1(l->n,data,l->trigcache,l->trigcache+l->n,l->splitcache); } void drft_backward(drft_lookup *l,float *data){ if (l->n==1)return; drftb1(l->n,data,l->trigcache,l->trigcache+l->n,l->splitcache); } void drft_init(drft_lookup *l,int n){ l->n=n; l->trigcache=_ogg_calloc(3*n,sizeof(*l->trigcache)); l->splitcache=_ogg_calloc(32,sizeof(*l->splitcache)); fdrffti(n, l->trigcache, l->splitcache); } void drft_clear(drft_lookup *l){ if(l){ if(l->trigcache)_ogg_free(l->trigcache); if(l->splitcache)_ogg_free(l->splitcache); memset(l,0,sizeof(*l)); } }
gpl-2.0
dhkim1027/linux
crypto/asymmetric_keys/pkcs7_verify.c
958
9844
/* Verify the signature on a PKCS#7 message. * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "PKCS7: "fmt #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/asn1.h> #include <crypto/hash.h> #include "public_key.h" #include "pkcs7_parser.h" /* * Digest the relevant parts of the PKCS#7 data */ static int pkcs7_digest(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct crypto_shash *tfm; struct shash_desc *desc; size_t digest_size, desc_size; void *digest; int ret; kenter(",%u,%u", sinfo->index, sinfo->sig.pkey_hash_algo); if (sinfo->sig.pkey_hash_algo >= PKEY_HASH__LAST || !hash_algo_name[sinfo->sig.pkey_hash_algo]) return -ENOPKG; /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. */ tfm = crypto_alloc_shash(hash_algo_name[sinfo->sig.pkey_hash_algo], 0, 0); if (IS_ERR(tfm)) return (PTR_ERR(tfm) == -ENOENT) ? 
-ENOPKG : PTR_ERR(tfm); desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); sinfo->sig.digest_size = digest_size = crypto_shash_digestsize(tfm); ret = -ENOMEM; digest = kzalloc(digest_size + desc_size, GFP_KERNEL); if (!digest) goto error_no_desc; desc = digest + digest_size; desc->tfm = tfm; desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* Digest the message [RFC2315 9.3] */ ret = crypto_shash_init(desc); if (ret < 0) goto error; ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len, digest); if (ret < 0) goto error; pr_devel("MsgDigest = [%*ph]\n", 8, digest); /* However, if there are authenticated attributes, there must be a * message digest attribute amongst them which corresponds to the * digest we just calculated. */ if (sinfo->msgdigest) { u8 tag; if (sinfo->msgdigest_len != sinfo->sig.digest_size) { pr_debug("Sig %u: Invalid digest size (%u)\n", sinfo->index, sinfo->msgdigest_len); ret = -EBADMSG; goto error; } if (memcmp(digest, sinfo->msgdigest, sinfo->msgdigest_len) != 0) { pr_debug("Sig %u: Message digest doesn't match\n", sinfo->index); ret = -EKEYREJECTED; goto error; } /* We then calculate anew, using the authenticated attributes * as the contents of the digest instead. Note that we need to * convert the attributes from a CONT.0 into a SET before we * hash it. */ memset(digest, 0, sinfo->sig.digest_size); ret = crypto_shash_init(desc); if (ret < 0) goto error; tag = ASN1_CONS_BIT | ASN1_SET; ret = crypto_shash_update(desc, &tag, 1); if (ret < 0) goto error; ret = crypto_shash_finup(desc, sinfo->authattrs, sinfo->authattrs_len, digest); if (ret < 0) goto error; pr_devel("AADigest = [%*ph]\n", 8, digest); } sinfo->sig.digest = digest; digest = NULL; error: kfree(digest); error_no_desc: crypto_free_shash(tfm); kleave(" = %d", ret); return ret; } /* * Find the key (X.509 certificate) to use to verify a PKCS#7 message. PKCS#7 * uses the issuer's name and the issuing certificate serial number for * matching purposes. 
These must match the certificate issuer's name (not * subject's name) and the certificate serial number [RFC 2315 6.7]. */ static int pkcs7_find_key(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct x509_certificate *x509; unsigned certix = 1; kenter("%u", sinfo->index); for (x509 = pkcs7->certs; x509; x509 = x509->next, certix++) { /* I'm _assuming_ that the generator of the PKCS#7 message will * encode the fields from the X.509 cert in the same way in the * PKCS#7 message - but I can't be 100% sure of that. It's * possible this will need element-by-element comparison. */ if (!asymmetric_key_id_same(x509->id, sinfo->signing_cert_id)) continue; pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); if (x509->pub->pkey_algo != sinfo->sig.pkey_algo) { pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", sinfo->index); continue; } sinfo->signer = x509; return 0; } /* The relevant X.509 cert isn't found here, but it might be found in * the trust keyring. */ pr_debug("Sig %u: Issuing X.509 cert not found (#%*phN)\n", sinfo->index, sinfo->signing_cert_id->len, sinfo->signing_cert_id->data); return 0; } /* * Verify the internal certificate chain as best we can. */ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { struct x509_certificate *x509 = sinfo->signer, *p; int ret; kenter(""); for (p = pkcs7->certs; p; p = p->next) p->seen = false; for (;;) { pr_debug("verify %s: %*phN\n", x509->subject, x509->raw_serial_size, x509->raw_serial); x509->seen = true; ret = x509_get_sig_params(x509); if (ret < 0) goto maybe_missing_crypto_in_x509; pr_debug("- issuer %s\n", x509->issuer); if (x509->authority) pr_debug("- authkeyid %*phN\n", x509->authority->len, x509->authority->data); if (!x509->authority || strcmp(x509->subject, x509->issuer) == 0) { /* If there's no authority certificate specified, then * the certificate must be self-signed and is the root * of the chain. 
Likewise if the cert is its own * authority. */ pr_debug("- no auth?\n"); if (x509->raw_subject_size != x509->raw_issuer_size || memcmp(x509->raw_subject, x509->raw_issuer, x509->raw_issuer_size) != 0) return 0; ret = x509_check_signature(x509->pub, x509); if (ret < 0) goto maybe_missing_crypto_in_x509; x509->signer = x509; pr_debug("- self-signed\n"); return 0; } /* Look through the X.509 certificates in the PKCS#7 message's * list to see if the next one is there. */ pr_debug("- want %*phN\n", x509->authority->len, x509->authority->data); for (p = pkcs7->certs; p; p = p->next) { if (!p->skid) continue; pr_debug("- cmp [%u] %*phN\n", p->index, p->skid->len, p->skid->data); if (asymmetric_key_id_same(p->skid, x509->authority)) goto found_issuer; } /* We didn't find the root of this chain */ pr_debug("- top\n"); return 0; found_issuer: pr_debug("- subject %s\n", p->subject); if (p->seen) { pr_warn("Sig %u: X.509 chain contains loop\n", sinfo->index); return 0; } ret = x509_check_signature(p->pub, x509); if (ret < 0) return ret; x509->signer = p; if (x509 == p) { pr_debug("- self-signed\n"); return 0; } x509 = p; might_sleep(); } maybe_missing_crypto_in_x509: /* Just prune the certificate chain at this point if we lack some * crypto module to go further. Note, however, we don't want to set * sinfo->missing_crypto as the signed info block may still be * validatable against an X.509 cert lower in the chain that we have a * trusted copy of. */ if (ret == -ENOPKG) return 0; return ret; } /* * Verify one signed information block from a PKCS#7 message. 
*/ static int pkcs7_verify_one(struct pkcs7_message *pkcs7, struct pkcs7_signed_info *sinfo) { int ret; kenter(",%u", sinfo->index); /* First of all, digest the data in the PKCS#7 message and the * signed information block */ ret = pkcs7_digest(pkcs7, sinfo); if (ret < 0) return ret; /* Find the key for the signature if there is one */ ret = pkcs7_find_key(pkcs7, sinfo); if (ret < 0) return ret; if (!sinfo->signer) return 0; pr_devel("Using X.509[%u] for sig %u\n", sinfo->signer->index, sinfo->index); /* Verify the PKCS#7 binary against the key */ ret = public_key_verify_signature(sinfo->signer->pub, &sinfo->sig); if (ret < 0) return ret; pr_devel("Verified signature %u\n", sinfo->index); /* Verify the internal certificate chain */ return pkcs7_verify_sig_chain(pkcs7, sinfo); } /** * pkcs7_verify - Verify a PKCS#7 message * @pkcs7: The PKCS#7 message to be verified * * Verify a PKCS#7 message is internally consistent - that is, the data digest * matches the digest in the AuthAttrs and any signature in the message or one * of the X.509 certificates it carries that matches another X.509 cert in the * message can be verified. * * This does not look to match the contents of the PKCS#7 message against any * external public keys. 
* * Returns, in order of descending priority: * * (*) -EKEYREJECTED if a signature failed to match for which we found an * appropriate X.509 certificate, or: * * (*) -EBADMSG if some part of the message was invalid, or: * * (*) -ENOPKG if none of the signature chains are verifiable because suitable * crypto modules couldn't be found, or: * * (*) 0 if all the signature chains that don't incur -ENOPKG can be verified * (note that a signature chain may be of zero length), or: */ int pkcs7_verify(struct pkcs7_message *pkcs7) { struct pkcs7_signed_info *sinfo; struct x509_certificate *x509; int enopkg = -ENOPKG; int ret, n; kenter(""); for (n = 0, x509 = pkcs7->certs; x509; x509 = x509->next, n++) { ret = x509_get_sig_params(x509); if (ret < 0) return ret; pr_debug("X.509[%u] %*phN\n", n, x509->authority->len, x509->authority->data); } for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_verify_one(pkcs7, sinfo); if (ret < 0) { if (ret == -ENOPKG) { sinfo->unsupported_crypto = true; continue; } kleave(" = %d", ret); return ret; } enopkg = 0; } kleave(" = %d", enopkg); return enopkg; } EXPORT_SYMBOL_GPL(pkcs7_verify);
gpl-2.0
DC07/spirit_sprout
drivers/staging/comedi/drivers/ssv_dnp.c
1726
7313
/* comedi/drivers/ssv_dnp.c generic comedi driver for SSV Embedded Systems' DIL/Net-PCs Copyright (C) 2001 Robert Schwebel <robert@schwebel.de> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ssv_dnp Description: SSV Embedded Systems DIL/Net-PC Author: Robert Schwebel <robert@schwebel.de> Devices: [SSV Embedded Systems] DIL/Net-PC 1486 (dnp-1486) Status: unknown */ /* include files ----------------------------------------------------------- */ #include "../comedidev.h" /* Some global definitions: the registers of the DNP ----------------------- */ /* */ /* For port A and B the mode register has bits corresponding to the output */ /* pins, where Bit-N = 0 -> input, Bit-N = 1 -> output. Note that bits */ /* 4 to 7 correspond to pin 0..3 for port C data register. Ensure that bits */ /* 0..3 remain unchanged! For details about Port C Mode Register see */ /* the remarks in dnp_insn_config() below. 
*/ #define CSCIR 0x22 /* Chip Setup and Control Index Register */ #define CSCDR 0x23 /* Chip Setup and Control Data Register */ #define PAMR 0xa5 /* Port A Mode Register */ #define PADR 0xa9 /* Port A Data Register */ #define PBMR 0xa4 /* Port B Mode Register */ #define PBDR 0xa8 /* Port B Data Register */ #define PCMR 0xa3 /* Port C Mode Register */ #define PCDR 0xa7 /* Port C Data Register */ /* ------------------------------------------------------------------------- */ /* The insn_bits interface allows packed reading/writing of DIO channels. */ /* The comedi core can convert between insn_bits and insn_read/write, so you */ /* are able to use these instructions as well. */ /* ------------------------------------------------------------------------- */ static int dnp_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { /* The insn data is a mask in data[0] and the new data in data[1], */ /* each channel cooresponding to a bit. */ /* Ports A and B are straight forward: each bit corresponds to an */ /* output pin with the same order. Port C is different: bits 0...3 */ /* correspond to bits 4...7 of the output register (PCDR). */ if (data[0]) { outb(PADR, CSCIR); outb((inb(CSCDR) & ~(u8) (data[0] & 0x0000FF)) | (u8) (data[1] & 0x0000FF), CSCDR); outb(PBDR, CSCIR); outb((inb(CSCDR) & ~(u8) ((data[0] & 0x00FF00) >> 8)) | (u8) ((data[1] & 0x00FF00) >> 8), CSCDR); outb(PCDR, CSCIR); outb((inb(CSCDR) & ~(u8) ((data[0] & 0x0F0000) >> 12)) | (u8) ((data[1] & 0x0F0000) >> 12), CSCDR); } /* on return, data[1] contains the value of the digital input lines. */ outb(PADR, CSCIR); data[1] = inb(CSCDR); outb(PBDR, CSCIR); data[1] += inb(CSCDR) << 8; outb(PCDR, CSCIR); data[1] += ((inb(CSCDR) & 0xF0) << 12); return insn->n; } /* ------------------------------------------------------------------------- */ /* Configure the direction of the bidirectional digital i/o pins. 
chanspec */ /* contains the channel to be changed and data[0] contains either */ /* COMEDI_INPUT or COMEDI_OUTPUT. */ /* ------------------------------------------------------------------------- */ static int dnp_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { u8 register_buffer; /* reduces chanspec to lower 16 bits */ int chan = CR_CHAN(insn->chanspec); switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: case INSN_CONFIG_DIO_INPUT: break; case INSN_CONFIG_DIO_QUERY: data[1] = (inb(CSCDR) & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; break; } /* Test: which port does the channel belong to? */ /* We have to pay attention with port C: this is the meaning of PCMR: */ /* Bit in PCMR: 7 6 5 4 3 2 1 0 */ /* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch */ if ((chan >= 0) && (chan <= 7)) { /* this is port A */ outb(PAMR, CSCIR); } else if ((chan >= 8) && (chan <= 15)) { /* this is port B */ chan -= 8; outb(PBMR, CSCIR); } else if ((chan >= 16) && (chan <= 19)) { /* this is port C; multiplication with 2 brings bits into */ /* correct position for PCMR! 
*/ chan -= 16; chan *= 2; outb(PCMR, CSCIR); } else { return -EINVAL; } /* read 'old' direction of the port and set bits (out=1, in=0) */ register_buffer = inb(CSCDR); if (data[0] == COMEDI_OUTPUT) register_buffer |= (1 << chan); else register_buffer &= ~(1 << chan); outb(register_buffer, CSCDR); return 1; } static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; s = &dev->subdevices[0]; /* digital i/o subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 20; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = dnp_dio_insn_bits; s->insn_config = dnp_dio_insn_config; /* We use the I/O ports 0x22,0x23 and 0xa3-0xa9, which are always * allocated for the primary 8259, so we don't need to allocate them * ourselves. */ /* configure all ports as input (default) */ outb(PAMR, CSCIR); outb(0x00, CSCDR); outb(PBMR, CSCIR); outb(0x00, CSCDR); outb(PCMR, CSCIR); outb((inb(CSCDR) & 0xAA), CSCDR); dev_info(dev->class_dev, "%s: attached\n", dev->board_name); return 1; } static void dnp_detach(struct comedi_device *dev) { outb(PAMR, CSCIR); outb(0x00, CSCDR); outb(PBMR, CSCIR); outb(0x00, CSCDR); outb(PCMR, CSCIR); outb((inb(CSCDR) & 0xAA), CSCDR); } static struct comedi_driver dnp_driver = { .driver_name = "dnp-1486", .module = THIS_MODULE, .attach = dnp_attach, .detach = dnp_detach, }; module_comedi_driver(dnp_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
arunov/Goldeneye-Bufflehead
drivers/video/matrox/matroxfb_Ti3026.c
1982
26196
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200 and G400 * * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.65 2002/08/14 * * MTRR stuff: 1998 Tom Rini <trini@kernel.crashing.org> * * Contributors: "menion?" <menion@mindless.com> * Betatesting, fixes, ideas * * "Kurt Garloff" <garloff@suse.de> * Betatesting, fixes, ideas, videomodes, videomodes timmings * * "Tom Rini" <trini@kernel.crashing.org> * MTRR stuff, PPC cleanups, betatesting, fixes, ideas * * "Bibek Sahu" <scorpio@dodds.net> * Access device through readb|w|l and write b|w|l * Extensive debugging stuff * * "Daniel Haun" <haund@usa.net> * Testing, hardware cursor fixes * * "Scott Wood" <sawst46+@pitt.edu> * Fixes * * "Gerd Knorr" <kraxel@goldbach.isdn.cs.tu-berlin.de> * Betatesting * * "Kelly French" <targon@hazmat.com> * "Fernando Herrera" <fherrera@eurielec.etsit.upm.es> * Betatesting, bug reporting * * "Pablo Bianucci" <pbian@pccp.com.ar> * Fixes, ideas, betatesting * * "Inaky Perez Gonzalez" <inaky@peloncho.fis.ucm.es> * Fixes, enhandcements, ideas, betatesting * * "Ryuichi Oikawa" <roikawa@rr.iiij4u.or.jp> * PPC betatesting, PPC support, backward compatibility * * "Paul Womar" <Paul@pwomar.demon.co.uk> * "Owen Waller" <O.Waller@ee.qub.ac.uk> * PPC betatesting * * "Thomas Pornin" <pornin@bolet.ens.fr> * Alpha betatesting * * "Pieter van Leuven" <pvl@iae.nl> * "Ulf Jaenicke-Roessler" <ujr@physik.phy.tu-dresden.de> * G100 testing * * "H. 
Peter Arvin" <hpa@transmeta.com> * Ideas * * "Cort Dougan" <cort@cs.nmt.edu> * CHRP fixes and PReP cleanup * * "Mark Vojkovich" <mvojkovi@ucsd.edu> * G400 support * * (following author is not in any relation with this code, but his code * is included in this driver) * * Based on framebuffer driver for VBE 2.0 compliant graphic boards * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> * * (following author is not in any relation with this code, but his ideas * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> * */ #include "matroxfb_Ti3026.h" #include "matroxfb_misc.h" #include "matroxfb_accel.h" #include <linux/matroxfb.h> #ifdef CONFIG_FB_MATROX_MILLENIUM #define outTi3026 matroxfb_DAC_out #define inTi3026 matroxfb_DAC_in #define TVP3026_INDEX 0x00 #define TVP3026_PALWRADD 0x00 #define TVP3026_PALDATA 0x01 #define TVP3026_PIXRDMSK 0x02 #define TVP3026_PALRDADD 0x03 #define TVP3026_CURCOLWRADD 0x04 #define TVP3026_CLOVERSCAN 0x00 #define TVP3026_CLCOLOR0 0x01 #define TVP3026_CLCOLOR1 0x02 #define TVP3026_CLCOLOR2 0x03 #define TVP3026_CURCOLDATA 0x05 #define TVP3026_CURCOLRDADD 0x07 #define TVP3026_CURCTRL 0x09 #define TVP3026_X_DATAREG 0x0A #define TVP3026_CURRAMDATA 0x0B #define TVP3026_CURPOSXL 0x0C #define TVP3026_CURPOSXH 0x0D #define TVP3026_CURPOSYL 0x0E #define TVP3026_CURPOSYH 0x0F #define TVP3026_XSILICONREV 0x01 #define TVP3026_XCURCTRL 0x06 #define TVP3026_XCURCTRL_DIS 0x00 /* transparent, transparent, transparent, transparent */ #define TVP3026_XCURCTRL_3COLOR 0x01 /* transparent, 0, 1, 2 */ #define TVP3026_XCURCTRL_XGA 0x02 /* 0, 1, transparent, complement */ #define TVP3026_XCURCTRL_XWIN 0x03 /* transparent, transparent, 0, 1 */ #define TVP3026_XCURCTRL_BLANK2048 0x00 #define TVP3026_XCURCTRL_BLANK4096 0x10 #define TVP3026_XCURCTRL_INTERLACED 0x20 #define TVP3026_XCURCTRL_ODD 0x00 /* ext.signal ODD/\EVEN */ #define TVP3026_XCURCTRL_EVEN 0x40 /* ext.signal EVEN/\ODD */ #define 
TVP3026_XCURCTRL_INDIRECT 0x00 #define TVP3026_XCURCTRL_DIRECT 0x80 #define TVP3026_XLATCHCTRL 0x0F #define TVP3026_XLATCHCTRL_1_1 0x06 #define TVP3026_XLATCHCTRL_2_1 0x07 #define TVP3026_XLATCHCTRL_4_1 0x06 #define TVP3026_XLATCHCTRL_8_1 0x06 #define TVP3026_XLATCHCTRL_16_1 0x06 #define TVP3026A_XLATCHCTRL_4_3 0x06 /* ??? do not understand... but it works... !!! */ #define TVP3026A_XLATCHCTRL_8_3 0x07 #define TVP3026B_XLATCHCTRL_4_3 0x08 #define TVP3026B_XLATCHCTRL_8_3 0x06 /* ??? do not understand... but it works... !!! */ #define TVP3026_XTRUECOLORCTRL 0x18 #define TVP3026_XTRUECOLORCTRL_VRAM_SHIFT_ACCEL 0x00 #define TVP3026_XTRUECOLORCTRL_VRAM_SHIFT_TVP 0x20 #define TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR 0x80 #define TVP3026_XTRUECOLORCTRL_TRUECOLOR 0x40 /* paletized */ #define TVP3026_XTRUECOLORCTRL_DIRECTCOLOR 0x00 #define TVP3026_XTRUECOLORCTRL_24_ALTERNATE 0x08 /* 5:4/5:2 instead of 4:3/8:3 */ #define TVP3026_XTRUECOLORCTRL_RGB_888 0x16 /* 4:3/8:3 (or 5:4/5:2) */ #define TVP3026_XTRUECOLORCTRL_BGR_888 0x17 #define TVP3026_XTRUECOLORCTRL_ORGB_8888 0x06 #define TVP3026_XTRUECOLORCTRL_BGRO_8888 0x07 #define TVP3026_XTRUECOLORCTRL_RGB_565 0x05 #define TVP3026_XTRUECOLORCTRL_ORGB_1555 0x04 #define TVP3026_XTRUECOLORCTRL_RGB_664 0x03 #define TVP3026_XTRUECOLORCTRL_RGBO_4444 0x01 #define TVP3026_XMUXCTRL 0x19 #define TVP3026_XMUXCTRL_MEMORY_8BIT 0x01 /* - */ #define TVP3026_XMUXCTRL_MEMORY_16BIT 0x02 /* - */ #define TVP3026_XMUXCTRL_MEMORY_32BIT 0x03 /* 2MB RAM, 512K * 4 */ #define TVP3026_XMUXCTRL_MEMORY_64BIT 0x04 /* >2MB RAM, 512K * 8 & more */ #define TVP3026_XMUXCTRL_PIXEL_4BIT 0x40 /* L0,H0,L1,H1... */ #define TVP3026_XMUXCTRL_PIXEL_4BIT_SWAPPED 0x60 /* H0,L0,H1,L1... 
*/ #define TVP3026_XMUXCTRL_PIXEL_8BIT 0x48 #define TVP3026_XMUXCTRL_PIXEL_16BIT 0x50 #define TVP3026_XMUXCTRL_PIXEL_32BIT 0x58 #define TVP3026_XMUXCTRL_VGA 0x98 /* VGA MEMORY, 8BIT PIXEL */ #define TVP3026_XCLKCTRL 0x1A #define TVP3026_XCLKCTRL_DIV1 0x00 #define TVP3026_XCLKCTRL_DIV2 0x10 #define TVP3026_XCLKCTRL_DIV4 0x20 #define TVP3026_XCLKCTRL_DIV8 0x30 #define TVP3026_XCLKCTRL_DIV16 0x40 #define TVP3026_XCLKCTRL_DIV32 0x50 #define TVP3026_XCLKCTRL_DIV64 0x60 #define TVP3026_XCLKCTRL_CLKSTOPPED 0x70 #define TVP3026_XCLKCTRL_SRC_CLK0 0x00 #define TVP3026_XCLKCTRL_SRC_CLK1 0x01 #define TVP3026_XCLKCTRL_SRC_CLK2 0x02 /* CLK2 is TTL source*/ #define TVP3026_XCLKCTRL_SRC_NCLK2 0x03 /* not CLK2 is TTL source */ #define TVP3026_XCLKCTRL_SRC_ECLK2 0x04 /* CLK2 and not CLK2 is ECL source */ #define TVP3026_XCLKCTRL_SRC_PLL 0x05 #define TVP3026_XCLKCTRL_SRC_DIS 0x06 /* disable & poweroff internal clock */ #define TVP3026_XCLKCTRL_SRC_CLK0VGA 0x07 #define TVP3026_XPALETTEPAGE 0x1C #define TVP3026_XGENCTRL 0x1D #define TVP3026_XGENCTRL_HSYNC_POS 0x00 #define TVP3026_XGENCTRL_HSYNC_NEG 0x01 #define TVP3026_XGENCTRL_VSYNC_POS 0x00 #define TVP3026_XGENCTRL_VSYNC_NEG 0x02 #define TVP3026_XGENCTRL_LITTLE_ENDIAN 0x00 #define TVP3026_XGENCTRL_BIG_ENDIAN 0x08 #define TVP3026_XGENCTRL_BLACK_0IRE 0x00 #define TVP3026_XGENCTRL_BLACK_75IRE 0x10 #define TVP3026_XGENCTRL_NO_SYNC_ON_GREEN 0x00 #define TVP3026_XGENCTRL_SYNC_ON_GREEN 0x20 #define TVP3026_XGENCTRL_OVERSCAN_DIS 0x00 #define TVP3026_XGENCTRL_OVERSCAN_EN 0x40 #define TVP3026_XMISCCTRL 0x1E #define TVP3026_XMISCCTRL_DAC_PUP 0x00 #define TVP3026_XMISCCTRL_DAC_PDOWN 0x01 #define TVP3026_XMISCCTRL_DAC_EXT 0x00 /* or 8, bit 3 is ignored */ #define TVP3026_XMISCCTRL_DAC_6BIT 0x04 #define TVP3026_XMISCCTRL_DAC_8BIT 0x0C #define TVP3026_XMISCCTRL_PSEL_DIS 0x00 #define TVP3026_XMISCCTRL_PSEL_EN 0x10 #define TVP3026_XMISCCTRL_PSEL_LOW 0x00 /* PSEL high selects directcolor */ #define TVP3026_XMISCCTRL_PSEL_HIGH 0x20 /* PSEL high selects 
truecolor or pseudocolor */ #define TVP3026_XGENIOCTRL 0x2A #define TVP3026_XGENIODATA 0x2B #define TVP3026_XPLLADDR 0x2C #define TVP3026_XPLLADDR_X(LOOP,MCLK,PIX) (((LOOP)<<4) | ((MCLK)<<2) | (PIX)) #define TVP3026_XPLLDATA_N 0x00 #define TVP3026_XPLLDATA_M 0x01 #define TVP3026_XPLLDATA_P 0x02 #define TVP3026_XPLLDATA_STAT 0x03 #define TVP3026_XPIXPLLDATA 0x2D #define TVP3026_XMEMPLLDATA 0x2E #define TVP3026_XLOOPPLLDATA 0x2F #define TVP3026_XCOLKEYOVRMIN 0x30 #define TVP3026_XCOLKEYOVRMAX 0x31 #define TVP3026_XCOLKEYREDMIN 0x32 #define TVP3026_XCOLKEYREDMAX 0x33 #define TVP3026_XCOLKEYGREENMIN 0x34 #define TVP3026_XCOLKEYGREENMAX 0x35 #define TVP3026_XCOLKEYBLUEMIN 0x36 #define TVP3026_XCOLKEYBLUEMAX 0x37 #define TVP3026_XCOLKEYCTRL 0x38 #define TVP3026_XCOLKEYCTRL_OVR_EN 0x01 #define TVP3026_XCOLKEYCTRL_RED_EN 0x02 #define TVP3026_XCOLKEYCTRL_GREEN_EN 0x04 #define TVP3026_XCOLKEYCTRL_BLUE_EN 0x08 #define TVP3026_XCOLKEYCTRL_NEGATE 0x10 #define TVP3026_XCOLKEYCTRL_ZOOM1 0x00 #define TVP3026_XCOLKEYCTRL_ZOOM2 0x20 #define TVP3026_XCOLKEYCTRL_ZOOM4 0x40 #define TVP3026_XCOLKEYCTRL_ZOOM8 0x60 #define TVP3026_XCOLKEYCTRL_ZOOM16 0x80 #define TVP3026_XCOLKEYCTRL_ZOOM32 0xA0 #define TVP3026_XMEMPLLCTRL 0x39 #define TVP3026_XMEMPLLCTRL_DIV(X) (((X)-1)>>1) /* 2,4,6,8,10,12,14,16, division applied to LOOP PLL after divide by 2^P */ #define TVP3026_XMEMPLLCTRL_STROBEMKC4 0x08 #define TVP3026_XMEMPLLCTRL_MCLK_DOTCLOCK 0x00 /* MKC4 */ #define TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL 0x10 /* MKC4 */ #define TVP3026_XMEMPLLCTRL_RCLK_PIXPLL 0x00 #define TVP3026_XMEMPLLCTRL_RCLK_LOOPPLL 0x20 #define TVP3026_XMEMPLLCTRL_RCLK_DOTDIVN 0x40 /* dot clock divided by loop pclk N prescaler */ #define TVP3026_XSENSETEST 0x3A #define TVP3026_XTESTMODEDATA 0x3B #define TVP3026_XCRCREML 0x3C #define TVP3026_XCRCREMH 0x3D #define TVP3026_XCRCBITSEL 0x3E #define TVP3026_XID 0x3F static const unsigned char DACseq[] = { TVP3026_XLATCHCTRL, TVP3026_XTRUECOLORCTRL, TVP3026_XMUXCTRL, TVP3026_XCLKCTRL, 
TVP3026_XPALETTEPAGE, TVP3026_XGENCTRL, TVP3026_XMISCCTRL, TVP3026_XGENIOCTRL, TVP3026_XGENIODATA, TVP3026_XCOLKEYOVRMIN, TVP3026_XCOLKEYOVRMAX, TVP3026_XCOLKEYREDMIN, TVP3026_XCOLKEYREDMAX, TVP3026_XCOLKEYGREENMIN, TVP3026_XCOLKEYGREENMAX, TVP3026_XCOLKEYBLUEMIN, TVP3026_XCOLKEYBLUEMAX, TVP3026_XCOLKEYCTRL, TVP3026_XMEMPLLCTRL, TVP3026_XSENSETEST, TVP3026_XCURCTRL }; #define POS3026_XLATCHCTRL 0 #define POS3026_XTRUECOLORCTRL 1 #define POS3026_XMUXCTRL 2 #define POS3026_XCLKCTRL 3 #define POS3026_XGENCTRL 5 #define POS3026_XMISCCTRL 6 #define POS3026_XMEMPLLCTRL 18 #define POS3026_XCURCTRL 20 static const unsigned char MGADACbpp32[] = { TVP3026_XLATCHCTRL_2_1, TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_ORGB_8888, 0x00, TVP3026_XCLKCTRL_DIV1 | TVP3026_XCLKCTRL_SRC_PLL, 0x00, TVP3026_XGENCTRL_HSYNC_POS | TVP3026_XGENCTRL_VSYNC_POS | TVP3026_XGENCTRL_LITTLE_ENDIAN | TVP3026_XGENCTRL_BLACK_0IRE | TVP3026_XGENCTRL_NO_SYNC_ON_GREEN | TVP3026_XGENCTRL_OVERSCAN_DIS, TVP3026_XMISCCTRL_DAC_PUP | TVP3026_XMISCCTRL_DAC_8BIT | TVP3026_XMISCCTRL_PSEL_DIS | TVP3026_XMISCCTRL_PSEL_HIGH, 0x00, 0x1E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, TVP3026_XCOLKEYCTRL_ZOOM1, 0x00, 0x00, TVP3026_XCURCTRL_DIS }; static int Ti3026_calcclock(const struct matrox_fb_info *minfo, unsigned int freq, unsigned int fmax, int *in, int *feed, int *post) { unsigned int fvco; unsigned int lin, lfeed, lpost; DBG(__func__) fvco = PLL_calcclock(minfo, freq, fmax, &lin, &lfeed, &lpost); fvco >>= (*post = lpost); *in = 64 - lin; *feed = 64 - lfeed; return fvco; } static int Ti3026_setpclk(struct matrox_fb_info *minfo, int clk) { unsigned int f_pll; unsigned int pixfeed, pixin, pixpost; struct matrox_hw_state *hw = &minfo->hw; DBG(__func__) f_pll = Ti3026_calcclock(minfo, clk, minfo->max_pixel_clock, &pixin, &pixfeed, &pixpost); hw->DACclk[0] = pixin | 0xC0; hw->DACclk[1] = pixfeed; hw->DACclk[2] = pixpost | 0xB0; { unsigned int loopfeed, loopin, looppost, loopdiv, z; unsigned int Bpp; 
Bpp = minfo->curr.final_bppShift; if (minfo->fbcon.var.bits_per_pixel == 24) { loopfeed = 3; /* set lm to any possible value */ loopin = 3 * 32 / Bpp; } else { loopfeed = 4; loopin = 4 * 32 / Bpp; } z = (110000 * loopin) / (f_pll * loopfeed); loopdiv = 0; /* div 2 */ if (z < 2) looppost = 0; else if (z < 4) looppost = 1; else if (z < 8) looppost = 2; else { looppost = 3; loopdiv = z/16; } if (minfo->fbcon.var.bits_per_pixel == 24) { hw->DACclk[3] = ((65 - loopin) & 0x3F) | 0xC0; hw->DACclk[4] = (65 - loopfeed) | 0x80; if (minfo->accel.ramdac_rev > 0x20) { if (isInterleave(minfo)) hw->DACreg[POS3026_XLATCHCTRL] = TVP3026B_XLATCHCTRL_8_3; else { hw->DACclk[4] &= ~0xC0; hw->DACreg[POS3026_XLATCHCTRL] = TVP3026B_XLATCHCTRL_4_3; } } else { if (isInterleave(minfo)) ; /* default... */ else { hw->DACclk[4] ^= 0xC0; /* change from 0x80 to 0x40 */ hw->DACreg[POS3026_XLATCHCTRL] = TVP3026A_XLATCHCTRL_4_3; } } hw->DACclk[5] = looppost | 0xF8; if (minfo->devflags.mga_24bpp_fix) hw->DACclk[5] ^= 0x40; } else { hw->DACclk[3] = ((65 - loopin) & 0x3F) | 0xC0; hw->DACclk[4] = 65 - loopfeed; hw->DACclk[5] = looppost | 0xF0; } hw->DACreg[POS3026_XMEMPLLCTRL] = loopdiv | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL | TVP3026_XMEMPLLCTRL_RCLK_LOOPPLL; } return 0; } static int Ti3026_init(struct matrox_fb_info *minfo, struct my_timming *m) { u_int8_t muxctrl = isInterleave(minfo) ? 
TVP3026_XMUXCTRL_MEMORY_64BIT : TVP3026_XMUXCTRL_MEMORY_32BIT; struct matrox_hw_state *hw = &minfo->hw; DBG(__func__) memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg)); switch (minfo->fbcon.var.bits_per_pixel) { case 4: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1; /* or _8_1, they are same */ hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR; hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_4BIT; hw->DACreg[POS3026_XCLKCTRL] = TVP3026_XCLKCTRL_SRC_PLL | TVP3026_XCLKCTRL_DIV8; hw->DACreg[POS3026_XMISCCTRL] = TVP3026_XMISCCTRL_DAC_PUP | TVP3026_XMISCCTRL_DAC_8BIT | TVP3026_XMISCCTRL_PSEL_DIS | TVP3026_XMISCCTRL_PSEL_LOW; break; case 8: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_8_1; /* or _4_1, they are same */ hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR; hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_8BIT; hw->DACreg[POS3026_XCLKCTRL] = TVP3026_XCLKCTRL_SRC_PLL | TVP3026_XCLKCTRL_DIV4; hw->DACreg[POS3026_XMISCCTRL] = TVP3026_XMISCCTRL_DAC_PUP | TVP3026_XMISCCTRL_DAC_8BIT | TVP3026_XMISCCTRL_PSEL_DIS | TVP3026_XMISCCTRL_PSEL_LOW; break; case 16: /* XLATCHCTRL should be _4_1 / _2_1... Why is not? (_2_1 is used every time) */ hw->DACreg[POS3026_XTRUECOLORCTRL] = (minfo->fbcon.var.green.length == 5) ? (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_ORGB_1555) : (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_RGB_565); hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_16BIT; hw->DACreg[POS3026_XCLKCTRL] = TVP3026_XCLKCTRL_SRC_PLL | TVP3026_XCLKCTRL_DIV2; break; case 24: /* XLATCHCTRL is: for (A) use _4_3 (?_8_3 is same? 
TBD), for (B) it is set in setpclk */ hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_RGB_888; hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_32BIT; hw->DACreg[POS3026_XCLKCTRL] = TVP3026_XCLKCTRL_SRC_PLL | TVP3026_XCLKCTRL_DIV4; break; case 32: /* XLATCHCTRL should be _2_1 / _1_1... Why is not? (_2_1 is used every time) */ hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_32BIT; break; default: return 1; /* TODO: failed */ } if (matroxfb_vgaHWinit(minfo, m)) return 1; /* set SYNC */ hw->MiscOutReg = 0xCB; if (m->sync & FB_SYNC_HOR_HIGH_ACT) hw->DACreg[POS3026_XGENCTRL] |= TVP3026_XGENCTRL_HSYNC_NEG; if (m->sync & FB_SYNC_VERT_HIGH_ACT) hw->DACreg[POS3026_XGENCTRL] |= TVP3026_XGENCTRL_VSYNC_NEG; if (m->sync & FB_SYNC_ON_GREEN) hw->DACreg[POS3026_XGENCTRL] |= TVP3026_XGENCTRL_SYNC_ON_GREEN; /* set DELAY */ if (minfo->video.len < 0x400000) hw->CRTCEXT[3] |= 0x08; else if (minfo->video.len > 0x400000) hw->CRTCEXT[3] |= 0x10; /* set HWCURSOR */ if (m->interlaced) { hw->DACreg[POS3026_XCURCTRL] |= TVP3026_XCURCTRL_INTERLACED; } if (m->HTotal >= 1536) hw->DACreg[POS3026_XCURCTRL] |= TVP3026_XCURCTRL_BLANK4096; /* set interleaving */ hw->MXoptionReg &= ~0x00001000; if (isInterleave(minfo)) hw->MXoptionReg |= 0x00001000; /* set DAC */ Ti3026_setpclk(minfo, m->pixclock); return 0; } static void ti3026_setMCLK(struct matrox_fb_info *minfo, int fout) { unsigned int f_pll; unsigned int pclk_m, pclk_n, pclk_p; unsigned int mclk_m, mclk_n, mclk_p; unsigned int rfhcnt, mclk_ctl; int tmout; DBG(__func__) f_pll = Ti3026_calcclock(minfo, fout, minfo->max_pixel_clock, &mclk_n, &mclk_m, &mclk_p); /* save pclk */ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC); pclk_n = inTi3026(minfo, TVP3026_XPIXPLLDATA); outTi3026(minfo, TVP3026_XPLLADDR, 0xFD); pclk_m = inTi3026(minfo, TVP3026_XPIXPLLDATA); outTi3026(minfo, TVP3026_XPLLADDR, 0xFE); pclk_p = inTi3026(minfo, TVP3026_XPIXPLLDATA); /* stop pclk */ outTi3026(minfo, 
TVP3026_XPLLADDR, 0xFE); outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00); /* set pclk to new mclk */ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC); outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_n | 0xC0); outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_m); outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_p | 0xB0); /* wait for PLL to lock */ for (tmout = 500000; tmout; tmout--) { if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40) break; udelay(10); } if (!tmout) printk(KERN_ERR "matroxfb: Temporary pixel PLL not locked after 5 secs\n"); /* output pclk on mclk pin */ mclk_ctl = inTi3026(minfo, TVP3026_XMEMPLLCTRL); outTi3026(minfo, TVP3026_XMEMPLLCTRL, mclk_ctl & 0xE7); outTi3026(minfo, TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_STROBEMKC4); /* stop MCLK */ outTi3026(minfo, TVP3026_XPLLADDR, 0xFB); outTi3026(minfo, TVP3026_XMEMPLLDATA, 0x00); /* set mclk to new freq */ outTi3026(minfo, TVP3026_XPLLADDR, 0xF3); outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_n | 0xC0); outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_m); outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_p | 0xB0); /* wait for PLL to lock */ for (tmout = 500000; tmout; tmout--) { if (inTi3026(minfo, TVP3026_XMEMPLLDATA) & 0x40) break; udelay(10); } if (!tmout) printk(KERN_ERR "matroxfb: Memory PLL not locked after 5 secs\n"); f_pll = f_pll * 333 / (10000 << mclk_p); if (isMilleniumII(minfo)) { rfhcnt = (f_pll - 128) / 256; if (rfhcnt > 15) rfhcnt = 15; } else { rfhcnt = (f_pll - 64) / 128; if (rfhcnt > 15) rfhcnt = 0; } minfo->hw.MXoptionReg = (minfo->hw.MXoptionReg & ~0x000F0000) | (rfhcnt << 16); pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg); /* output MCLK to MCLK pin */ outTi3026(minfo, TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL); outTi3026(minfo, TVP3026_XMEMPLLCTRL, (mclk_ctl ) | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL | TVP3026_XMEMPLLCTRL_STROBEMKC4); /* stop PCLK */ outTi3026(minfo, TVP3026_XPLLADDR, 0xFE); outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00); /* restore 
pclk */ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC); outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_n); outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_m); outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_p); /* wait for PLL to lock */ for (tmout = 500000; tmout; tmout--) { if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40) break; udelay(10); } if (!tmout) printk(KERN_ERR "matroxfb: Pixel PLL not locked after 5 secs\n"); } static void ti3026_ramdac_init(struct matrox_fb_info *minfo) { DBG(__func__) minfo->features.pll.vco_freq_min = 110000; minfo->features.pll.ref_freq = 114545; minfo->features.pll.feed_div_min = 2; minfo->features.pll.feed_div_max = 24; minfo->features.pll.in_div_min = 2; minfo->features.pll.in_div_max = 63; minfo->features.pll.post_shift_max = 3; if (minfo->devflags.noinit) return; ti3026_setMCLK(minfo, 60000); } static void Ti3026_restore(struct matrox_fb_info *minfo) { int i; unsigned char progdac[6]; struct matrox_hw_state *hw = &minfo->hw; CRITFLAGS DBG(__func__) #ifdef DEBUG dprintk(KERN_INFO "EXTVGA regs: "); for (i = 0; i < 6; i++) dprintk("%02X:", hw->CRTCEXT[i]); dprintk("\n"); #endif CRITBEGIN pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg); CRITEND matroxfb_vgaHWrestore(minfo); CRITBEGIN minfo->crtc1.panpos = -1; for (i = 0; i < 6; i++) mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]); for (i = 0; i < 21; i++) { outTi3026(minfo, DACseq[i], hw->DACreg[i]); } outTi3026(minfo, TVP3026_XPLLADDR, 0x00); progdac[0] = inTi3026(minfo, TVP3026_XPIXPLLDATA); progdac[3] = inTi3026(minfo, TVP3026_XLOOPPLLDATA); outTi3026(minfo, TVP3026_XPLLADDR, 0x15); progdac[1] = inTi3026(minfo, TVP3026_XPIXPLLDATA); progdac[4] = inTi3026(minfo, TVP3026_XLOOPPLLDATA); outTi3026(minfo, TVP3026_XPLLADDR, 0x2A); progdac[2] = inTi3026(minfo, TVP3026_XPIXPLLDATA); progdac[5] = inTi3026(minfo, TVP3026_XLOOPPLLDATA); CRITEND if (memcmp(hw->DACclk, progdac, 6)) { /* agrhh... setting up PLL is very slow on Millennium... 
*/ /* Mystique PLL is locked in few ms, but Millennium PLL lock takes about 0.15 s... */ /* Maybe even we should call schedule() ? */ CRITBEGIN outTi3026(minfo, TVP3026_XCLKCTRL, hw->DACreg[POS3026_XCLKCTRL]); outTi3026(minfo, TVP3026_XPLLADDR, 0x2A); outTi3026(minfo, TVP3026_XLOOPPLLDATA, 0); outTi3026(minfo, TVP3026_XPIXPLLDATA, 0); outTi3026(minfo, TVP3026_XPLLADDR, 0x00); for (i = 0; i < 3; i++) outTi3026(minfo, TVP3026_XPIXPLLDATA, hw->DACclk[i]); /* wait for PLL only if PLL clock requested (always for PowerMode, never for VGA) */ if (hw->MiscOutReg & 0x08) { int tmout; outTi3026(minfo, TVP3026_XPLLADDR, 0x3F); for (tmout = 500000; tmout; --tmout) { if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40) break; udelay(10); } CRITEND if (!tmout) printk(KERN_ERR "matroxfb: Pixel PLL not locked after 5 secs\n"); else dprintk(KERN_INFO "PixelPLL: %d\n", 500000-tmout); CRITBEGIN } outTi3026(minfo, TVP3026_XMEMPLLCTRL, hw->DACreg[POS3026_XMEMPLLCTRL]); outTi3026(minfo, TVP3026_XPLLADDR, 0x00); for (i = 3; i < 6; i++) outTi3026(minfo, TVP3026_XLOOPPLLDATA, hw->DACclk[i]); CRITEND if ((hw->MiscOutReg & 0x08) && ((hw->DACclk[5] & 0x80) == 0x80)) { int tmout; CRITBEGIN outTi3026(minfo, TVP3026_XPLLADDR, 0x3F); for (tmout = 500000; tmout; --tmout) { if (inTi3026(minfo, TVP3026_XLOOPPLLDATA) & 0x40) break; udelay(10); } CRITEND if (!tmout) printk(KERN_ERR "matroxfb: Loop PLL not locked after 5 secs\n"); else dprintk(KERN_INFO "LoopPLL: %d\n", 500000-tmout); } } #ifdef DEBUG dprintk(KERN_DEBUG "3026DACregs "); for (i = 0; i < 21; i++) { dprintk("R%02X=%02X ", DACseq[i], hw->DACreg[i]); if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... 
"); } dprintk(KERN_DEBUG "DACclk "); for (i = 0; i < 6; i++) dprintk("C%02X=%02X ", i, hw->DACclk[i]); dprintk("\n"); #endif } static void Ti3026_reset(struct matrox_fb_info *minfo) { DBG(__func__) ti3026_ramdac_init(minfo); } static struct matrox_altout ti3026_output = { .name = "Primary output", }; static int Ti3026_preinit(struct matrox_fb_info *minfo) { static const int vxres_mill2[] = { 512, 640, 768, 800, 832, 960, 1024, 1152, 1280, 1600, 1664, 1920, 2048, 0}; static const int vxres_mill1[] = { 640, 768, 800, 960, 1024, 1152, 1280, 1600, 1920, 2048, 0}; struct matrox_hw_state *hw = &minfo->hw; DBG(__func__) minfo->millenium = 1; minfo->milleniumII = (minfo->pcidev->device != PCI_DEVICE_ID_MATROX_MIL); minfo->capable.cfb4 = 1; minfo->capable.text = 1; /* isMilleniumII(minfo); */ minfo->capable.vxres = isMilleniumII(minfo) ? vxres_mill2 : vxres_mill1; minfo->outputs[0].data = minfo; minfo->outputs[0].output = &ti3026_output; minfo->outputs[0].src = minfo->outputs[0].default_src; minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR; if (minfo->devflags.noinit) return 0; /* preserve VGA I/O, BIOS and PPC */ hw->MXoptionReg &= 0xC0000100; hw->MXoptionReg |= 0x002C0000; if (minfo->devflags.novga) hw->MXoptionReg &= ~0x00000100; if (minfo->devflags.nobios) hw->MXoptionReg &= ~0x40000000; if (minfo->devflags.nopciretry) hw->MXoptionReg |= 0x20000000; pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg); minfo->accel.ramdac_rev = inTi3026(minfo, TVP3026_XSILICONREV); outTi3026(minfo, TVP3026_XCLKCTRL, TVP3026_XCLKCTRL_SRC_CLK0VGA | TVP3026_XCLKCTRL_CLKSTOPPED); outTi3026(minfo, TVP3026_XTRUECOLORCTRL, TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR); outTi3026(minfo, TVP3026_XMUXCTRL, TVP3026_XMUXCTRL_VGA); outTi3026(minfo, TVP3026_XPLLADDR, 0x2A); outTi3026(minfo, TVP3026_XLOOPPLLDATA, 0x00); outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00); mga_outb(M_MISC_REG, 0x67); outTi3026(minfo, TVP3026_XMEMPLLCTRL, TVP3026_XMEMPLLCTRL_STROBEMKC4 | 
TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL); mga_outl(M_RESET, 1); udelay(250); mga_outl(M_RESET, 0); udelay(250); mga_outl(M_MACCESS, 0x00008000); udelay(10); return 0; } struct matrox_switch matrox_millennium = { Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore }; EXPORT_SYMBOL(matrox_millennium); #endif MODULE_LICENSE("GPL");
gpl-2.0
leonardoafa/android_kernel_samsung_kyleopen
arch/mips/netlogic/xlr/time.c
2750
2028
/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Platform timer setup for NetLogic XLR boards: wires the CP0
 * count/compare timer to its IRQ and programs the timer frequency
 * reported by the bootloader.
 */

#include <linux/init.h>

#include <asm/time.h>
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/psb-bootinfo.h>

/*
 * Return the interrupt number used for the CP0 count/compare timer.
 * Always IRQ_TIMER on this platform; the generic MIPS time code calls
 * this to hook up the clockevent interrupt.
 */
unsigned int __cpuinit get_c0_compare_int(void)
{
	return IRQ_TIMER;
}

/*
 * Arch hook invoked from generic MIPS time init.  Takes the CPU
 * frequency handed over by the PSB bootloader (nlm_prom_info) as the
 * high-precision timer (CP0 count) frequency and logs it.
 *
 * NOTE(review): the pr_info format uses %ld while the argument is cast
 * to unsigned long -- %lu would match the cast; harmless for sane
 * frequencies but technically mismatched.
 */
void __init plat_time_init(void)
{
	mips_hpt_frequency = nlm_prom_info.cpu_frequency;
	pr_info("MIPS counter frequency [%ld]\n",
			(unsigned long)mips_hpt_frequency);
}
gpl-2.0
tbalden/android_kernel_htc_endeavoru_jb
arch/microblaze/kernel/exceptions.c
2750
4340
/*
 * HW exception handling
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008 PetaLogix
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <asm/exceptions.h>
#include <asm/entry.h>		/* For KM CPU var */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>
#include <asm/cacheflush.h>

/* Hardware exception type codes as delivered in the low bits of 'type' */
#define MICROBLAZE_ILL_OPCODE_EXCEPTION	0x02
#define MICROBLAZE_IBUS_EXCEPTION	0x03
#define MICROBLAZE_DBUS_EXCEPTION	0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION	0x05
#define MICROBLAZE_FPU_EXCEPTION	0x06
#define MICROBLAZE_PRIVILEGED_EXCEPTION	0x07

/* Serializes oops output so concurrent CPUs don't interleave dumps */
static DEFINE_SPINLOCK(die_lock);

/*
 * Print an oops banner and register dump for a fatal kernel-mode fault,
 * then terminate the current task via do_exit().  Does not return.
 */
void die(const char *str, struct pt_regs *fp, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk(KERN_WARNING "Oops: %s, sig: %ld\n", str, err);
	show_regs(fp);
	spin_unlock_irq(&die_lock);
	/* do_exit() should take care of panic'ing from an interrupt
	 * context so we don't handle it here
	 */
	do_exit(err);
}

/* for user application debugging */
/*
 * Software (brk) exception: raise SIGTRAP at the trapping instruction
 * (address in r16), then flush that 4-byte instruction from both caches
 * so a debugger-modified breakpoint slot is seen coherently.
 */
asmlinkage void sw_exception(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
	flush_dcache_range(regs->r16, regs->r16 + 0x4);
	flush_icache_range(regs->r16, regs->r16 + 0x4);
}

/*
 * Deliver signal 'signr' with siginfo code 'code' and fault address
 * 'addr' to the current task.  A fault taken in kernel mode is fatal
 * and goes through die() (which does not return).
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (kernel_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
	}
	info.si_signo = signr;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

/*
 * Common entry for hardware exceptions, dispatched on the low five bits
 * of 'type'.  User-mode faults become signals via _exception(); the
 * same faults in kernel mode are fatal and die().  With an MMU the
 * reported address is taken from the saved PC rather than the 'addr'
 * argument.
 */
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
							int fsr, int addr)
{
#ifdef CONFIG_MMU
	addr = regs->pc;
#endif

#if 0
	printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x " \
							"ESR=%08x\n",
			type, user_mode(regs) ? "user" : "kernel", fsr,
			(unsigned int) regs->pc, (unsigned int) regs->esr);
#endif

	switch (type & 0x1F) {
	case MICROBLAZE_ILL_OPCODE_EXCEPTION:
		if (user_mode(regs)) {
			pr_debug("Illegal opcode exception in user mode\n");
			_exception(SIGILL, regs, ILL_ILLOPC, addr);
			return;
		}
		printk(KERN_WARNING "Illegal opcode exception " \
							"in kernel mode.\n");
		die("opcode exception", regs, SIGBUS);
		break;
	case MICROBLAZE_IBUS_EXCEPTION:
		if (user_mode(regs)) {
			pr_debug("Instruction bus error exception in user mode\n");
			_exception(SIGBUS, regs, BUS_ADRERR, addr);
			return;
		}
		printk(KERN_WARNING "Instruction bus error exception " \
							"in kernel mode.\n");
		die("bus exception", regs, SIGBUS);
		break;
	case MICROBLAZE_DBUS_EXCEPTION:
		if (user_mode(regs)) {
			pr_debug("Data bus error exception in user mode\n");
			_exception(SIGBUS, regs, BUS_ADRERR, addr);
			return;
		}
		printk(KERN_WARNING "Data bus error exception " \
							"in kernel mode.\n");
		die("bus exception", regs, SIGBUS);
		break;
	case MICROBLAZE_DIV_ZERO_EXCEPTION:
		if (user_mode(regs)) {
			pr_debug("Divide by zero exception in user mode\n");
			_exception(SIGILL, regs, FPE_INTDIV, addr);
			return;
		}
		printk(KERN_WARNING "Divide by zero exception " \
							"in kernel mode.\n");
		die("Divide by zero exception", regs, SIGBUS);
		break;
	case MICROBLAZE_FPU_EXCEPTION:
		pr_debug("FPU exception\n");
		/* IEEE FP exception */
		/* I removed fsr variable and use code var for storing fsr */
		/* Map the FPU status bits to the matching FPE si_code */
		if (fsr & FSR_IO)
			fsr = FPE_FLTINV;
		else if (fsr & FSR_OF)
			fsr = FPE_FLTOVF;
		else if (fsr & FSR_UF)
			fsr = FPE_FLTUND;
		else if (fsr & FSR_DZ)
			fsr = FPE_FLTDIV;
		else if (fsr & FSR_DO)
			fsr = FPE_FLTRES;
		_exception(SIGFPE, regs, fsr, addr);
		break;
#ifdef CONFIG_MMU
	case MICROBLAZE_PRIVILEGED_EXCEPTION:
		pr_debug("Privileged exception\n");
		_exception(SIGILL, regs, ILL_PRVOPC, addr);
		break;
#endif
	default:
	/* FIXME what to do in unexpected exception */
		printk(KERN_WARNING "Unexpected exception %02x "
			"PC=%08x in %s mode\n", type, (unsigned int) addr,
			kernel_mode(regs) ? "kernel" : "user");
	}
	return;
}
gpl-2.0
k2wlxda/kernelmt
fs/nfsd/nfs4acl.c
4542
22055
/* * Common NFSv4 ACL handling code. * * Copyright (c) 2002, 2003 The Regents of the University of Michigan. * All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * Jeff Sedlak <jsedlak@umich.edu> * J. Bruce Fields <bfields@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> #include <linux/nfs_fs.h> #include <linux/export.h> #include "acl.h" /* mode bit translations: */ #define NFS4_READ_MODE (NFS4_ACE_READ_DATA) #define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA) #define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE #define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE) #define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL) /* We don't support these bits; insist they be neither allowed nor denied */ #define NFS4_MASK_UNSUPP (NFS4_ACE_DELETE | NFS4_ACE_WRITE_OWNER \ | NFS4_ACE_READ_NAMED_ATTRS | NFS4_ACE_WRITE_NAMED_ATTRS) /* flags used to simulate posix default ACLs */ #define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \ | NFS4_ACE_DIRECTORY_INHERIT_ACE) #define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \ | NFS4_ACE_INHERIT_ONLY_ACE \ | NFS4_ACE_IDENTIFIER_GROUP) #define MASK_EQUAL(mask1, mask2) \ ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) ) static u32 mask_from_posix(unsigned short perm, unsigned int flags) { int mask = NFS4_ANYONE_MODE; if (flags & NFS4_ACL_OWNER) mask |= NFS4_OWNER_MODE; if (perm & ACL_READ) mask |= NFS4_READ_MODE; if (perm & ACL_WRITE) mask |= NFS4_WRITE_MODE; if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR)) mask |= NFS4_ACE_DELETE_CHILD; if (perm & ACL_EXECUTE) mask |= NFS4_EXECUTE_MODE; return mask; } static u32 deny_mask_from_posix(unsigned short perm, u32 flags) { u32 mask = 0; if (perm & ACL_READ) mask |= NFS4_READ_MODE; if (perm & ACL_WRITE) mask |= NFS4_WRITE_MODE; if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR)) mask |= NFS4_ACE_DELETE_CHILD; if (perm & ACL_EXECUTE) mask |= NFS4_EXECUTE_MODE; return mask; } /* XXX: modify functions to return NFS errors; they're only ever * used by nfs code, after all.... */ /* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the * side of being more restrictive, so the mode bit mapping below is * pessimistic. 
An optimistic version would be needed to handle DENY's, * but we espect to coalesce all ALLOWs and DENYs before mapping to mode * bits. */ static void low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags) { u32 write_mode = NFS4_WRITE_MODE; if (flags & NFS4_ACL_DIR) write_mode |= NFS4_ACE_DELETE_CHILD; *mode = 0; if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE) *mode |= ACL_READ; if ((perm & write_mode) == write_mode) *mode |= ACL_WRITE; if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE) *mode |= ACL_EXECUTE; } struct ace_container { struct nfs4_ace *ace; struct list_head ace_l; }; static short ace2type(struct nfs4_ace *); static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int); struct nfs4_acl * nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl, unsigned int flags) { struct nfs4_acl *acl; int size = 0; if (pacl) { if (posix_acl_valid(pacl) < 0) return ERR_PTR(-EINVAL); size += 2*pacl->a_count; } if (dpacl) { if (posix_acl_valid(dpacl) < 0) return ERR_PTR(-EINVAL); size += 2*dpacl->a_count; } /* Allocate for worst case: one (deny, allow) pair each: */ acl = nfs4_acl_new(size); if (acl == NULL) return ERR_PTR(-ENOMEM); if (pacl) _posix_to_nfsv4_one(pacl, acl, flags & ~NFS4_ACL_TYPE_DEFAULT); if (dpacl) _posix_to_nfsv4_one(dpacl, acl, flags | NFS4_ACL_TYPE_DEFAULT); return acl; } struct posix_acl_summary { unsigned short owner; unsigned short users; unsigned short group; unsigned short groups; unsigned short other; unsigned short mask; }; static void summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas) { struct posix_acl_entry *pa, *pe; /* * Only pas.users and pas.groups need initialization; previous * posix_acl_valid() calls ensure that the other fields will be * initialized in the following loop. 
But, just to placate gcc: */ memset(pas, 0, sizeof(*pas)); pas->mask = 07; pe = acl->a_entries + acl->a_count; FOREACH_ACL_ENTRY(pa, acl, pe) { switch (pa->e_tag) { case ACL_USER_OBJ: pas->owner = pa->e_perm; break; case ACL_GROUP_OBJ: pas->group = pa->e_perm; break; case ACL_USER: pas->users |= pa->e_perm; break; case ACL_GROUP: pas->groups |= pa->e_perm; break; case ACL_OTHER: pas->other = pa->e_perm; break; case ACL_MASK: pas->mask = pa->e_perm; break; } } /* We'll only care about effective permissions: */ pas->users &= pas->mask; pas->group &= pas->mask; pas->groups &= pas->mask; } /* We assume the acl has been verified with posix_acl_valid. */ static void _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, unsigned int flags) { struct posix_acl_entry *pa, *group_owner_entry; struct nfs4_ace *ace; struct posix_acl_summary pas; unsigned short deny; int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ? NFS4_INHERITANCE_FLAGS | NFS4_ACE_INHERIT_ONLY_ACE : 0); BUG_ON(pacl->a_count < 3); summarize_posix_acl(pacl, &pas); pa = pacl->a_entries; ace = acl->aces + acl->naces; /* We could deny everything not granted by the owner: */ deny = ~pas.owner; /* * but it is equivalent (and simpler) to deny only what is not * granted by later entries: */ deny &= pas.users | pas.group | pas.groups | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_USER) { deny = ~(pa->e_perm & pas.mask); deny &= pas.groups | pas.group | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = 
pa->e_id; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; pa++; } /* In the case of groups, we apply allow ACEs first, then deny ACEs, * since a user can be in more than one group. */ /* allow ACEs */ group_owner_entry = pa; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pas.group, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_GROUP) { ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; pa++; } /* deny ACEs */ pa = group_owner_entry; deny = ~pas.group & pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; } pa++; while (pa->e_tag == ACL_GROUP) { deny = ~(pa->e_perm & pas.mask); deny &= pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; } pa++; } if (pa->e_tag == ACL_MASK) pa++; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags); ace->whotype = NFS4_ACL_WHO_EVERYONE; acl->naces++; } static void sort_pacl_range(struct posix_acl *pacl, int start, int end) { int sorted = 0, i; struct posix_acl_entry tmp; /* We just do a bubble sort; easy to do in place, and we're not * expecting acl's to be long enough to justify anything more. 
*/ while (!sorted) { sorted = 1; for (i = start; i < end; i++) { if (pacl->a_entries[i].e_id > pacl->a_entries[i+1].e_id) { sorted = 0; tmp = pacl->a_entries[i]; pacl->a_entries[i] = pacl->a_entries[i+1]; pacl->a_entries[i+1] = tmp; } } } } static void sort_pacl(struct posix_acl *pacl) { /* posix_acl_valid requires that users and groups be in order * by uid/gid. */ int i, j; if (pacl->a_count <= 4) return; /* no users or groups */ i = 1; while (pacl->a_entries[i].e_tag == ACL_USER) i++; sort_pacl_range(pacl, 1, i-1); BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); j = ++i; while (pacl->a_entries[j].e_tag == ACL_GROUP) j++; sort_pacl_range(pacl, i, j-1); return; } /* * While processing the NFSv4 ACE, this maintains bitmasks representing * which permission bits have been allowed and which denied to a given * entity: */ struct posix_ace_state { u32 allow; u32 deny; }; struct posix_user_ace_state { uid_t uid; struct posix_ace_state perms; }; struct posix_ace_state_array { int n; struct posix_user_ace_state aces[]; }; /* * While processing the NFSv4 ACE, this maintains the partial permissions * calculated so far: */ struct posix_acl_state { int empty; struct posix_ace_state owner; struct posix_ace_state group; struct posix_ace_state other; struct posix_ace_state everyone; struct posix_ace_state mask; /* Deny unused in this case */ struct posix_ace_state_array *users; struct posix_ace_state_array *groups; }; static int init_state(struct posix_acl_state *state, int cnt) { int alloc; memset(state, 0, sizeof(struct posix_acl_state)); state->empty = 1; /* * In the worst case, each individual acl could be for a distinct * named user or group, but we don't no which, so we allocate * enough space for either: */ alloc = sizeof(struct posix_ace_state_array) + cnt*sizeof(struct posix_user_ace_state); state->users = kzalloc(alloc, GFP_KERNEL); if (!state->users) return -ENOMEM; state->groups = kzalloc(alloc, GFP_KERNEL); if (!state->groups) { kfree(state->users); return -ENOMEM; 
	}
	return 0;
}

/*
 * Release the per-user and per-group ACE arrays attached to a
 * posix_acl_state by its initializer.  (The state struct itself is
 * caller-owned, typically on the stack.)
 */
static void free_state(struct posix_acl_state *state)
{
	kfree(state->users);
	kfree(state->groups);
}

/*
 * Fold an ACE's accumulated allow bits into the state used later to
 * compute the posix ACL_MASK entry.
 */
static inline void add_to_mask(struct posix_acl_state *state,
				struct posix_ace_state *astate)
{
	state->mask.allow |= astate->allow;
}

/*
 * Certain bits (SYNCHRONIZE, DELETE, WRITE_OWNER, READ/WRITE_NAMED_ATTRS,
 * READ_ATTRIBUTES, READ_ACL) are currently unenforceable and don't translate
 * to traditional read/write/execute permissions.
 *
 * It's problematic to reject acls that use certain mode bits, because it
 * places the burden on users to learn the rules about which bits one
 * particular server sets, without giving the user a lot of help--we return an
 * error that could mean any number of different things.  To make matters
 * worse, the problematic bits might be introduced by some application that's
 * automatically mapping from some other acl model.
 *
 * So wherever possible we accept anything, possibly erring on the side of
 * denying more permissions than necessary.
 *
 * However we do reject *explicit* DENY's of a few bits representing
 * permissions we could never deny:
 */
static inline int check_deny(u32 mask, int isowner)
{
	if (mask & (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL))
		return -EINVAL;
	if (!isowner)
		return 0;
	/* The owner can always read/write attributes and the ACL: */
	if (mask & (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL))
		return -EINVAL;
	return 0;
}

/*
 * Convert an accumulated posix_acl_state into a freshly allocated
 * struct posix_acl.
 *
 * Returns the new ACL on success, or an ERR_PTR(): -EINVAL if an
 * explicit DENY covers bits we could never deny (see check_deny()),
 * -ENOMEM on allocation failure.  Ownership of the returned ACL
 * transfers to the caller (release with posix_acl_release()).
 */
static struct posix_acl *
posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
{
	struct posix_acl_entry *pace;
	struct posix_acl *pacl;
	int nace;
	int i, error = 0;

	/*
	 * ACLs with no ACEs are treated differently in the inheritable
	 * and effective cases: when there are no inheritable ACEs, we
	 * set a zero-length default posix acl:
	 */
	if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
		pacl = posix_acl_alloc(0, GFP_KERNEL);
		return pacl ? pacl : ERR_PTR(-ENOMEM);
	}
	/*
	 * When there are no effective ACEs, the following will end
	 * up setting a 3-element effective posix ACL with all
	 * permissions zero.
	 */
	/* 4 = USER_OBJ + GROUP_OBJ + MASK + OTHER, plus one entry per
	 * named user and named group: */
	nace = 4 + state->users->n + state->groups->n;
	pacl = posix_acl_alloc(nace, GFP_KERNEL);
	if (!pacl)
		return ERR_PTR(-ENOMEM);

	/* Entries are filled in canonical order: USER_OBJ, USER...,
	 * GROUP_OBJ, GROUP..., MASK, OTHER. */
	pace = pacl->a_entries;
	pace->e_tag = ACL_USER_OBJ;
	error = check_deny(state->owner.deny, 1);
	if (error)
		goto out_err;
	low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags);
	pace->e_id = ACL_UNDEFINED_ID;

	for (i=0; i < state->users->n; i++) {
		pace++;
		pace->e_tag = ACL_USER;
		error = check_deny(state->users->aces[i].perms.deny, 0);
		if (error)
			goto out_err;
		low_mode_from_nfs4(state->users->aces[i].perms.allow,
					&pace->e_perm, flags);
		pace->e_id = state->users->aces[i].uid;
		add_to_mask(state, &state->users->aces[i].perms);
	}

	pace++;
	pace->e_tag = ACL_GROUP_OBJ;
	error = check_deny(state->group.deny, 0);
	if (error)
		goto out_err;
	low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags);
	pace->e_id = ACL_UNDEFINED_ID;
	add_to_mask(state, &state->group);

	for (i=0; i < state->groups->n; i++) {
		pace++;
		pace->e_tag = ACL_GROUP;
		error = check_deny(state->groups->aces[i].perms.deny, 0);
		if (error)
			goto out_err;
		low_mode_from_nfs4(state->groups->aces[i].perms.allow,
					&pace->e_perm, flags);
		pace->e_id = state->groups->aces[i].uid;
		add_to_mask(state, &state->groups->aces[i].perms);
	}

	/* MASK holds the union of all named-user/group and GROUP_OBJ
	 * allow bits accumulated by add_to_mask() above: */
	pace++;
	pace->e_tag = ACL_MASK;
	low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
	pace->e_id = ACL_UNDEFINED_ID;

	pace++;
	pace->e_tag = ACL_OTHER;
	error = check_deny(state->other.deny, 0);
	if (error)
		goto out_err;
	low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags);
	pace->e_id = ACL_UNDEFINED_ID;

	return pacl;
out_err:
	posix_acl_release(pacl);
	return ERR_PTR(error);
}

static inline void allow_bits(struct posix_ace_state *astate, u32 mask)
{
	/* Allow all bits in the mask not already denied: */
	astate->allow |= mask & ~astate->deny;
}

static inline void deny_bits(struct posix_ace_state *astate, u32 mask)
{
	/* Deny all bits in the mask not already allowed: */
	astate->deny |= mask & ~astate->allow;
}

/*
 * Find the entry for @uid in @a, appending a new one if absent.
 * New entries inherit the EVERYONE@ permissions accumulated so far,
 * so earlier EVERYONE@ ACEs apply to principals first named later.
 * NOTE(review): no bounds check on a->aces[] -- presumably init_state()
 * sized the array for the worst case (one entry per ACE); confirm there.
 */
static int find_uid(struct posix_acl_state *state,
		struct posix_ace_state_array *a, uid_t uid)
{
	int i;

	for (i = 0; i < a->n; i++)
		if (a->aces[i].uid == uid)
			return i;
	/* Not found: */
	a->n++;
	a->aces[i].uid = uid;
	a->aces[i].perms.allow = state->everyone.allow;
	a->aces[i].perms.deny = state->everyone.deny;
	return i;
}

/* Apply deny_bits() to every entry in the array. */
static void deny_bits_array(struct posix_ace_state_array *a, u32 mask)
{
	int i;

	for (i=0; i < a->n; i++)
		deny_bits(&a->aces[i].perms, mask);
}

/* Apply allow_bits() to every entry in the array. */
static void allow_bits_array(struct posix_ace_state_array *a, u32 mask)
{
	int i;

	for (i=0; i < a->n; i++)
		allow_bits(&a->aces[i].perms, mask);
}

/*
 * Fold one NFSv4 ACE into the accumulating posix state.  NFSv4 ACLs
 * use first-match evaluation order, so a DENY of a broad principal
 * (GROUP@, EVERYONE@) must also be propagated to the more specific
 * principals it covers; the effective deny mask re-read after each
 * deny_bits() call (".perms.deny", ".deny") excludes bits that an
 * earlier ACE already allowed.
 */
static void process_one_v4_ace(struct posix_acl_state *state,
				struct nfs4_ace *ace)
{
	u32 mask = ace->access_mask;
	int i;

	/* Any ACE at all means the ACL is not empty: */
	state->empty = 0;

	switch (ace2type(ace)) {
	case ACL_USER_OBJ:
		if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
			allow_bits(&state->owner, mask);
		} else {
			deny_bits(&state->owner, mask);
		}
		break;
	case ACL_USER:
		i = find_uid(state, state->users, ace->who);
		if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
			allow_bits(&state->users->aces[i].perms, mask);
		} else {
			deny_bits(&state->users->aces[i].perms, mask);
			/* A named-user deny also constrains the owner: */
			mask = state->users->aces[i].perms.deny;
			deny_bits(&state->owner, mask);
		}
		break;
	case ACL_GROUP_OBJ:
		if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
			allow_bits(&state->group, mask);
		} else {
			deny_bits(&state->group, mask);
			/* Propagate the effective deny to all other
			 * principals: */
			mask = state->group.deny;
			deny_bits(&state->owner, mask);
			deny_bits(&state->everyone, mask);
			deny_bits_array(state->users, mask);
			deny_bits_array(state->groups, mask);
		}
		break;
	case ACL_GROUP:
		i = find_uid(state, state->groups, ace->who);
		if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
			allow_bits(&state->groups->aces[i].perms, mask);
		} else {
			deny_bits(&state->groups->aces[i].perms, mask);
			/* Propagate the effective deny to all other
			 * principals: */
			mask = state->groups->aces[i].perms.deny;
			deny_bits(&state->owner, mask);
			deny_bits(&state->group, mask);
			deny_bits(&state->everyone, mask);
			deny_bits_array(state->users, mask);
			deny_bits_array(state->groups, mask);
		}
		break;
	case ACL_OTHER:
		/* EVERYONE@ covers every principal, owner included: */
		if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
			allow_bits(&state->owner, mask);
			allow_bits(&state->group, mask);
			allow_bits(&state->other, mask);
			allow_bits(&state->everyone, mask);
			allow_bits_array(state->users, mask);
			allow_bits_array(state->groups, mask);
		} else {
			deny_bits(&state->owner, mask);
			deny_bits(&state->group, mask);
			deny_bits(&state->other, mask);
			deny_bits(&state->everyone, mask);
			deny_bits_array(state->users, mask);
			deny_bits_array(state->groups, mask);
		}
	}
}

/*
 * Translate an NFSv4 ACL into an effective posix ACL (*pacl) and,
 * for directories, a default (inheritable) posix ACL (*dpacl).
 *
 * Returns 0 on success (caller owns and must release both ACLs),
 * -EINVAL for ACE types/flags we cannot map, or a negative errno
 * from allocation failures.  On error neither *pacl nor *dpacl is
 * left for the caller to free.
 */
int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
		struct posix_acl **dpacl, unsigned int flags)
{
	struct posix_acl_state effective_acl_state, default_acl_state;
	struct nfs4_ace *ace;
	int ret;

	ret = init_state(&effective_acl_state, acl->naces);
	if (ret)
		return ret;
	ret = init_state(&default_acl_state, acl->naces);
	if (ret)
		goto out_estate;
	ret = -EINVAL;
	for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
		/* Only plain ALLOW/DENY ACEs are supported: */
		if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
		    ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
			goto out_dstate;
		if (ace->flag & ~NFS4_SUPPORTED_FLAGS)
			goto out_dstate;
		if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) {
			process_one_v4_ace(&effective_acl_state, ace);
			continue;
		}
		/* Inheritance flags are only meaningful on directories: */
		if (!(flags & NFS4_ACL_DIR))
			goto out_dstate;
		/*
		 * Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT
		 * is set, we're effectively turning on the other.  That's OK,
		 * according to rfc 3530.
		 */
		process_one_v4_ace(&default_acl_state, ace);

		/* INHERIT_ONLY ACEs don't affect the file itself: */
		if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
			process_one_v4_ace(&effective_acl_state, ace);
	}
	*pacl = posix_state_to_acl(&effective_acl_state, flags);
	if (IS_ERR(*pacl)) {
		ret = PTR_ERR(*pacl);
		*pacl = NULL;
		goto out_dstate;
	}
	*dpacl = posix_state_to_acl(&default_acl_state,
						flags | NFS4_ACL_TYPE_DEFAULT);
	if (IS_ERR(*dpacl)) {
		ret = PTR_ERR(*dpacl);
		*dpacl = NULL;
		posix_acl_release(*pacl);
		*pacl = NULL;
		goto out_dstate;
	}
	sort_pacl(*pacl);
	sort_pacl(*dpacl);
	ret = 0;
out_dstate:
	free_state(&default_acl_state);
out_estate:
	free_state(&effective_acl_state);
	return ret;
}

/*
 * Map an ACE's whotype to the corresponding posix ACL tag.  Named
 * principals become ACL_USER or ACL_GROUP depending on the
 * IDENTIFIER_GROUP flag; EVERYONE@ maps to ACL_OTHER.
 */
static short
ace2type(struct nfs4_ace *ace)
{
	switch (ace->whotype) {
	case NFS4_ACL_WHO_NAMED:
		return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ?
				ACL_GROUP : ACL_USER);
	case NFS4_ACL_WHO_OWNER:
		return ACL_USER_OBJ;
	case NFS4_ACL_WHO_GROUP:
		return ACL_GROUP_OBJ;
	case NFS4_ACL_WHO_EVERYONE:
		return ACL_OTHER;
	}
	BUG();
	return -1;
}

EXPORT_SYMBOL(nfs4_acl_posix_to_nfsv4);
EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix);

/*
 * Allocate an nfs4_acl with room for @n ACEs (flexible trailing
 * array).  naces starts at 0; the caller appends entries.  Returns
 * NULL on allocation failure.
 */
struct nfs4_acl *
nfs4_acl_new(int n)
{
	struct nfs4_acl *acl;

	acl = kmalloc(sizeof(*acl) + n*sizeof(struct nfs4_ace), GFP_KERNEL);
	if (acl == NULL)
		return NULL;
	acl->naces = 0;
	return acl;
}

/* Mapping between the special NFSv4 "who" strings and whotype codes: */
static struct {
	char *string;
	int   stringlen;
	int type;
} s2t_map[] = {
	{
		.string    = "OWNER@",
		.stringlen = sizeof("OWNER@") - 1,
		.type      = NFS4_ACL_WHO_OWNER,
	},
	{
		.string    = "GROUP@",
		.stringlen = sizeof("GROUP@") - 1,
		.type      = NFS4_ACL_WHO_GROUP,
	},
	{
		.string    = "EVERYONE@",
		.stringlen = sizeof("EVERYONE@") - 1,
		.type      = NFS4_ACL_WHO_EVERYONE,
	},
};

/*
 * Classify a "who" string of length @len (not NUL-terminated).
 * Anything other than the special strings above is a named
 * user/group (NFS4_ACL_WHO_NAMED).
 */
int
nfs4_acl_get_whotype(char *p, u32 len)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
		if (s2t_map[i].stringlen == len &&
				0 == memcmp(s2t_map[i].string, p, len))
			return s2t_map[i].type;
	}
	return NFS4_ACL_WHO_NAMED;
}

/*
 * Write the special "who" string for @who into @p (no NUL terminator)
 * and return its length.  The caller must supply a large enough
 * buffer -- no bound is checked here -- and must only pass special
 * whotypes: an unknown @who is a bug (BUG()).
 */
int
nfs4_acl_write_who(int who, char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
		if (s2t_map[i].type == who) {
			memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
			return s2t_map[i].stringlen;
		}
	}
	BUG();
	return -1;
}

EXPORT_SYMBOL(nfs4_acl_new);
EXPORT_SYMBOL(nfs4_acl_get_whotype);
EXPORT_SYMBOL(nfs4_acl_write_who);
gpl-2.0