repo_name
string
path
string
copies
string
size
string
content
string
license
string
ReflexBow/ghost
drivers/net/phy/davicom.c
8151
5160
/*
 * drivers/net/phy/davicom.c
 *
 * Driver for Davicom PHYs
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* DM9161 vendor-specific Specified Configuration Register */
#define MII_DM9161_SCR		0x10
#define MII_DM9161_SCR_INIT	0x0610
#define MII_DM9161_SCR_RMII	0x0100	/* select RMII instead of MII mode */

/* DM9161 Interrupt Register */
#define MII_DM9161_INTR	0x15
#define MII_DM9161_INTR_PEND		0x8000
/* *_MASK bits: a set bit DISABLES the corresponding interrupt source */
#define MII_DM9161_INTR_DPLX_MASK	0x0800
#define MII_DM9161_INTR_SPD_MASK	0x0400
#define MII_DM9161_INTR_LINK_MASK	0x0200
#define MII_DM9161_INTR_MASK		0x0100
/* *_CHANGE bits: latched event status, cleared by reading the register */
#define MII_DM9161_INTR_DPLX_CHANGE	0x0010
#define MII_DM9161_INTR_SPD_CHANGE	0x0008
#define MII_DM9161_INTR_LINK_CHANGE	0x0004
#define MII_DM9161_INTR_INIT		0x0000
/* All mask bits together: writing these disables every interrupt source */
#define MII_DM9161_INTR_STOP	\
(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)

/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR	0x12
#define MII_DM9161_10BTCSR_INIT	0x7800

MODULE_DESCRIPTION("Davicom PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");

#define DM9161_DELAY 1

/*
 * dm9161_config_intr - enable or disable interrupt generation on the PHY
 * @phydev: target phy_device
 *
 * Read-modify-write of the interrupt register: clearing the mask bits
 * enables all sources, setting them (MII_DM9161_INTR_STOP) disables all.
 * Returns the phy_write() result (0 on success) or a negative errno from
 * the initial phy_read().
 */
static int dm9161_config_intr(struct phy_device *phydev)
{
	int temp;

	temp = phy_read(phydev, MII_DM9161_INTR);

	if (temp < 0)
		return temp;

	if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
		temp &= ~(MII_DM9161_INTR_STOP);
	else
		temp |= MII_DM9161_INTR_STOP;

	temp = phy_write(phydev, MII_DM9161_INTR, temp);

	return temp;
}

/*
 * dm9161_config_aneg - restart autonegotiation configuration
 * @phydev: target phy_device
 *
 * Isolates the PHY from the MII bus before handing the actual
 * advertisement setup to the generic helper.  Returns 0 or a negative
 * errno from the MDIO accesses.
 */
static int dm9161_config_aneg(struct phy_device *phydev)
{
	int err;

	/* Isolate the PHY */
	err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);

	if (err < 0)
		return err;

	/* Configure the new settings */
	err = genphy_config_aneg(phydev);

	if (err < 0)
		return err;

	return 0;
}

/*
 * dm9161_config_init - one-time hardware setup for DM9161 parts
 * @phydev: target phy_device
 *
 * Isolates the PHY, programs the scrambler configuration for the MII or
 * RMII interface mode, resets the 10BT configuration register to its
 * default, then de-isolates by enabling autonegotiation.  Returns 0,
 * -EINVAL for an unsupported interface mode, or a negative errno from
 * phy_write().
 */
static int dm9161_config_init(struct phy_device *phydev)
{
	int err, temp;

	/* Isolate the PHY */
	err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);

	if (err < 0)
		return err;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_MII:
		temp = MII_DM9161_SCR_INIT;
		break;
	case PHY_INTERFACE_MODE_RMII:
		temp = MII_DM9161_SCR_INIT | MII_DM9161_SCR_RMII;
		break;
	default:
		return -EINVAL;
	}

	/* Do not bypass the scrambler/descrambler */
	err = phy_write(phydev, MII_DM9161_SCR, temp);
	if (err < 0)
		return err;

	/* Clear 10BTCSR to default */
	err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);

	if (err < 0)
		return err;

	/* Reconnect the PHY, and enable Autonegotiation */
	err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);

	if (err < 0)
		return err;

	return 0;
}

/*
 * dm9161_ack_interrupt - clear a pending PHY interrupt
 * @phydev: target phy_device
 *
 * Reading the interrupt register acknowledges (clears) the latched
 * change bits.  Returns 0 on success or the negative errno from
 * phy_read().
 */
static int dm9161_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_DM9161_INTR);

	return (err < 0) ? err : 0;
}

/* DM9161E: polled operation only (no interrupt callbacks registered) */
static struct phy_driver dm9161e_driver = {
	.phy_id		= 0x0181b880,
	.name		= "Davicom DM9161E",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.config_init	= dm9161_config_init,
	.config_aneg	= dm9161_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE,},
};

/* DM9161A: same programming model as the DM9161E, different revision ID */
static struct phy_driver dm9161a_driver = {
	.phy_id		= 0x0181b8a0,
	.name		= "Davicom DM9161A",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.config_init	= dm9161_config_init,
	.config_aneg	= dm9161_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE,},
};

/* DM9131: interrupt-capable, uses generic aneg (no DM9161-style init) */
static struct phy_driver dm9131_driver = {
	.phy_id		= 0x00181b80,
	.name		= "Davicom DM9131",
	.phy_id_mask	= 0x0ffffff0,
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.ack_interrupt	= dm9161_ack_interrupt,
	.config_intr	= dm9161_config_intr,
	.driver		= { .owner = THIS_MODULE,},
};

/*
 * Register all three drivers; on failure unwind the ones already
 * registered (err3 undoes dm9161a, err2 undoes dm9161e).
 */
static int __init davicom_init(void)
{
	int ret;

	ret = phy_driver_register(&dm9161e_driver);
	if (ret)
		goto err1;

	ret = phy_driver_register(&dm9161a_driver);
	if (ret)
		goto err2;

	ret = phy_driver_register(&dm9131_driver);
	if (ret)
		goto err3;
	return 0;

 err3:
	phy_driver_unregister(&dm9161a_driver);
 err2:
	phy_driver_unregister(&dm9161e_driver);
 err1:
	return ret;
}

static void __exit davicom_exit(void)
{
	phy_driver_unregister(&dm9161e_driver);
	phy_driver_unregister(&dm9161a_driver);
	phy_driver_unregister(&dm9131_driver);
}

module_init(davicom_init);
module_exit(davicom_exit);

/* IDs matched for module autoloading via MODULE_DEVICE_TABLE */
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
	{ 0x0181b880, 0x0ffffff0 },
	{ 0x0181b8a0, 0x0ffffff0 },
	{ 0x00181b80, 0x0ffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, davicom_tbl);
gpl-2.0
bingfengxiaokai/kernel3.4.2-transplant
arch/sh/drivers/pci/fixups-cayman.c
9687
2200
#include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/types.h> #include <cpu/irq.h> #include "pci-sh5.h" int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int result = -1; /* The complication here is that the PCI IRQ lines from the Cayman's 2 5V slots get into the CPU via a different path from the IRQ lines from the 3 3.3V slots. Thus, we have to detect whether the card's interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' at the point where we cross from 5V to 3.3V is not the normal case. The added complication is that we don't know that the 5V slots are always bus 2, because a card containing a PCI-PCI bridge may be plugged into a 3.3V slot, and this changes the bus numbering. Also, the Cayman has an intermediate PCI bus that goes a custom expansion board header (and to the secondary bridge). This bus has never been used in practice. The 1ary onboard PCI-PCI bridge is device 3 on bus 0 The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge. */ struct slot_pin { int slot; int pin; } path[4]; int i=0; while (dev->bus->number > 0) { slot = path[i].slot = PCI_SLOT(dev->devfn); pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); dev = dev->bus->self; i++; if (i > 3) panic("PCI path to root bus too long!\n"); } slot = PCI_SLOT(dev->devfn); /* This is the slot on bus 0 through which the device is eventually reachable. */ /* Now work back up. */ if ((slot < 3) || (i == 0)) { /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final swizzle now. */ result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1; } else { i--; slot = path[i].slot; pin = path[i].pin; if (slot > 0) { panic("PCI expansion bus device found - not handled!\n"); } else { if (i > 0) { /* 5V slots */ i--; slot = path[i].slot; pin = path[i].pin; /* 'pin' was swizzled earlier wrt slot, don't do it again. 
*/ result = IRQ_P2INTA + (pin - 1); } else { /* IRQ for 2ary PCI-PCI bridge : unused */ result = -1; } } } return result; }
gpl-2.0
wan5xp/android_kernel_xiaomi_armani
arch/sh/drivers/pci/fixups-cayman.c
9687
2200
#include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/types.h> #include <cpu/irq.h> #include "pci-sh5.h" int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int result = -1; /* The complication here is that the PCI IRQ lines from the Cayman's 2 5V slots get into the CPU via a different path from the IRQ lines from the 3 3.3V slots. Thus, we have to detect whether the card's interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' at the point where we cross from 5V to 3.3V is not the normal case. The added complication is that we don't know that the 5V slots are always bus 2, because a card containing a PCI-PCI bridge may be plugged into a 3.3V slot, and this changes the bus numbering. Also, the Cayman has an intermediate PCI bus that goes a custom expansion board header (and to the secondary bridge). This bus has never been used in practice. The 1ary onboard PCI-PCI bridge is device 3 on bus 0 The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge. */ struct slot_pin { int slot; int pin; } path[4]; int i=0; while (dev->bus->number > 0) { slot = path[i].slot = PCI_SLOT(dev->devfn); pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); dev = dev->bus->self; i++; if (i > 3) panic("PCI path to root bus too long!\n"); } slot = PCI_SLOT(dev->devfn); /* This is the slot on bus 0 through which the device is eventually reachable. */ /* Now work back up. */ if ((slot < 3) || (i == 0)) { /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final swizzle now. */ result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1; } else { i--; slot = path[i].slot; pin = path[i].pin; if (slot > 0) { panic("PCI expansion bus device found - not handled!\n"); } else { if (i > 0) { /* 5V slots */ i--; slot = path[i].slot; pin = path[i].pin; /* 'pin' was swizzled earlier wrt slot, don't do it again. 
*/ result = IRQ_P2INTA + (pin - 1); } else { /* IRQ for 2ary PCI-PCI bridge : unused */ result = -1; } } } return result; }
gpl-2.0
El-Nath/bidji-find5
arch/powerpc/kernel/msi.c
11991
1099
/* * Copyright 2006-2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/msi.h> #include <linux/pci.h> #include <asm/machdep.h> int arch_msi_check_device(struct pci_dev* dev, int nvec, int type) { if (!ppc_md.setup_msi_irqs || !ppc_md.teardown_msi_irqs) { pr_debug("msi: Platform doesn't provide MSI callbacks.\n"); return -ENOSYS; } /* PowerPC doesn't support multiple MSI yet */ if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; if (ppc_md.msi_check_device) { pr_debug("msi: Using platform check routine.\n"); return ppc_md.msi_check_device(dev, nvec, type); } return 0; } int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { return ppc_md.setup_msi_irqs(dev, nvec, type); } void arch_teardown_msi_irqs(struct pci_dev *dev) { ppc_md.teardown_msi_irqs(dev); }
gpl-2.0
derekzhuo/android-goldfish-2.6.29
drivers/rapidio/rio-sysfs.c
216
5143
/* * RapidIO sysfs attributes and support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/stat.h> #include "rio.h" /* Sysfs support */ #define rio_config_attr(field, format_string) \ static ssize_t \ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct rio_dev *rdev = to_rio_dev(dev); \ \ return sprintf(buf, format_string, rdev->field); \ } \ rio_config_attr(did, "0x%04x\n"); rio_config_attr(vid, "0x%04x\n"); rio_config_attr(device_rev, "0x%08x\n"); rio_config_attr(asm_did, "0x%04x\n"); rio_config_attr(asm_vid, "0x%04x\n"); rio_config_attr(asm_rev, "0x%04x\n"); static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rio_dev *rdev = to_rio_dev(dev); char *str = buf; int i; if (!rdev->rswitch) goto out; for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); i++) { if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) continue; str += sprintf(str, "%04x %02x\n", i, rdev->rswitch->route_table[i]); } out: return (str - buf); } struct device_attribute rio_dev_attrs[] = { __ATTR_RO(did), __ATTR_RO(vid), __ATTR_RO(device_rev), __ATTR_RO(asm_did), __ATTR_RO(asm_vid), __ATTR_RO(asm_rev), __ATTR_RO(routes), __ATTR_NULL, }; static ssize_t rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(container_of(kobj, struct device, kobj)); unsigned int size = 0x100; loff_t init_off = off; u8 *data = (u8 *) buf; /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) size = 0x200000; if (off > 
size) return 0; if (off + count > size) { size -= off; count = size; } else { size = count; } if ((off & 1) && size) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; size--; } if ((off & 3) && size > 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } while (size > 3) { u32 val; rio_read_config_32(dev, off, &val); data[off - init_off] = (val >> 24) & 0xff; data[off - init_off + 1] = (val >> 16) & 0xff; data[off - init_off + 2] = (val >> 8) & 0xff; data[off - init_off + 3] = val & 0xff; off += 4; size -= 4; } if (size >= 2) { u16 val; rio_read_config_16(dev, off, &val); data[off - init_off] = (val >> 8) & 0xff; data[off - init_off + 1] = val & 0xff; off += 2; size -= 2; } if (size > 0) { u8 val; rio_read_config_8(dev, off, &val); data[off - init_off] = val; off++; --size; } return count; } static ssize_t rio_write_config(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct rio_dev *dev = to_rio_dev(container_of(kobj, struct device, kobj)); unsigned int size = count; loff_t init_off = off; u8 *data = (u8 *) buf; if (off > 0x200000) return 0; if (off + count > 0x200000) { size = 0x200000 - off; count = size; } if ((off & 1) && size) { rio_write_config_8(dev, off, data[off - init_off]); off++; size--; } if ((off & 3) && (size > 2)) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } while (size > 3) { u32 val = data[off - init_off + 3]; val |= (u32) data[off - init_off + 2] << 8; val |= (u32) data[off - init_off + 1] << 16; val |= (u32) data[off - init_off] << 24; rio_write_config_32(dev, off, val); off += 4; size -= 4; } if (size >= 2) { u16 val = data[off - init_off + 1]; val |= (u16) data[off - init_off] << 8; rio_write_config_16(dev, off, val); off += 2; size -= 2; } if (size) { rio_write_config_8(dev, 
off, data[off - init_off]); off++; --size; } return count; } static struct bin_attribute rio_config_attr = { .attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, .size = 0x200000, .read = rio_read_config, .write = rio_write_config, }; /** * rio_create_sysfs_dev_files - create RIO specific sysfs files * @rdev: device whose entries should be created * * Create files when @rdev is added to sysfs. */ int rio_create_sysfs_dev_files(struct rio_dev *rdev) { sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr); return 0; } /** * rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files * @rdev: device whose entries we should free * * Cleanup when @rdev is removed from sysfs. */ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) { sysfs_remove_bin_file(&rdev->dev.kobj, &rio_config_attr); }
gpl-2.0
nadavitay/linux-3.14.1
net/ipv4/ah4.c
472
12748
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

/* Per-skb control block: xfrm state plus the temporary buffer that must
 * survive across an asynchronous hash operation. */
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate one scratch buffer laid out as:
 *   [caller data of 'size' bytes][aligned ICV][ahash_request][scatterlist*nfrags]
 * The helpers below (ah_tmp_auth/ah_tmp_icv/ah_tmp_req/ah_req_sg) locate
 * the individual sections inside this buffer.
 */
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

/* Start of the caller-data / auth-data section of the scratch buffer. */
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

/* ICV section, aligned for the hash implementation. */
static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

/* ahash_request section, placed right after the ICV. */
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

/* scatterlist array section, following the request + its context. */
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */
static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int  l = iph->ihl*4 - sizeof(struct iphdr);
	int  optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen<2 || optlen>l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

/*
 * Async completion for ah_output(): copy the computed ICV into the AH
 * header, restore the mutable IP header fields saved in the scratch
 * buffer, free it and resume xfrm output processing.
 */
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

/*
 * AH transmit path: save the mutable IP header fields into a scratch
 * buffer, zero them (and the ICV field) for the hash computation, run
 * the (possibly async) digest over the whole packet, then restore the
 * saved fields and install the truncated ICV.
 */
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	if (!iph)
		goto out;

	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	/* Save mutable fields (and options, for ihl > 5) before zeroing
	 * them for the ICV computation. */
	top_iph = ip_hdr(skb);
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	/* hdrlen is in 32-bit words minus 2, per RFC 4302 */
	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;	/* completion runs ah_output_done() */

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	/* Synchronous completion: same restore work as ah_output_done(). */
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

/*
 * Async completion for ah_input(): compare the computed ICV against the
 * received one, and on success strip the AH header and restore the
 * saved IP header; then resume xfrm input processing.
 */
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

/*
 * AH receive path: validate header length, save the IP header and the
 * received ICV into a scratch buffer, zero the mutable fields, digest
 * the packet and compare ICVs.  On success returns the inner protocol
 * number; on failure a negative errno.
 */
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	/* Accept both full-length and truncated-length ICV headers. */
	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	/* Save the header and received ICV, then zero mutable fields and
	 * the ICV in the packet for the digest computation. */
	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;	/* completion runs ah_input_done() */

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree (work_iph);
out:
	return err;
}

/*
 * ICMP error handler: update the PMTU or redirect information for the
 * xfrm state matching the AH SPI in the offending packet.
 */
static void ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return;
		/* fall through: FRAG_NEEDED is handled below like REDIRECT */
	case ICMP_REDIRECT:
		break;
	default:
		return;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
	xfrm_state_put(x);
}

/*
 * Allocate and initialize the per-state AH data: hash transform, key,
 * ICV lengths and header length.  Returns 0 or -EINVAL/-ENOMEM.
 */
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	/* NOTE(review): if crypto_alloc_ahash() fails, ahp->ahash is still
	 * NULL at the error label, so this path relies on
	 * crypto_free_ahash(NULL) being tolerated — verify. */
	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

/* Release the per-state AH data allocated by ah_init_state(). */
static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static const struct net_protocol ah4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	ah4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

/* Register the xfrm type first, then the inet protocol; unwind on failure. */
static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
gpl-2.0
DrSauerkraut/kernel_viewpad7e
drivers/media/dvb/frontends/si21xx.c
984
21530
/* DVB compliant Linux driver for the DVB-S si2109/2110 demodulator * * Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "si21xx.h" #define REVISION_REG 0x00 #define SYSTEM_MODE_REG 0x01 #define TS_CTRL_REG_1 0x02 #define TS_CTRL_REG_2 0x03 #define PIN_CTRL_REG_1 0x04 #define PIN_CTRL_REG_2 0x05 #define LOCK_STATUS_REG_1 0x0f #define LOCK_STATUS_REG_2 0x10 #define ACQ_STATUS_REG 0x11 #define ACQ_CTRL_REG_1 0x13 #define ACQ_CTRL_REG_2 0x14 #define PLL_DIVISOR_REG 0x15 #define COARSE_TUNE_REG 0x16 #define FINE_TUNE_REG_L 0x17 #define FINE_TUNE_REG_H 0x18 #define ANALOG_AGC_POWER_LEVEL_REG 0x28 #define CFO_ESTIMATOR_CTRL_REG_1 0x29 #define CFO_ESTIMATOR_CTRL_REG_2 0x2a #define CFO_ESTIMATOR_CTRL_REG_3 0x2b #define SYM_RATE_ESTIMATE_REG_L 0x31 #define SYM_RATE_ESTIMATE_REG_M 0x32 #define SYM_RATE_ESTIMATE_REG_H 0x33 #define CFO_ESTIMATOR_OFFSET_REG_L 0x36 #define CFO_ESTIMATOR_OFFSET_REG_H 0x37 #define CFO_ERROR_REG_L 0x38 #define CFO_ERROR_REG_H 0x39 #define SYM_RATE_ESTIMATOR_CTRL_REG 0x3a #define SYM_RATE_REG_L 0x3f #define SYM_RATE_REG_M 0x40 #define SYM_RATE_REG_H 0x41 #define SYM_RATE_ESTIMATOR_MAXIMUM_REG 0x42 #define SYM_RATE_ESTIMATOR_MINIMUM_REG 0x43 #define C_N_ESTIMATOR_CTRL_REG 0x7c #define C_N_ESTIMATOR_THRSHLD_REG 0x7d #define C_N_ESTIMATOR_LEVEL_REG_L 0x7e #define C_N_ESTIMATOR_LEVEL_REG_H 0x7f #define BLIND_SCAN_CTRL_REG 0x80 #define LSA_CTRL_REG_1 0x8D #define SPCTRM_TILT_CORR_THRSHLD_REG 0x8f #define ONE_DB_BNDWDTH_THRSHLD_REG 0x90 #define TWO_DB_BNDWDTH_THRSHLD_REG 0x91 #define 
THREE_DB_BNDWDTH_THRSHLD_REG 0x92 #define INBAND_POWER_THRSHLD_REG 0x93 #define REF_NOISE_LVL_MRGN_THRSHLD_REG 0x94 #define VIT_SRCH_CTRL_REG_1 0xa0 #define VIT_SRCH_CTRL_REG_2 0xa1 #define VIT_SRCH_CTRL_REG_3 0xa2 #define VIT_SRCH_STATUS_REG 0xa3 #define VITERBI_BER_COUNT_REG_L 0xab #define REED_SOLOMON_CTRL_REG 0xb0 #define REED_SOLOMON_ERROR_COUNT_REG_L 0xb1 #define PRBS_CTRL_REG 0xb5 #define LNB_CTRL_REG_1 0xc0 #define LNB_CTRL_REG_2 0xc1 #define LNB_CTRL_REG_3 0xc2 #define LNB_CTRL_REG_4 0xc3 #define LNB_CTRL_STATUS_REG 0xc4 #define LNB_FIFO_REGS_0 0xc5 #define LNB_FIFO_REGS_1 0xc6 #define LNB_FIFO_REGS_2 0xc7 #define LNB_FIFO_REGS_3 0xc8 #define LNB_FIFO_REGS_4 0xc9 #define LNB_FIFO_REGS_5 0xca #define LNB_SUPPLY_CTRL_REG_1 0xcb #define LNB_SUPPLY_CTRL_REG_2 0xcc #define LNB_SUPPLY_CTRL_REG_3 0xcd #define LNB_SUPPLY_CTRL_REG_4 0xce #define LNB_SUPPLY_STATUS_REG 0xcf #define FAIL -1 #define PASS 0 #define ALLOWABLE_FS_COUNT 10 #define STATUS_BER 0 #define STATUS_UCBLOCKS 1 static int debug; #define dprintk(args...) 
\ do { \ if (debug) \ printk(KERN_DEBUG "si21xx: " args); \ } while (0) enum { ACTIVE_HIGH, ACTIVE_LOW }; enum { BYTE_WIDE, BIT_WIDE }; enum { CLK_GAPPED_MODE, CLK_CONTINUOUS_MODE }; enum { RISING_EDGE, FALLING_EDGE }; enum { MSB_FIRST, LSB_FIRST }; enum { SERIAL, PARALLEL }; struct si21xx_state { struct i2c_adapter *i2c; const struct si21xx_config *config; struct dvb_frontend frontend; u8 initialised:1; int errmode; int fs; /*Sampling rate of the ADC in MHz*/ }; /* register default initialization */ static u8 serit_sp1511lhb_inittab[] = { 0x01, 0x28, /* set i2c_inc_disable */ 0x20, 0x03, 0x27, 0x20, 0xe0, 0x45, 0xe1, 0x08, 0xfe, 0x01, 0x01, 0x28, 0x89, 0x09, 0x04, 0x80, 0x05, 0x01, 0x06, 0x00, 0x20, 0x03, 0x24, 0x88, 0x29, 0x09, 0x2a, 0x0f, 0x2c, 0x10, 0x2d, 0x19, 0x2e, 0x08, 0x2f, 0x10, 0x30, 0x19, 0x34, 0x20, 0x35, 0x03, 0x45, 0x02, 0x46, 0x45, 0x47, 0xd0, 0x48, 0x00, 0x49, 0x40, 0x4a, 0x03, 0x4c, 0xfd, 0x4f, 0x2e, 0x50, 0x2e, 0x51, 0x10, 0x52, 0x10, 0x56, 0x92, 0x59, 0x00, 0x5a, 0x2d, 0x5b, 0x33, 0x5c, 0x1f, 0x5f, 0x76, 0x62, 0xc0, 0x63, 0xc0, 0x64, 0xf3, 0x65, 0xf3, 0x79, 0x40, 0x6a, 0x40, 0x6b, 0x0a, 0x6c, 0x80, 0x6d, 0x27, 0x71, 0x06, 0x75, 0x60, 0x78, 0x00, 0x79, 0xb5, 0x7c, 0x05, 0x7d, 0x1a, 0x87, 0x55, 0x88, 0x72, 0x8f, 0x08, 0x90, 0xe0, 0x94, 0x40, 0xa0, 0x3f, 0xa1, 0xc0, 0xa4, 0xcc, 0xa5, 0x66, 0xa6, 0x66, 0xa7, 0x7b, 0xa8, 0x7b, 0xa9, 0x7b, 0xaa, 0x9a, 0xed, 0x04, 0xad, 0x00, 0xae, 0x03, 0xcc, 0xab, 0x01, 0x08, 0xff, 0xff }; /* low level read/writes */ static int si21_writeregs(struct si21xx_state *state, u8 reg1, u8 *data, int len) { int ret; u8 buf[60];/* = { reg1, data };*/ struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = len + 1 }; msg.buf[0] = reg1; memcpy(msg.buf + 1, data, len); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, " "ret == %i)\n", __func__, reg1, data[0], ret); return (ret != 1) ? 
-EREMOTEIO : 0; } static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, " "ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? -EREMOTEIO : 0; } static int si21_write(struct dvb_frontend *fe, u8 *buf, int len) { struct si21xx_state *state = fe->demodulator_priv; if (len != 2) return -EINVAL; return si21_writereg(state, buf[0], buf[1]); } static u8 si21_readreg(struct si21xx_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return b1[0]; } static int si21_readregs(struct si21xx_state *state, u8 reg1, u8 *b, u8 len) { int ret; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = &reg1, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b, .len = len } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) dprintk("%s: readreg error (ret == %i)\n", __func__, ret); return ret == 2 ? 
0 : -1; } static int si21xx_wait_diseqc_idle(struct si21xx_state *state, int timeout) { unsigned long start = jiffies; dprintk("%s\n", __func__); while ((si21_readreg(state, LNB_CTRL_REG_1) & 0x8) == 8) { if (jiffies - start > timeout) { dprintk("%s: timeout!!\n", __func__); return -ETIMEDOUT; } msleep(10); }; return 0; } static int si21xx_set_symbolrate(struct dvb_frontend *fe, u32 srate) { struct si21xx_state *state = fe->demodulator_priv; u32 sym_rate, data_rate; int i; u8 sym_rate_bytes[3]; dprintk("%s : srate = %i\n", __func__ , srate); if ((srate < 1000000) || (srate > 45000000)) return -EINVAL; data_rate = srate; sym_rate = 0; for (i = 0; i < 4; ++i) { sym_rate /= 100; sym_rate = sym_rate + ((data_rate % 100) * 0x800000) / state->fs; data_rate /= 100; } for (i = 0; i < 3; ++i) sym_rate_bytes[i] = (u8)((sym_rate >> (i * 8)) & 0xff); si21_writeregs(state, SYM_RATE_REG_L, sym_rate_bytes, 0x03); return 0; } static int si21xx_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *m) { struct si21xx_state *state = fe->demodulator_priv; u8 lnb_status; u8 LNB_CTRL_1; int status; dprintk("%s\n", __func__); status = PASS; LNB_CTRL_1 = 0; status |= si21_readregs(state, LNB_CTRL_STATUS_REG, &lnb_status, 0x01); status |= si21_readregs(state, LNB_CTRL_REG_1, &lnb_status, 0x01); /*fill the FIFO*/ status |= si21_writeregs(state, LNB_FIFO_REGS_0, m->msg, m->msg_len); LNB_CTRL_1 = (lnb_status & 0x70); LNB_CTRL_1 |= m->msg_len; LNB_CTRL_1 |= 0x80; /* begin LNB signaling */ status |= si21_writeregs(state, LNB_CTRL_REG_1, &LNB_CTRL_1, 0x01); return status; } static int si21xx_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct si21xx_state *state = fe->demodulator_priv; u8 val; dprintk("%s\n", __func__); if (si21xx_wait_diseqc_idle(state, 100) < 0) return -ETIMEDOUT; val = (0x80 | si21_readreg(state, 0xc1)); if (si21_writereg(state, LNB_CTRL_REG_1, burst == SEC_MINI_A ? 
(val & ~0x10) : (val | 0x10))) return -EREMOTEIO; if (si21xx_wait_diseqc_idle(state, 100) < 0) return -ETIMEDOUT; if (si21_writereg(state, LNB_CTRL_REG_1, val)) return -EREMOTEIO; return 0; } /* 30.06.2008 */ static int si21xx_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct si21xx_state *state = fe->demodulator_priv; u8 val; dprintk("%s\n", __func__); val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1)); switch (tone) { case SEC_TONE_ON: return si21_writereg(state, LNB_CTRL_REG_1, val | 0x20); case SEC_TONE_OFF: return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x20)); default: return -EINVAL; } } static int si21xx_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt) { struct si21xx_state *state = fe->demodulator_priv; u8 val; dprintk("%s: %s\n", __func__, volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" : volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??"); val = (0x80 | si21_readreg(state, LNB_CTRL_REG_1)); switch (volt) { case SEC_VOLTAGE_18: return si21_writereg(state, LNB_CTRL_REG_1, val | 0x40); break; case SEC_VOLTAGE_13: return si21_writereg(state, LNB_CTRL_REG_1, (val & ~0x40)); break; default: return -EINVAL; }; } static int si21xx_init(struct dvb_frontend *fe) { struct si21xx_state *state = fe->demodulator_priv; int i; int status = 0; u8 reg1; u8 val; u8 reg2[2]; dprintk("%s\n", __func__); for (i = 0; ; i += 2) { reg1 = serit_sp1511lhb_inittab[i]; val = serit_sp1511lhb_inittab[i+1]; if (reg1 == 0xff && val == 0xff) break; si21_writeregs(state, reg1, &val, 1); } /*DVB QPSK SYSTEM MODE REG*/ reg1 = 0x08; si21_writeregs(state, SYSTEM_MODE_REG, &reg1, 0x01); /*transport stream config*/ /* mode = PARALLEL; sdata_form = LSB_FIRST; clk_edge = FALLING_EDGE; clk_mode = CLK_GAPPED_MODE; strt_len = BYTE_WIDE; sync_pol = ACTIVE_HIGH; val_pol = ACTIVE_HIGH; err_pol = ACTIVE_HIGH; sclk_rate = 0x00; parity = 0x00 ; data_delay = 0x00; clk_delay = 0x00; pclk_smooth = 0x00; */ reg2[0] = PARALLEL + (LSB_FIRST << 1) + (FALLING_EDGE << 2) + (CLK_GAPPED_MODE 
<< 3) + (BYTE_WIDE << 4) + (ACTIVE_HIGH << 5) + (ACTIVE_HIGH << 6) + (ACTIVE_HIGH << 7); reg2[1] = 0; /* sclk_rate + (parity << 2) + (data_delay << 3) + (clk_delay << 4) + (pclk_smooth << 5); */ status |= si21_writeregs(state, TS_CTRL_REG_1, reg2, 0x02); if (status != 0) dprintk(" %s : TS Set Error\n", __func__); return 0; } static int si21_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct si21xx_state *state = fe->demodulator_priv; u8 regs_read[2]; u8 reg_read; u8 i; u8 lock; u8 signal = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG); si21_readregs(state, LOCK_STATUS_REG_1, regs_read, 0x02); reg_read = 0; for (i = 0; i < 7; ++i) reg_read |= ((regs_read[0] >> i) & 0x01) << (6 - i); lock = ((reg_read & 0x7f) | (regs_read[1] & 0x80)); dprintk("%s : FE_READ_STATUS : VSTATUS: 0x%02x\n", __func__, lock); *status = 0; if (signal > 10) *status |= FE_HAS_SIGNAL; if (lock & 0x2) *status |= FE_HAS_CARRIER; if (lock & 0x20) *status |= FE_HAS_VITERBI; if (lock & 0x40) *status |= FE_HAS_SYNC; if ((lock & 0x7b) == 0x7b) *status |= FE_HAS_LOCK; return 0; } static int si21_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct si21xx_state *state = fe->demodulator_priv; /*status = si21_readreg(state, ANALOG_AGC_POWER_LEVEL_REG, (u8*)agclevel, 0x01);*/ u16 signal = (3 * si21_readreg(state, 0x27) * si21_readreg(state, 0x28)); dprintk("%s : AGCPWR: 0x%02x%02x, signal=0x%04x\n", __func__, si21_readreg(state, 0x27), si21_readreg(state, 0x28), (int) signal); signal <<= 4; *strength = signal; return 0; } static int si21_read_ber(struct dvb_frontend *fe, u32 *ber) { struct si21xx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); if (state->errmode != STATUS_BER) return 0; *ber = (si21_readreg(state, 0x1d) << 8) | si21_readreg(state, 0x1e); return 0; } static int si21_read_snr(struct dvb_frontend *fe, u16 *snr) { struct si21xx_state *state = fe->demodulator_priv; s32 xsnr = 0xffff - ((si21_readreg(state, 0x24) << 8) | si21_readreg(state, 
0x25)); xsnr = 3 * (xsnr - 0xa100); *snr = (xsnr > 0xffff) ? 0xffff : (xsnr < 0) ? 0 : xsnr; dprintk("%s\n", __func__); return 0; } static int si21_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct si21xx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); if (state->errmode != STATUS_UCBLOCKS) *ucblocks = 0; else *ucblocks = (si21_readreg(state, 0x1d) << 8) | si21_readreg(state, 0x1e); return 0; } /* initiates a channel acquisition sequence using the specified symbol rate and code rate */ static int si21xx_setacquire(struct dvb_frontend *fe, int symbrate, fe_code_rate_t crate) { struct si21xx_state *state = fe->demodulator_priv; u8 coderates[] = { 0x0, 0x01, 0x02, 0x04, 0x00, 0x8, 0x10, 0x20, 0x00, 0x3f }; u8 coderate_ptr; int status; u8 start_acq = 0x80; u8 reg, regs[3]; dprintk("%s\n", __func__); status = PASS; coderate_ptr = coderates[crate]; si21xx_set_symbolrate(fe, symbrate); /* write code rates to use in the Viterbi search */ status |= si21_writeregs(state, VIT_SRCH_CTRL_REG_1, &coderate_ptr, 0x01); /* clear acq_start bit */ status |= si21_readregs(state, ACQ_CTRL_REG_2, &reg, 0x01); reg &= ~start_acq; status |= si21_writeregs(state, ACQ_CTRL_REG_2, &reg, 0x01); /* use new Carrier Frequency Offset Estimator (QuickLock) */ regs[0] = 0xCB; regs[1] = 0x40; regs[2] = 0xCB; status |= si21_writeregs(state, TWO_DB_BNDWDTH_THRSHLD_REG, &regs[0], 0x03); reg = 0x56; status |= si21_writeregs(state, LSA_CTRL_REG_1, &reg, 1); reg = 0x05; status |= si21_writeregs(state, BLIND_SCAN_CTRL_REG, &reg, 1); /* start automatic acq */ status |= si21_writeregs(state, ACQ_CTRL_REG_2, &start_acq, 0x01); return status; } static int si21xx_set_property(struct dvb_frontend *fe, struct dtv_property *p) { dprintk("%s(..)\n", __func__); return 0; } static int si21xx_get_property(struct dvb_frontend *fe, struct dtv_property *p) { dprintk("%s(..)\n", __func__); return 0; } static int si21xx_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *dfp) { 
struct si21xx_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /* freq Channel carrier frequency in KHz (i.e. 1550000 KHz) datarate Channel symbol rate in Sps (i.e. 22500000 Sps)*/ /* in MHz */ unsigned char coarse_tune_freq; int fine_tune_freq; unsigned char sample_rate = 0; /* boolean */ bool inband_interferer_ind; /* INTERMEDIATE VALUES */ int icoarse_tune_freq; /* MHz */ int ifine_tune_freq; /* MHz */ unsigned int band_high; unsigned int band_low; unsigned int x1; unsigned int x2; int i; bool inband_interferer_div2[ALLOWABLE_FS_COUNT]; bool inband_interferer_div4[ALLOWABLE_FS_COUNT]; int status; /* allowable sample rates for ADC in MHz */ int afs[ALLOWABLE_FS_COUNT] = { 200, 192, 193, 194, 195, 196, 204, 205, 206, 207 }; /* in MHz */ int if_limit_high; int if_limit_low; int lnb_lo; int lnb_uncertanity; int rf_freq; int data_rate; unsigned char regs[4]; dprintk("%s : FE_SET_FRONTEND\n", __func__); if (c->delivery_system != SYS_DVBS) { dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) inband_interferer_div2[i] = inband_interferer_div4[i] = false; if_limit_high = -700000; if_limit_low = -100000; /* in MHz */ lnb_lo = 0; lnb_uncertanity = 0; rf_freq = 10 * c->frequency ; data_rate = c->symbol_rate / 100; status = PASS; band_low = (rf_freq - lnb_lo) - ((lnb_uncertanity * 200) + (data_rate * 135)) / 200; band_high = (rf_freq - lnb_lo) + ((lnb_uncertanity * 200) + (data_rate * 135)) / 200; icoarse_tune_freq = 100000 * (((rf_freq - lnb_lo) - (if_limit_low + if_limit_high) / 2) / 100000); ifine_tune_freq = (rf_freq - lnb_lo) - icoarse_tune_freq ; for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { x1 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) * (afs[i] * 2500) + afs[i] * 2500; x2 = ((rf_freq - lnb_lo) / (afs[i] * 2500)) * (afs[i] * 2500); if (((band_low < x1) && (x1 < band_high)) || ((band_low < x2) && (x2 < band_high))) 
inband_interferer_div4[i] = true; } for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { x1 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) * (afs[i] * 5000) + afs[i] * 5000; x2 = ((rf_freq - lnb_lo) / (afs[i] * 5000)) * (afs[i] * 5000); if (((band_low < x1) && (x1 < band_high)) || ((band_low < x2) && (x2 < band_high))) inband_interferer_div2[i] = true; } inband_interferer_ind = true; for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { if (inband_interferer_div2[i] || inband_interferer_div4[i]) { inband_interferer_ind = false; break; } } if (inband_interferer_ind) { for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { if (!inband_interferer_div2[i]) { sample_rate = (u8) afs[i]; break; } } } else { for (i = 0; i < ALLOWABLE_FS_COUNT; ++i) { if ((inband_interferer_div2[i] || !inband_interferer_div4[i])) { sample_rate = (u8) afs[i]; break; } } } if (sample_rate > 207 || sample_rate < 192) sample_rate = 200; fine_tune_freq = ((0x4000 * (ifine_tune_freq / 10)) / ((sample_rate) * 1000)); coarse_tune_freq = (u8)(icoarse_tune_freq / 100000); regs[0] = sample_rate; regs[1] = coarse_tune_freq; regs[2] = fine_tune_freq & 0xFF; regs[3] = fine_tune_freq >> 8 & 0xFF; status |= si21_writeregs(state, PLL_DIVISOR_REG, &regs[0], 0x04); state->fs = sample_rate;/*ADC MHz*/ si21xx_setacquire(fe, c->symbol_rate, c->fec_inner); return 0; } static int si21xx_sleep(struct dvb_frontend *fe) { struct si21xx_state *state = fe->demodulator_priv; u8 regdata; dprintk("%s\n", __func__); si21_readregs(state, SYSTEM_MODE_REG, &regdata, 0x01); regdata |= 1 << 6; si21_writeregs(state, SYSTEM_MODE_REG, &regdata, 0x01); state->initialised = 0; return 0; } static void si21xx_release(struct dvb_frontend *fe) { struct si21xx_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); kfree(state); } static struct dvb_frontend_ops si21xx_ops = { .info = { .name = "SL SI21XX DVB-S", .type = FE_QPSK, .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 125, /* kHz for QPSK frontends */ .frequency_tolerance = 0, 
.symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, /* ppm */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_QPSK | FE_CAN_FEC_AUTO }, .release = si21xx_release, .init = si21xx_init, .sleep = si21xx_sleep, .write = si21_write, .read_status = si21_read_status, .read_ber = si21_read_ber, .read_signal_strength = si21_read_signal_strength, .read_snr = si21_read_snr, .read_ucblocks = si21_read_ucblocks, .diseqc_send_master_cmd = si21xx_send_diseqc_msg, .diseqc_send_burst = si21xx_send_diseqc_burst, .set_tone = si21xx_set_tone, .set_voltage = si21xx_set_voltage, .set_property = si21xx_set_property, .get_property = si21xx_get_property, .set_frontend = si21xx_set_frontend, }; struct dvb_frontend *si21xx_attach(const struct si21xx_config *config, struct i2c_adapter *i2c) { struct si21xx_state *state = NULL; int id; dprintk("%s\n", __func__); /* allocate memory for the internal state */ state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->initialised = 0; state->errmode = STATUS_BER; /* check if the demod is there */ id = si21_readreg(state, SYSTEM_MODE_REG); si21_writereg(state, SYSTEM_MODE_REG, id | 0x40); /* standby off */ msleep(200); id = si21_readreg(state, 0x00); /* register 0x00 contains: 0x34 for SI2107 0x24 for SI2108 0x14 for SI2109 0x04 for SI2110 */ if (id != 0x04 && id != 0x14) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &si21xx_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(si21xx_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("SL SI21XX DVB Demodulator driver"); MODULE_AUTHOR("Igor M. Liplianin"); MODULE_LICENSE("GPL");
gpl-2.0
Entropy512/linux_kernel_galaxyplayer
samples/tracepoints/tracepoint-sample.c
1496
1188
/* tracepoint-sample.c * * Executes a tracepoint when /proc/tracepoint-sample is opened. * * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> * * This file is released under the GPLv2. * See the file COPYING for more details. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include "tp-samples-trace.h" DEFINE_TRACE(subsys_event); DEFINE_TRACE(subsys_eventb); struct proc_dir_entry *pentry_sample; static int my_open(struct inode *inode, struct file *file) { int i; trace_subsys_event(inode, file); for (i = 0; i < 10; i++) trace_subsys_eventb(); return -EPERM; } static const struct file_operations mark_ops = { .open = my_open, }; static int __init sample_init(void) { printk(KERN_ALERT "sample init\n"); pentry_sample = proc_create("tracepoint-sample", 0444, NULL, &mark_ops); if (!pentry_sample) return -EPERM; return 0; } static void __exit sample_exit(void) { printk(KERN_ALERT "sample exit\n"); remove_proc_entry("tracepoint-sample", NULL); } module_init(sample_init) module_exit(sample_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mathieu Desnoyers"); MODULE_DESCRIPTION("Tracepoint sample");
gpl-2.0
zlux/tq
drivers/media/video/pwc/pwc-misc.c
1752
4178
/* Linux driver for Philips webcam Various miscellaneous functions and tables. (C) 1999-2003 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "pwc.h" const struct pwc_coord pwc_image_sizes[PSZ_MAX] = { { 128, 96, 0 }, /* sqcif */ { 160, 120, 0 }, /* qsif */ { 176, 144, 0 }, /* qcif */ { 320, 240, 0 }, /* sif */ { 352, 288, 0 }, /* cif */ { 640, 480, 0 }, /* vga */ }; /* x,y -> PSZ_ */ int pwc_decode_size(struct pwc_device *pdev, int width, int height) { int i, find; /* Make sure we don't go beyond our max size. NB: we have different limits for RAW and normal modes. In case you don't have the decompressor loaded or use RAW mode, the maximum viewable size is smaller. 
*/ if (pdev->vpalette == VIDEO_PALETTE_RAW) { if (width > pdev->abs_max.x || height > pdev->abs_max.y) { PWC_DEBUG_SIZE("VIDEO_PALETTE_RAW: going beyond abs_max.\n"); return -1; } } else { if (width > pdev->view_max.x || height > pdev->view_max.y) { PWC_DEBUG_SIZE("VIDEO_PALETTE_not RAW: going beyond view_max.\n"); return -1; } } /* Find the largest size supported by the camera that fits into the requested size. */ find = -1; for (i = 0; i < PSZ_MAX; i++) { if (pdev->image_mask & (1 << i)) { if (pwc_image_sizes[i].x <= width && pwc_image_sizes[i].y <= height) find = i; } } return find; } /* initialize variables depending on type and decompressor*/ void pwc_construct(struct pwc_device *pdev) { if (DEVICE_USE_CODEC1(pdev->type)) { pdev->view_min.x = 128; pdev->view_min.y = 96; pdev->view_max.x = 352; pdev->view_max.y = 288; pdev->abs_max.x = 352; pdev->abs_max.y = 288; pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QCIF | 1 << PSZ_CIF; pdev->vcinterface = 2; pdev->vendpoint = 4; pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; } else if (DEVICE_USE_CODEC3(pdev->type)) { pdev->view_min.x = 160; pdev->view_min.y = 120; pdev->view_max.x = 640; pdev->view_max.y = 480; pdev->image_mask = 1 << PSZ_QSIF | 1 << PSZ_SIF | 1 << PSZ_VGA; pdev->abs_max.x = 640; pdev->abs_max.y = 480; pdev->vcinterface = 3; pdev->vendpoint = 5; pdev->frame_header_size = TOUCAM_HEADER_SIZE; pdev->frame_trailer_size = TOUCAM_TRAILER_SIZE; } else /* if (DEVICE_USE_CODEC2(pdev->type)) */ { pdev->view_min.x = 128; pdev->view_min.y = 96; /* Anthill bug #38: PWC always reports max size, even without PWCX */ pdev->view_max.x = 640; pdev->view_max.y = 480; pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QSIF | 1 << PSZ_QCIF | 1 << PSZ_SIF | 1 << PSZ_CIF | 1 << PSZ_VGA; pdev->abs_max.x = 640; pdev->abs_max.y = 480; pdev->vcinterface = 3; pdev->vendpoint = 4; pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; } pdev->vpalette = VIDEO_PALETTE_YUV420P; /* default */ pdev->view_min.size = 
pdev->view_min.x * pdev->view_min.y; pdev->view_max.size = pdev->view_max.x * pdev->view_max.y; /* length of image, in YUV format; always allocate enough memory. */ pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2); }
gpl-2.0
EPDCenter/android_kernel_rikomagic_mk808
net/netfilter/ipvs/ip_vs_ftp.c
2520
11805
/* * ip_vs_ftp.c: IPVS ftp application module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * Changes: * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Most code here is taken from ip_masq_ftp.c in kernel 2.2. The difference * is that ip_vs_ftp module handles the reverse direction to ip_masq_ftp. * * IP_MASQ_FTP ftp masquerading module * * Version: @(#)ip_masq_ftp.c 0.04 02/05/96 * * Author: Wouter Gadeyne * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <linux/gfp.h> #include <net/protocol.h> #include <net/tcp.h> #include <asm/unaligned.h> #include <net/ip_vs.h> #define SERVER_STRING "227 Entering Passive Mode (" #define CLIENT_STRING "PORT " /* * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper * First port is set to the default port. 
*/ static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0}; module_param_array(ports, ushort, NULL, 0); MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands"); /* Dummy variable */ static int ip_vs_ftp_pasv; static int ip_vs_ftp_init_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { /* We use connection tracking for the command connection */ cp->flags |= IP_VS_CONN_F_NFCT; return 0; } static int ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { return 0; } /* * Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started * with the "pattern" and terminated with the "term" character. * <addr,port> is in network order. */ static int ip_vs_ftp_get_addrport(char *data, char *data_limit, const char *pattern, size_t plen, char term, __be32 *addr, __be16 *port, char **start, char **end) { unsigned char p[6]; int i = 0; if (data_limit - data < plen) { /* check if there is partial match */ if (strnicmp(data, pattern, data_limit - data) == 0) return -1; else return 0; } if (strnicmp(data, pattern, plen) != 0) { return 0; } *start = data + plen; for (data = *start; *data != term; data++) { if (data == data_limit) return -1; } *end = data; memset(p, 0, sizeof(p)); for (data = *start; data != *end; data++) { if (*data >= '0' && *data <= '9') { p[i] = p[i]*10 + *data - '0'; } else if (*data == ',' && i < 5) { i++; } else { /* unexpected character */ return -1; } } if (i != 5) return -1; *addr = get_unaligned((__be32 *)p); *port = get_unaligned((__be16 *)(p + 4)); return 1; } /* * Look at outgoing ftp packets to catch the response to a PASV command * from the server (inside-to-outside). * When we see one, we build a connection entry with the client address, * client port 0 (unknown at the moment), the server address and the * server port. Mark the current connection entry as a control channel * of the new entry. All this work is just to make the data connection * can be scheduled to the right server later. 
* * The outgoing packet should be something like * "227 Entering Passive Mode (xxx,xxx,xxx,xxx,ppp,ppp)". * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number. */ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff) { struct iphdr *iph; struct tcphdr *th; char *data, *data_limit; char *start, *end; union nf_inet_addr from; __be16 port; struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ unsigned buf_len; int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; struct net *net; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets */ if (cp->af == AF_INET6) return 1; #endif *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. */ if (!skb_make_writable(skb, skb->len)) return 0; if (cp->app_data == &ip_vs_ftp_pasv) { iph = ip_hdr(skb); th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); data = (char *)th + (th->doff << 2); data_limit = skb_tail_pointer(skb); if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING, sizeof(SERVER_STRING)-1, ')', &from.ip, &port, &start, &end) != 1) return 1; IP_VS_DBG(7, "PASV response (%pI4:%d) -> %pI4:%d detected\n", &from.ip, ntohs(port), &cp->caddr.ip, 0); /* * Now update or create an connection entry for it */ { struct ip_vs_conn_param p; ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, iph->protocol, &from, port, &cp->caddr, 0, &p); n_cp = ip_vs_conn_out_get(&p); } if (!n_cp) { struct ip_vs_conn_param p; ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, IPPROTO_TCP, &cp->caddr, 0, &cp->vaddr, port, &p); n_cp = ip_vs_conn_new(&p, &from, port, IP_VS_CONN_F_NO_CPORT | IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } /* * Replace the old passive address with the new one */ from.ip = 
n_cp->vaddr.ip; port = n_cp->vport; snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u", ((unsigned char *)&from.ip)[0], ((unsigned char *)&from.ip)[1], ((unsigned char *)&from.ip)[2], ((unsigned char *)&from.ip)[3], ntohs(port) >> 8, ntohs(port) & 0xFF); buf_len = strlen(buf); ct = nf_ct_get(skb, &ctinfo); if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { /* If mangling fails this function will return 0 * which will cause the packet to be dropped. * Mangling can only fail under memory pressure, * hopefully it will succeed on the retransmitted * packet. */ ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, start-data, end-start, buf, buf_len); if (ret) { ip_vs_nfct_expect_related(skb, ct, n_cp, IPPROTO_TCP, 0, 0); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_UNNECESSARY; /* csum is updated */ ret = 1; } } /* * Not setting 'diff' is intentional, otherwise the sequence * would be adjusted twice. */ net = skb_net(skb); cp->app_data = NULL; ip_vs_tcp_conn_listen(net, n_cp); ip_vs_conn_put(n_cp); return ret; } return 1; } /* * Look at incoming ftp packets to catch the PASV/PORT command * (outside-to-inside). * * The incoming packet having the PORT command should be something like * "PORT xxx,xxx,xxx,xxx,ppp,ppp\n". * xxx,xxx,xxx,xxx is the client address, ppp,ppp is the client port number. * In this case, we create a connection entry using the client address and * port, so that the active ftp data connection from the server can reach * the client. 
*/ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff) { struct iphdr *iph; struct tcphdr *th; char *data, *data_start, *data_limit; char *start, *end; union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; struct net *net; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets */ if (cp->af == AF_INET6) return 1; #endif /* no diff required for incoming packets */ *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. */ if (!skb_make_writable(skb, skb->len)) return 0; /* * Detecting whether it is passive */ iph = ip_hdr(skb); th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); /* Since there may be OPTIONS in the TCP packet and the HLEN is the length of the header in 32-bit multiples, it is accurate to calculate data address by th+HLEN*4 */ data = data_start = (char *)th + (th->doff << 2); data_limit = skb_tail_pointer(skb); while (data <= data_limit - 6) { if (strnicmp(data, "PASV\r\n", 6) == 0) { /* Passive mode on */ IP_VS_DBG(7, "got PASV at %td of %td\n", data - data_start, data_limit - data_start); cp->app_data = &ip_vs_ftp_pasv; return 1; } data++; } /* * To support virtual FTP server, the scenerio is as follows: * FTP client ----> Load Balancer ----> FTP server * First detect the port number in the application data, * then create a new connection entry for the coming data * connection. 
*/ if (ip_vs_ftp_get_addrport(data_start, data_limit, CLIENT_STRING, sizeof(CLIENT_STRING)-1, '\r', &to.ip, &port, &start, &end) != 1) return 1; IP_VS_DBG(7, "PORT %pI4:%d detected\n", &to.ip, ntohs(port)); /* Passive mode off */ cp->app_data = NULL; /* * Now update or create a connection entry for it */ IP_VS_DBG(7, "protocol %s %pI4:%d %pI4:%d\n", ip_vs_proto_name(iph->protocol), &to.ip, ntohs(port), &cp->vaddr.ip, 0); { struct ip_vs_conn_param p; ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, iph->protocol, &to, port, &cp->vaddr, htons(ntohs(cp->vport)-1), &p); n_cp = ip_vs_conn_in_get(&p); if (!n_cp) { n_cp = ip_vs_conn_new(&p, &cp->daddr, htons(ntohs(cp->dport)-1), IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } } /* * Move tunnel to listen state */ net = skb_net(skb); ip_vs_tcp_conn_listen(net, n_cp); ip_vs_conn_put(n_cp); return 1; } static struct ip_vs_app ip_vs_ftp = { .name = "ftp", .type = IP_VS_APP_TYPE_FTP, .protocol = IPPROTO_TCP, .module = THIS_MODULE, .incs_list = LIST_HEAD_INIT(ip_vs_ftp.incs_list), .init_conn = ip_vs_ftp_init_conn, .done_conn = ip_vs_ftp_done_conn, .bind_conn = NULL, .unbind_conn = NULL, .pkt_out = ip_vs_ftp_out, .pkt_in = ip_vs_ftp_in, }; /* * per netns ip_vs_ftp initialization */ static int __net_init __ip_vs_ftp_init(struct net *net) { int i, ret; struct ip_vs_app *app; struct netns_ipvs *ipvs = net_ipvs(net); app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); if (!app) return -ENOMEM; INIT_LIST_HEAD(&app->a_list); INIT_LIST_HEAD(&app->incs_list); ipvs->ftp_app = app; ret = register_ip_vs_app(net, app); if (ret) goto err_exit; for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { if (!ports[i]) continue; ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); if (ret) goto err_unreg; pr_info("%s: loaded support on port[%d] = %d\n", app->name, i, ports[i]); } return 0; err_unreg: unregister_ip_vs_app(net, app); err_exit: kfree(ipvs->ftp_app); 
return ret; } /* * netns exit */ static void __ip_vs_ftp_exit(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); unregister_ip_vs_app(net, ipvs->ftp_app); kfree(ipvs->ftp_app); } static struct pernet_operations ip_vs_ftp_ops = { .init = __ip_vs_ftp_init, .exit = __ip_vs_ftp_exit, }; int __init ip_vs_ftp_init(void) { int rv; rv = register_pernet_subsys(&ip_vs_ftp_ops); return rv; } /* * ip_vs_ftp finish. */ static void __exit ip_vs_ftp_exit(void) { unregister_pernet_subsys(&ip_vs_ftp_ops); } module_init(ip_vs_ftp_init); module_exit(ip_vs_ftp_exit); MODULE_LICENSE("GPL");
gpl-2.0
Megatron007/megabyte_bullhead
arch/powerpc/platforms/cell/spufs/file.c
2520
65744
/* * SPU file system -- file contents * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <arndb@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/export.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/time.h> #include <asm/spu.h> #include <asm/spu_info.h> #include <asm/uaccess.h> #include "spufs.h" #include "sputrace.h" #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) /* Simple attribute files */ struct spufs_attr { int (*get)(void *, u64 *); int (*set)(void *, u64); char get_buf[24]; /* enough to store a u64 and "\n\0" */ char set_buf[24]; void *data; const char *fmt; /* format for read operation */ struct mutex mutex; /* protects access to these buffers */ }; static int spufs_attr_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct spufs_attr *attr; attr = kmalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return -ENOMEM; attr->get = get; attr->set = set; attr->data = inode->i_private; attr->fmt = fmt; mutex_init(&attr->mutex); file->private_data = attr; return nonseekable_open(inode, file); } static int spufs_attr_release(struct inode *inode, struct file 
*file) { kfree(file->private_data); return 0; } static ssize_t spufs_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct spufs_attr *attr; size_t size; ssize_t ret; attr = file->private_data; if (!attr->get) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; if (*ppos) { /* continued read */ size = strlen(attr->get_buf); } else { /* first read */ u64 val; ret = attr->get(attr->data, &val); if (ret) goto out; size = scnprintf(attr->get_buf, sizeof(attr->get_buf), attr->fmt, (unsigned long long)val); } ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); out: mutex_unlock(&attr->mutex); return ret; } static ssize_t spufs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct spufs_attr *attr; u64 val; size_t size; ssize_t ret; attr = file->private_data; if (!attr->set) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) goto out; ret = len; /* claim we got the whole input */ attr->set_buf[size] = '\0'; val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: mutex_unlock(&attr->mutex); return ret; } #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ return spufs_attr_open(inode, file, __get, __set, __fmt); \ } \ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = spufs_attr_release, \ .read = spufs_attr_read, \ .write = spufs_attr_write, \ .llseek = generic_file_llseek, \ }; static int spufs_mem_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->local_store = inode->i_mapping; 
mutex_unlock(&ctx->mapping_lock); return 0; } static int spufs_mem_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->local_store = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t __spufs_mem_read(struct spu_context *ctx, char __user *buffer, size_t size, loff_t *pos) { char *local_store = ctx->ops->get_ls(ctx); return simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE); } static ssize_t spufs_mem_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; ret = spu_acquire(ctx); if (ret) return ret; ret = __spufs_mem_read(ctx, buffer, size, pos); spu_release(ctx); return ret; } static ssize_t spufs_mem_write(struct file *file, const char __user *buffer, size_t size, loff_t *ppos) { struct spu_context *ctx = file->private_data; char *local_store; loff_t pos = *ppos; int ret; if (pos > LS_SIZE) return -EFBIG; ret = spu_acquire(ctx); if (ret) return ret; local_store = ctx->ops->get_ls(ctx); size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); spu_release(ctx); return size; } static int spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct spu_context *ctx = vma->vm_file->private_data; unsigned long address = (unsigned long)vmf->virtual_address; unsigned long pfn, offset; #ifdef CONFIG_SPU_FS_64K_LS struct spu_state *csa = &ctx->csa; int psize; /* Check what page size we are using */ psize = get_slice_psize(vma->vm_mm, address); /* Some sanity checking */ BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K)); /* Wow, 64K, cool, we need to align the address though */ if (csa->use_big_pages) { BUG_ON(vma->vm_start & 0xffff); address &= ~0xfffful; } #endif /* CONFIG_SPU_FS_64K_LS */ offset = vmf->pgoff << PAGE_SHIFT; if (offset >= LS_SIZE) return VM_FAULT_SIGBUS; 
pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n", address, offset); if (spu_acquire(ctx)) return VM_FAULT_NOPAGE; if (ctx->state == SPU_STATE_SAVED) { vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); } else { vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; } vm_insert_pfn(vma, address, pfn); spu_release(ctx); return VM_FAULT_NOPAGE; } static int spufs_mem_mmap_access(struct vm_area_struct *vma, unsigned long address, void *buf, int len, int write) { struct spu_context *ctx = vma->vm_file->private_data; unsigned long offset = address - vma->vm_start; char *local_store; if (write && !(vma->vm_flags & VM_WRITE)) return -EACCES; if (spu_acquire(ctx)) return -EINTR; if ((offset + len) > vma->vm_end) len = vma->vm_end - offset; local_store = ctx->ops->get_ls(ctx); if (write) memcpy_toio(local_store + offset, buf, len); else memcpy_fromio(buf, local_store + offset, len); spu_release(ctx); return len; } static const struct vm_operations_struct spufs_mem_mmap_vmops = { .fault = spufs_mem_mmap_fault, .access = spufs_mem_mmap_access, }; static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) { #ifdef CONFIG_SPU_FS_64K_LS struct spu_context *ctx = file->private_data; struct spu_state *csa = &ctx->csa; /* Sanity check VMA alignment */ if (csa->use_big_pages) { pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx," " pgoff=0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); if (vma->vm_start & 0xffff) return -EINVAL; if (vma->vm_pgoff & 0xf) return -EINVAL; } #endif /* CONFIG_SPU_FS_64K_LS */ if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); vma->vm_ops = &spufs_mem_mmap_vmops; return 0; } #ifdef CONFIG_SPU_FS_64K_LS static unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned 
long pgoff, unsigned long flags) { struct spu_context *ctx = file->private_data; struct spu_state *csa = &ctx->csa; /* If not using big pages, fallback to normal MM g_u_a */ if (!csa->use_big_pages) return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); /* Else, try to obtain a 64K pages slice */ return slice_get_unmapped_area(addr, len, flags, MMU_PAGE_64K, 1); } #endif /* CONFIG_SPU_FS_64K_LS */ static const struct file_operations spufs_mem_fops = { .open = spufs_mem_open, .release = spufs_mem_release, .read = spufs_mem_read, .write = spufs_mem_write, .llseek = generic_file_llseek, .mmap = spufs_mem_mmap, #ifdef CONFIG_SPU_FS_64K_LS .get_unmapped_area = spufs_get_unmapped_area, #endif }; static int spufs_ps_fault(struct vm_area_struct *vma, struct vm_fault *vmf, unsigned long ps_offs, unsigned long ps_size) { struct spu_context *ctx = vma->vm_file->private_data; unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; int ret = 0; spu_context_nospu_trace(spufs_ps_fault__enter, ctx); if (offset >= ps_size) return VM_FAULT_SIGBUS; if (fatal_signal_pending(current)) return VM_FAULT_SIGBUS; /* * Because we release the mmap_sem, the context may be destroyed while * we're in spu_wait. Grab an extra reference so it isn't destroyed * in the meantime. */ get_spu_context(ctx); /* * We have to wait for context to be loaded before we have * pages to hand out to the user, but we don't want to wait * with the mmap_sem held. * It is possible to drop the mmap_sem here, but then we need * to return VM_FAULT_NOPAGE because the mappings may have * changed. 
*/ if (spu_acquire(ctx)) goto refault; if (ctx->state == SPU_STATE_SAVED) { up_read(&current->mm->mmap_sem); spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); down_read(&current->mm->mmap_sem); } else { area = ctx->spu->problem_phys + ps_offs; vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, (area + offset) >> PAGE_SHIFT); spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); } if (!ret) spu_release(ctx); refault: put_spu_context(ctx); return VM_FAULT_NOPAGE; } #if SPUFS_MMAP_4K static int spufs_cntl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); } static const struct vm_operations_struct spufs_cntl_mmap_vmops = { .fault = spufs_cntl_mmap_fault, }; /* * mmap support for problem state control area [0x4000 - 0x4fff]. */ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_cntl_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_cntl_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_cntl_get(void *data, u64 *val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; *val = ctx->ops->status_read(ctx); spu_release(ctx); return 0; } static int spufs_cntl_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->runcntl_write(ctx, val); spu_release(ctx); return 0; } static int spufs_cntl_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->cntl = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return 
simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } static int spufs_cntl_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; simple_attr_release(inode, file); mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->cntl = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_cntl_fops = { .open = spufs_cntl_open, .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, .llseek = generic_file_llseek, .mmap = spufs_cntl_mmap, }; static int spufs_regs_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); file->private_data = i->i_ctx; return 0; } static ssize_t __spufs_regs_read(struct spu_context *ctx, char __user *buffer, size_t size, loff_t *pos) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return simple_read_from_buffer(buffer, size, pos, lscsa->gprs, sizeof lscsa->gprs); } static ssize_t spufs_regs_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { int ret; struct spu_context *ctx = file->private_data; /* pre-check for file position: if we'd return EOF, there's no point * causing a deschedule */ if (*pos >= sizeof(ctx->csa.lscsa->gprs)) return 0; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = __spufs_regs_read(ctx, buffer, size, pos); spu_release_saved(ctx); return ret; } static ssize_t spufs_regs_write(struct file *file, const char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; if (*pos >= sizeof(lscsa->gprs)) return -EFBIG; ret = spu_acquire_saved(ctx); if (ret) return ret; size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, buffer, size); spu_release_saved(ctx); return size; } static const struct file_operations spufs_regs_fops = { .open = spufs_regs_open, .read = spufs_regs_read, .write = spufs_regs_write, 
.llseek = generic_file_llseek, }; static ssize_t __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer, size_t size, loff_t * pos) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return simple_read_from_buffer(buffer, size, pos, &lscsa->fpcr, sizeof(lscsa->fpcr)); } static ssize_t spufs_fpcr_read(struct file *file, char __user * buffer, size_t size, loff_t * pos) { int ret; struct spu_context *ctx = file->private_data; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = __spufs_fpcr_read(ctx, buffer, size, pos); spu_release_saved(ctx); return ret; } static ssize_t spufs_fpcr_write(struct file *file, const char __user * buffer, size_t size, loff_t * pos) { struct spu_context *ctx = file->private_data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; if (*pos >= sizeof(lscsa->fpcr)) return -EFBIG; ret = spu_acquire_saved(ctx); if (ret) return ret; size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, buffer, size); spu_release_saved(ctx); return size; } static const struct file_operations spufs_fpcr_fops = { .open = spufs_regs_open, .read = spufs_fpcr_read, .write = spufs_fpcr_write, .llseek = generic_file_llseek, }; /* generic open function for all pipe-like files */ static int spufs_pipe_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); file->private_data = i->i_ctx; return nonseekable_open(inode, file); } /* * Read as many bytes from the mailbox as possible, until * one of the conditions becomes true: * * - no more data available in the mailbox * - end of the user provided buffer * - end of the mapped area */ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 mbox_data, __user *udata; ssize_t count; if (len < 4) return -EINVAL; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; udata = (void __user *)buf; count = spu_acquire(ctx); if (count) return count; for (count = 0; (count + 4) <= len; count 
+= 4, udata++) { int ret; ret = ctx->ops->mbox_read(ctx, &mbox_data); if (ret == 0) break; /* * at the end of the mapped area, we can fault * but still need to return the data we have * read successfully so far. */ ret = __put_user(mbox_data, udata); if (ret) { if (!count) count = -EFAULT; break; } } spu_release(ctx); if (!count) count = -EAGAIN; return count; } static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 mbox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; spu_release(ctx); if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, .llseek = no_llseek, }; /* low-level ibox access function */ size_t spu_ibox_read(struct spu_context *ctx, u32 *data) { return ctx->ops->ibox_read(ctx, data); } static int spufs_ibox_fasync(int fd, struct file *file, int on) { struct spu_context *ctx = file->private_data; return fasync_helper(fd, file, on, &ctx->ibox_fasync); } /* interrupt-level ibox callback function. */ void spufs_ibox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (!ctx) return; wake_up_all(&ctx->ibox_wq); kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); } /* * Read as many bytes from the interrupt mailbox as possible, until * one of the conditions becomes true: * * - no more data available in the mailbox * - end of the user provided buffer * - end of the mapped area * * If the file is opened without O_NONBLOCK, we wait here until * any data is available, but return when we have been able to * read something. 
*/ static ssize_t spufs_ibox_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 ibox_data, __user *udata; ssize_t count; if (len < 4) return -EINVAL; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; udata = (void __user *)buf; count = spu_acquire(ctx); if (count) goto out; /* wait only for the first element */ count = 0; if (file->f_flags & O_NONBLOCK) { if (!spu_ibox_read(ctx, &ibox_data)) { count = -EAGAIN; goto out_unlock; } } else { count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); if (count) goto out; } /* if we can't write at all, return -EFAULT */ count = __put_user(ibox_data, udata); if (count) goto out_unlock; for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; ret = ctx->ops->ibox_read(ctx, &ibox_data); if (ret == 0) break; /* * at the end of the mapped area, we can fault * but still need to return the data we have * read successfully so far. */ ret = __put_user(ibox_data, udata); if (ret) break; } out_unlock: spu_release(ctx); out: return count; } static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait) { struct spu_context *ctx = file->private_data; unsigned int mask; poll_wait(file, &ctx->ibox_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. 
*/ mutex_lock(&ctx->state_mutex); mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); spu_release(ctx); return mask; } static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, .fasync = spufs_ibox_fasync, .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 ibox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; spu_release(ctx); if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, .llseek = no_llseek, }; /* low-level mailbox write */ size_t spu_wbox_write(struct spu_context *ctx, u32 data) { return ctx->ops->wbox_write(ctx, data); } static int spufs_wbox_fasync(int fd, struct file *file, int on) { struct spu_context *ctx = file->private_data; int ret; ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); return ret; } /* interrupt-level wbox callback function. */ void spufs_wbox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (!ctx) return; wake_up_all(&ctx->wbox_wq); kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); } /* * Write as many bytes to the interrupt mailbox as possible, until * one of the conditions becomes true: * * - the mailbox is full * - end of the user provided buffer * - end of the mapped area * * If the file is opened without O_NONBLOCK, we wait here until * space is available, but return when we have been able to * write something. 
*/ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 wbox_data, __user *udata; ssize_t count; if (len < 4) return -EINVAL; udata = (void __user *)buf; if (!access_ok(VERIFY_READ, buf, len)) return -EFAULT; if (__get_user(wbox_data, udata)) return -EFAULT; count = spu_acquire(ctx); if (count) goto out; /* * make sure we can at least write one element, by waiting * in case of !O_NONBLOCK */ count = 0; if (file->f_flags & O_NONBLOCK) { if (!spu_wbox_write(ctx, wbox_data)) { count = -EAGAIN; goto out_unlock; } } else { count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); if (count) goto out; } /* write as much as possible */ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; ret = __get_user(wbox_data, udata); if (ret) break; ret = spu_wbox_write(ctx, wbox_data); if (ret == 0) break; } out_unlock: spu_release(ctx); out: return count; } static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait) { struct spu_context *ctx = file->private_data; unsigned int mask; poll_wait(file, &ctx->wbox_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. 
*/ mutex_lock(&ctx->state_mutex); mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); spu_release(ctx); return mask; } static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, .fasync = spufs_wbox_fasync, .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 wbox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; spu_release(ctx); if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal1 = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_signal1_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal1 = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { int ret = 0; u32 data; if (len < 4) return -EINVAL; if (ctx->csa.spu_chnlcnt_RW[3]) { data = ctx->csa.spu_chnldata_RW[3]; ret = 4; } if (!ret) goto out; if (copy_to_user(buf, &data, 4)) return -EFAULT; out: return ret; } static ssize_t spufs_signal1_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { int ret; struct spu_context *ctx = file->private_data; ret = spu_acquire_saved(ctx); if (ret) return 
ret; ret = __spufs_signal1_read(ctx, buf, len, pos); spu_release_saved(ctx); return ret; } static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx; ssize_t ret; u32 data; ctx = file->private_data; if (len < 4) return -EINVAL; if (copy_from_user(&data, buf, 4)) return -EFAULT; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal1_write(ctx, data); spu_release(ctx); return 4; } static int spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE); #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole * signal 1 and 2 area */ return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); #else #error unsupported page size #endif } static const struct vm_operations_struct spufs_signal1_mmap_vmops = { .fault = spufs_signal1_mmap_fault, }; static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal1_mmap_vmops; return 0; } static const struct file_operations spufs_signal1_fops = { .open = spufs_signal1_open, .release = spufs_signal1_release, .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { .open = spufs_signal1_open, .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal2 = inode->i_mapping; 
mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_signal2_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal2 = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { int ret = 0; u32 data; if (len < 4) return -EINVAL; if (ctx->csa.spu_chnlcnt_RW[4]) { data = ctx->csa.spu_chnldata_RW[4]; ret = 4; } if (!ret) goto out; if (copy_to_user(buf, &data, 4)) return -EFAULT; out: return ret; } static ssize_t spufs_signal2_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = __spufs_signal2_read(ctx, buf, len, pos); spu_release_saved(ctx); return ret; } static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx; ssize_t ret; u32 data; ctx = file->private_data; if (len < 4) return -EINVAL; if (copy_from_user(&data, buf, 4)) return -EFAULT; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal2_write(ctx, data); spu_release(ctx); return 4; } #if SPUFS_MMAP_4K static int spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE); #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole * signal 1 and 2 area */ return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); #else #error unsupported page size #endif } static const struct vm_operations_struct spufs_signal2_mmap_vmops = { .fault = spufs_signal2_mmap_fault, }; static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & 
VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal2_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_signal2_mmap NULL #endif /* !SPUFS_MMAP_4K */ static const struct file_operations spufs_signal2_fops = { .open = spufs_signal2_open, .release = spufs_signal2_release, .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, .llseek = no_llseek, }; static const struct file_operations spufs_signal2_nosched_fops = { .open = spufs_signal2_open, .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, .llseek = no_llseek, }; /* * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the * work of acquiring (or not) the SPU context before calling through * to the actual get routine. The set routine is called directly. */ #define SPU_ATTR_NOACQUIRE 0 #define SPU_ATTR_ACQUIRE 1 #define SPU_ATTR_ACQUIRE_SAVED 2 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \ static int __##__get(void *data, u64 *val) \ { \ struct spu_context *ctx = data; \ int ret = 0; \ \ if (__acquire == SPU_ATTR_ACQUIRE) { \ ret = spu_acquire(ctx); \ if (ret) \ return ret; \ *val = __get(ctx); \ spu_release(ctx); \ } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \ ret = spu_acquire_saved(ctx); \ if (ret) \ return ret; \ *val = __get(ctx); \ spu_release_saved(ctx); \ } else \ *val = __get(ctx); \ \ return 0; \ } \ DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt); static int spufs_signal1_type_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal1_type_set(ctx, val); spu_release(ctx); return 0; } static u64 spufs_signal1_type_get(struct spu_context *ctx) { return ctx->ops->signal1_type_get(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, spufs_signal1_type_set, "%llu\n", 
SPU_ATTR_ACQUIRE); static int spufs_signal2_type_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal2_type_set(ctx, val); spu_release(ctx); return 0; } static u64 spufs_signal2_type_get(struct spu_context *ctx) { return ctx->ops->signal2_type_get(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); #if SPUFS_MMAP_4K static int spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE); } static const struct vm_operations_struct spufs_mss_mmap_vmops = { .fault = spufs_mss_mmap_fault, }; /* * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. */ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mss_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_mss_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_mss_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; file->private_data = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!i->i_openers++) ctx->mss = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_mss_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mss = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, .llseek = no_llseek, }; static int spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return 
spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE); } static const struct vm_operations_struct spufs_psmap_mmap_vmops = { .fault = spufs_psmap_mmap_fault, }; /* * mmap support for full problem state area [0x00000 - 0x1ffff]. */ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_psmap_mmap_vmops; return 0; } static int spufs_psmap_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = i->i_ctx; if (!i->i_openers++) ctx->psmap = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_psmap_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->psmap = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, .llseek = no_llseek, }; #if SPUFS_MMAP_4K static int spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE); } static const struct vm_operations_struct spufs_mfc_mmap_vmops = { .fault = spufs_mfc_mmap_fault, }; /* * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 
*/ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mfc_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_mfc_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_mfc_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; /* we don't want to deal with DMA into other processes */ if (ctx->owner != current->mm) return -EINVAL; if (atomic_read(&inode->i_count) != 1) return -EBUSY; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->mfc = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_mfc_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mfc = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } /* interrupt-level mfc callback function. 
*/ void spufs_mfc_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (!ctx) return; wake_up_all(&ctx->mfc_wq); pr_debug("%s %s\n", __func__, spu->name); if (ctx->mfc_fasync) { u32 free_elements, tagstatus; unsigned int mask; /* no need for spu_acquire in interrupt context */ free_elements = ctx->ops->get_mfc_free_elements(ctx); tagstatus = ctx->ops->read_mfc_tagstatus(ctx); mask = 0; if (free_elements & 0xffff) mask |= POLLOUT; if (tagstatus & ctx->tagwait) mask |= POLLIN; kill_fasync(&ctx->mfc_fasync, SIGIO, mask); } } static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) { /* See if there is one tag group is complete */ /* FIXME we need locking around tagwait */ *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; ctx->tagwait &= ~*status; if (*status) return 1; /* enable interrupt waiting for any tag group, may silently fail if interrupts are already enabled */ ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); return 0; } static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret = -EINVAL; u32 status; if (size != 4) goto out; ret = spu_acquire(ctx); if (ret) return ret; ret = -EINVAL; if (file->f_flags & O_NONBLOCK) { status = ctx->ops->read_mfc_tagstatus(ctx); if (!(status & ctx->tagwait)) ret = -EAGAIN; else /* XXX(hch): shouldn't we clear ret here? 
*/ ctx->tagwait &= ~status; } else { ret = spufs_wait(ctx->mfc_wq, spufs_read_mfc_tagstatus(ctx, &status)); if (ret) goto out; } spu_release(ctx); ret = 4; if (copy_to_user(buffer, &status, 4)) ret = -EFAULT; out: return ret; } static int spufs_check_valid_dma(struct mfc_dma_command *cmd) { pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa, cmd->ea, cmd->size, cmd->tag, cmd->cmd); switch (cmd->cmd) { case MFC_PUT_CMD: case MFC_PUTF_CMD: case MFC_PUTB_CMD: case MFC_GET_CMD: case MFC_GETF_CMD: case MFC_GETB_CMD: break; default: pr_debug("invalid DMA opcode %x\n", cmd->cmd); return -EIO; } if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) { pr_debug("invalid DMA alignment, ea %llx lsa %x\n", cmd->ea, cmd->lsa); return -EIO; } switch (cmd->size & 0xf) { case 1: break; case 2: if (cmd->lsa & 1) goto error; break; case 4: if (cmd->lsa & 3) goto error; break; case 8: if (cmd->lsa & 7) goto error; break; case 0: if (cmd->lsa & 15) goto error; break; error: default: pr_debug("invalid DMA alignment %x for size %x\n", cmd->lsa & 0xf, cmd->size); return -EIO; } if (cmd->size > 16 * 1024) { pr_debug("invalid DMA size %x\n", cmd->size); return -EIO; } if (cmd->tag & 0xfff0) { /* we reserve the higher tag numbers for kernel use */ pr_debug("invalid DMA tag\n"); return -EIO; } if (cmd->class) { /* not supported in this version */ pr_debug("invalid DMA class\n"); return -EIO; } return 0; } static int spu_send_mfc_command(struct spu_context *ctx, struct mfc_dma_command cmd, int *error) { *error = ctx->ops->send_mfc_command(ctx, &cmd); if (*error == -EAGAIN) { /* wait for any tag group to complete so we have space for the new command */ ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); /* try again, because the queue might be empty again */ *error = ctx->ops->send_mfc_command(ctx, &cmd); if (*error == -EAGAIN) return 0; } return 1; } static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; struct 
mfc_dma_command cmd; int ret = -EINVAL; if (size != sizeof cmd) goto out; ret = -EFAULT; if (copy_from_user(&cmd, buffer, sizeof cmd)) goto out; ret = spufs_check_valid_dma(&cmd); if (ret) goto out; ret = spu_acquire(ctx); if (ret) goto out; ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); if (ret) goto out; if (file->f_flags & O_NONBLOCK) { ret = ctx->ops->send_mfc_command(ctx, &cmd); } else { int status; ret = spufs_wait(ctx->mfc_wq, spu_send_mfc_command(ctx, cmd, &status)); if (ret) goto out; if (status) ret = status; } if (ret) goto out_unlock; ctx->tagwait |= 1 << cmd.tag; ret = size; out_unlock: spu_release(ctx); out: return ret; } static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait) { struct spu_context *ctx = file->private_data; u32 free_elements, tagstatus; unsigned int mask; poll_wait(file, &ctx->mfc_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. */ mutex_lock(&ctx->state_mutex); ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); free_elements = ctx->ops->get_mfc_free_elements(ctx); tagstatus = ctx->ops->read_mfc_tagstatus(ctx); spu_release(ctx); mask = 0; if (free_elements & 0xffff) mask |= POLLOUT | POLLWRNORM; if (tagstatus & ctx->tagwait) mask |= POLLIN | POLLRDNORM; pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, free_elements, tagstatus, ctx->tagwait); return mask; } static int spufs_mfc_flush(struct file *file, fl_owner_t id) { struct spu_context *ctx = file->private_data; int ret; ret = spu_acquire(ctx); if (ret) goto out; #if 0 /* this currently hangs */ ret = spufs_wait(ctx->mfc_wq, ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); if (ret) goto out; ret = spufs_wait(ctx->mfc_wq, ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); if (ret) goto out; #else ret = 0; #endif spu_release(ctx); out: return ret; } static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = 
file_inode(file); int err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (!err) { mutex_lock(&inode->i_mutex); err = spufs_mfc_flush(file, NULL); mutex_unlock(&inode->i_mutex); } return err; } static int spufs_mfc_fasync(int fd, struct file *file, int on) { struct spu_context *ctx = file->private_data; return fasync_helper(fd, file, on, &ctx->mfc_fasync); } static const struct file_operations spufs_mfc_fops = { .open = spufs_mfc_open, .release = spufs_mfc_release, .read = spufs_mfc_read, .write = spufs_mfc_write, .poll = spufs_mfc_poll, .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .fasync = spufs_mfc_fasync, .mmap = spufs_mfc_mmap, .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->npc_write(ctx, val); spu_release(ctx); return 0; } static u64 spufs_npc_get(struct spu_context *ctx) { return ctx->ops->npc_read(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "0x%llx\n", SPU_ATTR_ACQUIRE); static int spufs_decr_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->decr.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_decr_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->decr.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_decr_status_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; if (val) ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; else ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; spu_release_saved(ctx); return 0; } static u64 spufs_decr_status_get(struct spu_context *ctx) { if (ctx->csa.priv2.mfc_control_RW & 
MFC_CNTL_DECREMENTER_RUNNING) return SPU_DECR_STATUS_RUNNING; else return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, spufs_decr_status_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_event_mask_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->event_mask.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_event_mask_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->event_mask.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, spufs_event_mask_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static u64 spufs_event_status_get(struct spu_context *ctx) { struct spu_state *state = &ctx->csa; u64 stat; stat = state->spu_chnlcnt_RW[0]; if (stat) return state->spu_chnldata_RW[0]; return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) static int spufs_srr0_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->srr0.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_srr0_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->srr0.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) static u64 spufs_id_get(struct spu_context *ctx) { u64 num; if (ctx->state == SPU_STATE_RUNNABLE) num = ctx->spu->number; else num = (unsigned int)-1; return num; } DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE) static u64 spufs_object_id_get(struct spu_context *ctx) { /* FIXME: Should there really be no locking here? 
*/ return ctx->object_id; } static int spufs_object_id_set(void *data, u64 id) { struct spu_context *ctx = data; ctx->object_id = id; return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); static u64 spufs_lslr_get(struct spu_context *ctx) { return ctx->csa.priv2.spu_lslr_RW; } DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_info_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; file->private_data = ctx; return 0; } static int spufs_caps_show(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; if (!(ctx->flags & SPU_CREATE_NOSCHED)) seq_puts(s, "sched\n"); if (!(ctx->flags & SPU_CREATE_ISOLATE)) seq_puts(s, "step\n"); return 0; } static int spufs_caps_open(struct inode *inode, struct file *file) { return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_caps_fops = { .open = spufs_caps_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { u32 data; /* EOF if there's no entry in the mbox */ if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) return 0; data = ctx->csa.prob.pu_mb_R; return simple_read_from_buffer(buf, len, pos, &data, sizeof data); } static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { int ret; struct spu_context *ctx = file->private_data; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); ret = __spufs_mbox_info_read(ctx, buf, len, pos); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return ret; } static const struct file_operations spufs_mbox_info_fops = { .open = spufs_info_open, .read = 
spufs_mbox_info_read, .llseek = generic_file_llseek, }; static ssize_t __spufs_ibox_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { u32 data; /* EOF if there's no entry in the ibox */ if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) return 0; data = ctx->csa.priv2.puint_mb_R; return simple_read_from_buffer(buf, len, pos, &data, sizeof data); } static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); ret = __spufs_ibox_info_read(ctx, buf, len, pos); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return ret; } static const struct file_operations spufs_ibox_info_fops = { .open = spufs_info_open, .read = spufs_ibox_info_read, .llseek = generic_file_llseek, }; static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { int i, cnt; u32 data[4]; u32 wbox_stat; wbox_stat = ctx->csa.prob.mb_stat_R; cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); for (i = 0; i < cnt; i++) { data[i] = ctx->csa.spu_mailbox_data[i]; } return simple_read_from_buffer(buf, len, pos, &data, cnt * sizeof(u32)); } static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); ret = __spufs_wbox_info_read(ctx, buf, len, pos); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return ret; } static const struct file_operations spufs_wbox_info_fops = { .open = spufs_info_open, .read = spufs_wbox_info_read, .llseek = generic_file_llseek, }; static ssize_t __spufs_dma_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { 
struct spu_dma_info info; struct mfc_cq_sr *qp, *spuqp; int i; info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; for (i = 0; i < 16; i++) { qp = &info.dma_info_command_data[i]; spuqp = &ctx->csa.priv2.spuq[i]; qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; } return simple_read_from_buffer(buf, len, pos, &info, sizeof info); } static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); ret = __spufs_dma_info_read(ctx, buf, len, pos); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return ret; } static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, .llseek = no_llseek, }; static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { struct spu_proxydma_info info; struct mfc_cq_sr *qp, *puqp; int ret = sizeof info; int i; if (len < ret) return -EINVAL; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; for (i = 0; i < 8; i++) { qp = &info.proxydma_info_command_data[i]; puqp = &ctx->csa.priv2.puq[i]; qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = 
puqp->mfc_cq_data3_RW; } return simple_read_from_buffer(buf, len, pos, &info, sizeof info); } static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); ret = __spufs_proxydma_info_read(ctx, buf, len, pos); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return ret; } static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; seq_printf(s, "%d\n", ctx->tid); return 0; } static int spufs_tid_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_tid_fops = { .open = spufs_tid_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const char *ctx_state_names[] = { "user", "system", "iowait", "loaded" }; static unsigned long long spufs_acct_time(struct spu_context *ctx, enum spu_utilization_state state) { struct timespec ts; unsigned long long time = ctx->stats.times[state]; /* * In general, utilization statistics are updated by the controlling * thread as the spu context moves through various well defined * state transitions, but if the context is lazily loaded its * utilization statistics are not updated as the controlling thread * is not tightly coupled with the execution of the spu context. We * calculate and apply the time delta from the last recorded state * of the spu context. 
*/ if (ctx->spu && ctx->stats.util_state == state) { ktime_get_ts(&ts); time += timespec_to_ns(&ts) - ctx->stats.tstamp; } return time / NSEC_PER_MSEC; } static unsigned long long spufs_slb_flts(struct spu_context *ctx) { unsigned long long slb_flts = ctx->stats.slb_flt; if (ctx->state == SPU_STATE_RUNNABLE) { slb_flts += (ctx->spu->stats.slb_flt - ctx->stats.slb_flt_base); } return slb_flts; } static unsigned long long spufs_class2_intrs(struct spu_context *ctx) { unsigned long long class2_intrs = ctx->stats.class2_intr; if (ctx->state == SPU_STATE_RUNNABLE) { class2_intrs += (ctx->spu->stats.class2_intr - ctx->stats.class2_intr_base); } return class2_intrs; } static int spufs_show_stat(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; int ret; ret = spu_acquire(ctx); if (ret) return ret; seq_printf(s, "%s %llu %llu %llu %llu " "%llu %llu %llu %llu %llu %llu %llu %llu\n", ctx_state_names[ctx->stats.util_state], spufs_acct_time(ctx, SPU_UTIL_USER), spufs_acct_time(ctx, SPU_UTIL_SYSTEM), spufs_acct_time(ctx, SPU_UTIL_IOWAIT), spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), ctx->stats.vol_ctx_switch, ctx->stats.invol_ctx_switch, spufs_slb_flts(ctx), ctx->stats.hash_flt, ctx->stats.min_flt, ctx->stats.maj_flt, spufs_class2_intrs(ctx), ctx->stats.libassist); spu_release(ctx); return 0; } static int spufs_stat_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_stat_fops = { .open = spufs_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static inline int spufs_switch_log_used(struct spu_context *ctx) { return (ctx->switch_log->head - ctx->switch_log->tail) % SWITCH_LOG_BUFSIZE; } static inline int spufs_switch_log_avail(struct spu_context *ctx) { return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); } static int spufs_switch_log_open(struct inode *inode, struct file *file) { struct spu_context *ctx = 
SPUFS_I(inode)->i_ctx; int rc; rc = spu_acquire(ctx); if (rc) return rc; if (ctx->switch_log) { rc = -EBUSY; goto out; } ctx->switch_log = kmalloc(sizeof(struct switch_log) + SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), GFP_KERNEL); if (!ctx->switch_log) { rc = -ENOMEM; goto out; } ctx->switch_log->head = ctx->switch_log->tail = 0; init_waitqueue_head(&ctx->switch_log->wait); rc = 0; out: spu_release(ctx); return rc; } static int spufs_switch_log_release(struct inode *inode, struct file *file) { struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int rc; rc = spu_acquire(ctx); if (rc) return rc; kfree(ctx->switch_log); ctx->switch_log = NULL; spu_release(ctx); return 0; } static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) { struct switch_log_entry *p; p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n", (unsigned int) p->tstamp.tv_sec, (unsigned int) p->tstamp.tv_nsec, p->spu_id, (unsigned int) p->type, (unsigned int) p->val, (unsigned long long) p->timebase); } static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int error = 0, cnt = 0; if (!buf) return -EINVAL; error = spu_acquire(ctx); if (error) return error; while (cnt < len) { char tbuf[128]; int width; if (spufs_switch_log_used(ctx) == 0) { if (cnt > 0) { /* If there's data ready to go, we can * just return straight away */ break; } else if (file->f_flags & O_NONBLOCK) { error = -EAGAIN; break; } else { /* spufs_wait will drop the mutex and * re-acquire, but since we're in read(), the * file cannot be _released (and so * ctx->switch_log is stable). 
*/ error = spufs_wait(ctx->switch_log->wait, spufs_switch_log_used(ctx) > 0); /* On error, spufs_wait returns without the * state mutex held */ if (error) return error; /* We may have had entries read from underneath * us while we dropped the mutex in spufs_wait, * so re-check */ if (spufs_switch_log_used(ctx) == 0) continue; } } width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); if (width < len) ctx->switch_log->tail = (ctx->switch_log->tail + 1) % SWITCH_LOG_BUFSIZE; else /* If the record is greater than space available return * partial buffer (so far) */ break; error = copy_to_user(buf + cnt, tbuf, width); if (error) break; cnt += width; } spu_release(ctx); return cnt == 0 ? error : cnt; } static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) { struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; unsigned int mask = 0; int rc; poll_wait(file, &ctx->switch_log->wait, wait); rc = spu_acquire(ctx); if (rc) return rc; if (spufs_switch_log_used(ctx) > 0) mask |= POLLIN; spu_release(ctx); return mask; } static const struct file_operations spufs_switch_log_fops = { .open = spufs_switch_log_open, .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, .llseek = no_llseek, }; /** * Log a context switch event to a switch log reader. * * Must be called with ctx->state_mutex held. */ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, u32 type, u32 val) { if (!ctx->switch_log) return; if (spufs_switch_log_avail(ctx) > 1) { struct switch_log_entry *p; p = ctx->switch_log->log + ctx->switch_log->head; ktime_get_ts(&p->tstamp); p->timebase = get_tb(); p->spu_id = spu ? 
spu->number : -1; p->type = type; p->val = val; ctx->switch_log->head = (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; } wake_up(&ctx->switch_log->wait); } static int spufs_show_ctx(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; u64 mfc_control_RW; mutex_lock(&ctx->state_mutex); if (ctx->spu) { struct spu *spu = ctx->spu; struct spu_priv2 __iomem *priv2 = spu->priv2; spin_lock_irq(&spu->register_lock); mfc_control_RW = in_be64(&priv2->mfc_control_RW); spin_unlock_irq(&spu->register_lock); } else { struct spu_state *csa = &ctx->csa; mfc_control_RW = csa->priv2.mfc_control_RW; } seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)" " %c %llx %llx %llx %llx %x %x\n", ctx->state == SPU_STATE_SAVED ? 'S' : 'R', ctx->flags, ctx->sched_flags, ctx->prio, ctx->time_slice, ctx->spu ? ctx->spu->number : -1, !list_empty(&ctx->rq) ? 'q' : ' ', ctx->csa.class_0_pending, ctx->csa.class_0_dar, ctx->csa.class_1_dsisr, mfc_control_RW, ctx->ops->runcntl_read(ctx), ctx->ops->status_read(ctx)); mutex_unlock(&ctx->state_mutex); return 0; } static int spufs_ctx_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_ctx_fops = { .open = spufs_ctx_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; const struct spufs_tree_descr spufs_dir_contents[] = { { "capabilities", &spufs_caps_fops, 0444, }, { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), }, { "mbox", &spufs_mbox_fops, 0444, }, { "ibox", &spufs_ibox_fops, 0444, }, { "wbox", &spufs_wbox_fops, 0222, }, { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, { "signal1", &spufs_signal1_fops, 0666, }, { "signal2", &spufs_signal2_fops, 0666, }, { "signal1_type", &spufs_signal1_type, 0666, }, { 
"signal2_type", &spufs_signal2_type, 0666, }, { "cntl", &spufs_cntl_fops, 0666, }, { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), }, { "lslr", &spufs_lslr_ops, 0444, }, { "mfc", &spufs_mfc_fops, 0666, }, { "mss", &spufs_mss_fops, 0666, }, { "npc", &spufs_npc_ops, 0666, }, { "srr0", &spufs_srr0_ops, 0666, }, { "decr", &spufs_decr_ops, 0666, }, { "decr_status", &spufs_decr_status_ops, 0666, }, { "event_mask", &spufs_event_mask_ops, 0666, }, { "event_status", &spufs_event_status_ops, 0444, }, { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, { "phys-id", &spufs_id_ops, 0666, }, { "object-id", &spufs_object_id_ops, 0666, }, { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), }, { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), }, { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), }, { "dma_info", &spufs_dma_info_fops, 0444, sizeof(struct spu_dma_info), }, { "proxydma_info", &spufs_proxydma_info_fops, 0444, sizeof(struct spu_proxydma_info)}, { "tid", &spufs_tid_fops, 0444, }, { "stat", &spufs_stat_fops, 0444, }, { "switch_log", &spufs_switch_log_fops, 0444 }, {}, }; const struct spufs_tree_descr spufs_dir_nosched_contents[] = { { "capabilities", &spufs_caps_fops, 0444, }, { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, { "mbox", &spufs_mbox_fops, 0444, }, { "ibox", &spufs_ibox_fops, 0444, }, { "wbox", &spufs_wbox_fops, 0222, }, { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, { "signal1", &spufs_signal1_nosched_fops, 0222, }, { "signal2", &spufs_signal2_nosched_fops, 0222, }, { "signal1_type", &spufs_signal1_type, 0666, }, { "signal2_type", &spufs_signal2_type, 0666, }, { "mss", &spufs_mss_fops, 0666, }, { "mfc", &spufs_mfc_fops, 0666, }, { "cntl", &spufs_cntl_fops, 0666, }, { "npc", &spufs_npc_ops, 0666, }, { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, { "phys-id", &spufs_id_ops, 0666, }, { 
"object-id", &spufs_object_id_ops, 0666, }, { "tid", &spufs_tid_fops, 0444, }, { "stat", &spufs_stat_fops, 0444, }, {}, }; const struct spufs_tree_descr spufs_dir_debug_contents[] = { { ".ctx", &spufs_ctx_fops, 0444, }, {}, }; const struct spufs_coredump_reader spufs_coredump_read[] = { { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, { "lslr", NULL, spufs_lslr_get, 19 }, { "decr", NULL, spufs_decr_get, 19 }, { "decr_status", NULL, spufs_decr_status_get, 19 }, { "mem", __spufs_mem_read, NULL, LS_SIZE, }, { "signal1", __spufs_signal1_read, NULL, sizeof(u32) }, { "signal1_type", NULL, spufs_signal1_type_get, 19 }, { "signal2", __spufs_signal2_read, NULL, sizeof(u32) }, { "signal2_type", NULL, spufs_signal2_type_get, 19 }, { "event_mask", NULL, spufs_event_mask_get, 19 }, { "event_status", NULL, spufs_event_status_get, 19 }, { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) }, { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) }, { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)}, { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)}, { "proxydma_info", __spufs_proxydma_info_read, NULL, sizeof(struct spu_proxydma_info)}, { "object-id", NULL, spufs_object_id_get, 19 }, { "npc", NULL, spufs_npc_get, 19 }, { NULL }, };
gpl-2.0
neonicus/Paralax
drivers/media/dvb/dvb-usb/nova-t-usb2.c
2776
6367
/* DVB USB framework compliant Linux driver for the Hauppauge WinTV-NOVA-T usb2 * DVB-T receiver. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dibusb.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=rc,2=eeprom (|-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define deb_rc(args...) dprintk(debug,0x01,args) #define deb_ee(args...) dprintk(debug,0x02,args) /* Hauppauge NOVA-T USB2 keys */ static struct rc_map_table rc_map_haupp_table[] = { { 0x1e00, KEY_0 }, { 0x1e01, KEY_1 }, { 0x1e02, KEY_2 }, { 0x1e03, KEY_3 }, { 0x1e04, KEY_4 }, { 0x1e05, KEY_5 }, { 0x1e06, KEY_6 }, { 0x1e07, KEY_7 }, { 0x1e08, KEY_8 }, { 0x1e09, KEY_9 }, { 0x1e0a, KEY_KPASTERISK }, { 0x1e0b, KEY_RED }, { 0x1e0c, KEY_RADIO }, { 0x1e0d, KEY_MENU }, { 0x1e0e, KEY_GRAVE }, /* # */ { 0x1e0f, KEY_MUTE }, { 0x1e10, KEY_VOLUMEUP }, { 0x1e11, KEY_VOLUMEDOWN }, { 0x1e12, KEY_CHANNEL }, { 0x1e14, KEY_UP }, { 0x1e15, KEY_DOWN }, { 0x1e16, KEY_LEFT }, { 0x1e17, KEY_RIGHT }, { 0x1e18, KEY_VIDEO }, { 0x1e19, KEY_AUDIO }, { 0x1e1a, KEY_IMAGES }, { 0x1e1b, KEY_EPG }, { 0x1e1c, KEY_TV }, { 0x1e1e, KEY_NEXT }, { 0x1e1f, KEY_BACK }, { 0x1e20, KEY_CHANNELUP }, { 0x1e21, KEY_CHANNELDOWN }, { 0x1e24, KEY_LAST }, /* Skip backwards */ { 0x1e25, KEY_OK }, { 0x1e29, KEY_BLUE}, { 0x1e2e, KEY_GREEN }, { 0x1e30, KEY_PAUSE }, { 0x1e32, KEY_REWIND }, { 0x1e34, KEY_FASTFORWARD }, { 0x1e35, KEY_PLAY }, { 0x1e36, KEY_STOP }, { 0x1e37, KEY_RECORD }, { 0x1e38, KEY_YELLOW }, { 0x1e3b, KEY_GOTO }, { 0x1e3d, KEY_POWER }, }; /* Firmware bug? sometimes, when a new key is pressed, the previous pressed key * is delivered. No workaround yet, maybe a new firmware. 
*/ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom; u16 raw; int i; struct dibusb_device_state *st = d->priv; dvb_usb_generic_rw(d,cmd,2,key,5,0); *state = REMOTE_NO_KEY_PRESSED; switch (key[0]) { case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED: raw = ((key[1] << 8) | key[2]) >> 3; toggle = !!(raw & 0x800); data = raw & 0x3f; custom = (raw >> 6) & 0x1f; deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle); for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) { if (rc5_data(&rc_map_haupp_table[i]) == data && rc5_custom(&rc_map_haupp_table[i]) == custom) { deb_rc("c: %x, d: %x\n", rc5_data(&rc_map_haupp_table[i]), rc5_custom(&rc_map_haupp_table[i])); *event = rc_map_haupp_table[i].keycode; *state = REMOTE_KEY_PRESSED; if (st->old_toggle == toggle) { if (st->last_repeat_count++ < 2) *state = REMOTE_NO_KEY_PRESSED; } else { st->last_repeat_count = 0; st->old_toggle = toggle; } break; } } break; case DIBUSB_RC_HAUPPAUGE_KEY_EMPTY: default: break; } return 0; } static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) { int i; u8 b; mac[0] = 0x00; mac[1] = 0x0d; mac[2] = 0xfe; /* this is a complete guess, but works for my box */ for (i = 136; i < 139; i++) { dibusb_read_eeprom_byte(d,i, &b); mac[5 - (i - 136)] = b; } return 0; } /* USB Driver stuff */ static struct dvb_usb_device_properties nova_t_properties; static int nova_t_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &nova_t_properties, THIS_MODULE, NULL, adapter_nr); } /* do not change the order of the ID table */ static struct usb_device_id nova_t_table [] = { /* 00 */ { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_COLD) }, /* 01 */ { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_WARM) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, nova_t_table); static 
struct dvb_usb_device_properties nova_t_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-nova-t-usb2-02.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mc_frontend_attach, .tuner_attach = dibusb_dib3000mc_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), } }, .size_of_priv = sizeof(struct dibusb_device_state), .power_ctrl = dibusb2_0_power_ctrl, .read_mac_address = nova_t_read_mac_address, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_haupp_table, .rc_map_size = ARRAY_SIZE(rc_map_haupp_table), .rc_query = nova_t_rc_query, }, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Hauppauge WinTV-NOVA-T usb2", { &nova_t_table[0], NULL }, { &nova_t_table[1], NULL }, }, { NULL }, } }; static struct usb_driver nova_t_driver = { .name = "dvb_usb_nova_t_usb2", .probe = nova_t_probe, .disconnect = dvb_usb_device_exit, .id_table = nova_t_table, }; /* module stuff */ static int __init nova_t_module_init(void) { int result; if ((result = usb_register(&nova_t_driver))) { err("usb_register failed. Error number %d",result); return result; } return 0; } static void __exit nova_t_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&nova_t_driver); } module_init (nova_t_module_init); module_exit (nova_t_module_exit); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
spezi77/android_kernel_htcbravo-3.0
drivers/hwmon/ads7828.c
3032
7940
/* ads7828.c - lm_sensors driver for ads7828 12-bit 8-channel ADC (C) 2007 EADS Astrium This driver is based on the lm75 and other lm_sensors/hwmon drivers Written by Steve Hardy <shardy@redhat.com> Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* The ADS7828 registers */ #define ADS7828_NCH 8 /* 8 channels of 12-bit A-D supported */ #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ #define ADS7828_CMD_SD_DIFF 0x00 /* Differential inputs */ #define ADS7828_CMD_PD0 0x0 /* Power Down between A-D conversions */ #define ADS7828_CMD_PD1 0x04 /* Internal ref OFF && A-D ON */ #define ADS7828_CMD_PD2 0x08 /* Internal ref ON && A-D OFF */ #define ADS7828_CMD_PD3 0x0C /* Internal ref ON && A-D ON */ #define ADS7828_INT_VREF_MV 2500 /* Internal vref is 2.5V, 2500mV */ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; /* Module parameters */ static int se_input = 1; /* Default is SE, 0 == diff */ static int int_vref = 1; /* Default is internal ref ON */ static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 
2.5V */ module_param(se_input, bool, S_IRUGO); module_param(int_vref, bool, S_IRUGO); module_param(vref_mv, int, S_IRUGO); /* Global Variables */ static u8 ads7828_cmd_byte; /* cmd byte without channel bits */ static unsigned int ads7828_lsb_resol; /* resolution of the ADC sample lsb */ /* Each client has this additional data */ struct ads7828_data { struct device *hwmon_dev; struct mutex update_lock; /* mutex protect updates */ char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH 12-bit samples */ }; /* Function declaration - necessary due to function dependencies */ static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info); static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id); /* The ADS7828 returns the 12-bit sample in two bytes, these are read as a word then byte-swapped */ static u16 ads7828_read_value(struct i2c_client *client, u8 reg) { return swab16(i2c_smbus_read_word_data(client, reg)); } static inline u8 channel_cmd_byte(int ch) { /* cmd byte C2,C1,C0 - see datasheet */ u8 cmd = (((ch>>1) | (ch&0x01)<<2)<<4); cmd |= ads7828_cmd_byte; return cmd; } /* Update data for the device (all 8 channels) */ static struct ads7828_data *ads7828_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ads7828_data *data = i2c_get_clientdata(client); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { unsigned int ch; dev_dbg(&client->dev, "Starting ads7828 update\n"); for (ch = 0; ch < ADS7828_NCH; ch++) { u8 cmd = channel_cmd_byte(ch); data->adc_input[ch] = ads7828_read_value(client, cmd); } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* sysfs callback function */ static ssize_t show_in(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); struct ads7828_data *data = ads7828_update_device(dev); /* Print value (in mV as specified in sysfs-interface documentation) */ return sprintf(buf, "%d\n", (data->adc_input[attr->index] * ads7828_lsb_resol)/1000); } #define in_reg(offset)\ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in,\ NULL, offset) in_reg(0); in_reg(1); in_reg(2); in_reg(3); in_reg(4); in_reg(5); in_reg(6); in_reg(7); static struct attribute *ads7828_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, NULL }; static const struct attribute_group ads7828_group = { .attrs = ads7828_attributes, }; static int ads7828_remove(struct i2c_client *client) { struct ads7828_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ads7828_group); kfree(i2c_get_clientdata(client)); return 0; } static const struct i2c_device_id ads7828_id[] = { { "ads7828", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ads7828_id); /* This is the driver that will be inserted */ static struct i2c_driver ads7828_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ads7828", }, .probe = ads7828_probe, .remove = ads7828_remove, .id_table = ads7828_id, .detect = ads7828_detect, .address_list = normal_i2c, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int ch; /* Check we have a valid client */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) return -ENODEV; /* Now, we do the remaining detection. 
There is no identification dedicated register so attempt to sanity check using knowledge of the chip - Read from the 8 channel addresses - Check the top 4 bits of each result are not set (12 data bits) */ for (ch = 0; ch < ADS7828_NCH; ch++) { u16 in_data; u8 cmd = channel_cmd_byte(ch); in_data = ads7828_read_value(client, cmd); if (in_data & 0xF000) { pr_debug("%s : Doesn't look like an ads7828 device\n", __func__); return -ENODEV; } } strlcpy(info->type, "ads7828", I2C_NAME_SIZE); return 0; } static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ads7828_data *data; int err; data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &ads7828_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: sysfs_remove_group(&client->dev.kobj, &ads7828_group); exit_free: kfree(data); exit: return err; } static int __init sensors_ads7828_init(void) { /* Initialize the command byte according to module parameters */ ads7828_cmd_byte = se_input ? ADS7828_CMD_SD_SE : ADS7828_CMD_SD_DIFF; ads7828_cmd_byte |= int_vref ? ADS7828_CMD_PD3 : ADS7828_CMD_PD1; /* Calculate the LSB resolution */ ads7828_lsb_resol = (vref_mv*1000)/4096; return i2c_add_driver(&ads7828_driver); } static void __exit sensors_ads7828_exit(void) { i2c_del_driver(&ads7828_driver); } MODULE_AUTHOR("Steve Hardy <shardy@redhat.com>"); MODULE_DESCRIPTION("ADS7828 driver"); MODULE_LICENSE("GPL"); module_init(sensors_ads7828_init); module_exit(sensors_ads7828_exit);
gpl-2.0
whoi-acomms/linux
drivers/media/video/w9966.c
3032
25284
/* Winbond w9966cf Webcam parport driver. Version 0.33 Copyright (C) 2001 Jakob Kemi <jakob.kemi@post.utfors.se> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supported devices: *Lifeview FlyCam Supra (using the Philips saa7111a chip) Does any other model using the w9966 interface chip exist ? Todo: *Add a working EPP mode, since DMA ECP read isn't implemented in the parport drivers. (That's why it's so sloow) *Add support for other ccd-control chips than the saa7111 please send me feedback on what kind of chips you have. *Add proper probing. I don't know what's wrong with the IEEE1284 parport drivers but (IEEE1284_MODE_NIBBLE|IEEE1284_DEVICE_ID) and nibble read seems to be broken for some peripherals. *Add probing for onboard SRAM, port directions etc. (if possible) *Add support for the hardware compressed modes (maybe using v4l2) *Fix better support for the capture window (no skewed images, v4l interface to capt. window) *Probably some bugs that I don't know of Please support me by sending feedback! 
Changes: Alan Cox: Removed RGB mode for kernel merge, added THIS_MODULE and owner support for newer module locks */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <media/v4l2-fh.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <linux/parport.h> /*#define DEBUG*/ /* Undef me for production */ #ifdef DEBUG #define DPRINTF(x, a...) printk(KERN_DEBUG "W9966: %s(): "x, __func__ , ##a) #else #define DPRINTF(x...) #endif /* * Defines, simple typedefs etc. */ #define W9966_DRIVERNAME "W9966CF Webcam" #define W9966_MAXCAMS 4 /* Maximum number of cameras */ #define W9966_RBUFFER 2048 /* Read buffer (must be an even number) */ #define W9966_SRAMSIZE 131072 /* 128kb */ #define W9966_SRAMID 0x02 /* check w9966cf.pdf */ /* Empirically determined window limits */ #define W9966_WND_MIN_X 16 #define W9966_WND_MIN_Y 14 #define W9966_WND_MAX_X 705 #define W9966_WND_MAX_Y 253 #define W9966_WND_MAX_W (W9966_WND_MAX_X - W9966_WND_MIN_X) #define W9966_WND_MAX_H (W9966_WND_MAX_Y - W9966_WND_MIN_Y) /* Keep track of our current state */ #define W9966_STATE_PDEV 0x01 #define W9966_STATE_CLAIMED 0x02 #define W9966_STATE_VDEV 0x04 #define W9966_I2C_W_ID 0x48 #define W9966_I2C_R_ID 0x49 #define W9966_I2C_R_DATA 0x08 #define W9966_I2C_R_CLOCK 0x04 #define W9966_I2C_W_DATA 0x02 #define W9966_I2C_W_CLOCK 0x01 struct w9966 { struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler hdl; unsigned char dev_state; unsigned char i2c_state; unsigned short ppmode; struct parport *pport; struct pardevice *pdev; struct video_device vdev; unsigned short width; unsigned short height; unsigned char brightness; signed char contrast; signed char color; signed char hue; struct mutex lock; }; /* * Module specific properties */ MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>"); MODULE_DESCRIPTION("Winbond 
w9966cf WebCam driver (0.32)"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.33.1"); #ifdef MODULE static char *pardev[] = {[0 ... W9966_MAXCAMS] = ""}; #else static char *pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"}; #endif module_param_array(pardev, charp, NULL, 0); MODULE_PARM_DESC(pardev, "pardev: where to search for\n" "\teach camera. 'aggressive' means brute-force search.\n" "\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n" "\tcam 1 to parport3 and search every parport for cam 2 etc..."); static int parmode; module_param(parmode, int, 0); MODULE_PARM_DESC(parmode, "parmode: transfer mode (0=auto, 1=ecp, 2=epp"); static int video_nr = -1; module_param(video_nr, int, 0); static struct w9966 w9966_cams[W9966_MAXCAMS]; /* * Private function defines */ /* Set camera phase flags, so we know what to uninit when terminating */ static inline void w9966_set_state(struct w9966 *cam, int mask, int val) { cam->dev_state = (cam->dev_state & ~mask) ^ val; } /* Get camera phase flags */ static inline int w9966_get_state(struct w9966 *cam, int mask, int val) { return ((cam->dev_state & mask) == val); } /* Claim parport for ourself */ static void w9966_pdev_claim(struct w9966 *cam) { if (w9966_get_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED)) return; parport_claim_or_block(cam->pdev); w9966_set_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED); } /* Release parport for others to use */ static void w9966_pdev_release(struct w9966 *cam) { if (w9966_get_state(cam, W9966_STATE_CLAIMED, 0)) return; parport_release(cam->pdev); w9966_set_state(cam, W9966_STATE_CLAIMED, 0); } /* Read register from W9966 interface-chip Expects a claimed pdev -1 on error, else register data (byte) */ static int w9966_read_reg(struct w9966 *cam, int reg) { /* ECP, read, regtransfer, REG, REG, REG, REG, REG */ const unsigned char addr = 0x80 | (reg & 0x1f); unsigned char val; if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_ADDR) != 0) return -1; if 
(parport_write(cam->pport, &addr, 1) != 1) return -1; if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_DATA) != 0) return -1; if (parport_read(cam->pport, &val, 1) != 1) return -1; return val; } /* Write register to W9966 interface-chip Expects a claimed pdev -1 on error */ static int w9966_write_reg(struct w9966 *cam, int reg, int data) { /* ECP, write, regtransfer, REG, REG, REG, REG, REG */ const unsigned char addr = 0xc0 | (reg & 0x1f); const unsigned char val = data; if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_ADDR) != 0) return -1; if (parport_write(cam->pport, &addr, 1) != 1) return -1; if (parport_negotiate(cam->pport, cam->ppmode | IEEE1284_DATA) != 0) return -1; if (parport_write(cam->pport, &val, 1) != 1) return -1; return 0; } /* * Ugly and primitive i2c protocol functions */ /* Sets the data line on the i2c bus. Expects a claimed pdev. */ static void w9966_i2c_setsda(struct w9966 *cam, int state) { if (state) cam->i2c_state |= W9966_I2C_W_DATA; else cam->i2c_state &= ~W9966_I2C_W_DATA; w9966_write_reg(cam, 0x18, cam->i2c_state); udelay(5); } /* Get peripheral clock line Expects a claimed pdev. */ static int w9966_i2c_getscl(struct w9966 *cam) { const unsigned char state = w9966_read_reg(cam, 0x18); return ((state & W9966_I2C_R_CLOCK) > 0); } /* Sets the clock line on the i2c bus. Expects a claimed pdev. -1 on error */ static int w9966_i2c_setscl(struct w9966 *cam, int state) { unsigned long timeout; if (state) cam->i2c_state |= W9966_I2C_W_CLOCK; else cam->i2c_state &= ~W9966_I2C_W_CLOCK; w9966_write_reg(cam, 0x18, cam->i2c_state); udelay(5); /* we go to high, we also expect the peripheral to ack. */ if (state) { timeout = jiffies + 100; while (!w9966_i2c_getscl(cam)) { if (time_after(jiffies, timeout)) return -1; } } return 0; } #if 0 /* Get peripheral data line Expects a claimed pdev. 
*/ static int w9966_i2c_getsda(struct w9966 *cam) { const unsigned char state = w9966_read_reg(cam, 0x18); return ((state & W9966_I2C_R_DATA) > 0); } #endif /* Write a byte with ack to the i2c bus. Expects a claimed pdev. -1 on error */ static int w9966_i2c_wbyte(struct w9966 *cam, int data) { int i; for (i = 7; i >= 0; i--) { w9966_i2c_setsda(cam, (data >> i) & 0x01); if (w9966_i2c_setscl(cam, 1) == -1) return -1; w9966_i2c_setscl(cam, 0); } w9966_i2c_setsda(cam, 1); if (w9966_i2c_setscl(cam, 1) == -1) return -1; w9966_i2c_setscl(cam, 0); return 0; } /* Read a data byte with ack from the i2c-bus Expects a claimed pdev. -1 on error */ #if 0 static int w9966_i2c_rbyte(struct w9966 *cam) { unsigned char data = 0x00; int i; w9966_i2c_setsda(cam, 1); for (i = 0; i < 8; i++) { if (w9966_i2c_setscl(cam, 1) == -1) return -1; data = data << 1; if (w9966_i2c_getsda(cam)) data |= 0x01; w9966_i2c_setscl(cam, 0); } return data; } #endif /* Read a register from the i2c device. Expects claimed pdev. -1 on error */ #if 0 static int w9966_read_reg_i2c(struct w9966 *cam, int reg) { int data; w9966_i2c_setsda(cam, 0); w9966_i2c_setscl(cam, 0); if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 || w9966_i2c_wbyte(cam, reg) == -1) return -1; w9966_i2c_setsda(cam, 1); if (w9966_i2c_setscl(cam, 1) == -1) return -1; w9966_i2c_setsda(cam, 0); w9966_i2c_setscl(cam, 0); if (w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1) return -1; data = w9966_i2c_rbyte(cam); if (data == -1) return -1; w9966_i2c_setsda(cam, 0); if (w9966_i2c_setscl(cam, 1) == -1) return -1; w9966_i2c_setsda(cam, 1); return data; } #endif /* Write a register to the i2c device. Expects claimed pdev. 
-1 on error */ static int w9966_write_reg_i2c(struct w9966 *cam, int reg, int data) { w9966_i2c_setsda(cam, 0); w9966_i2c_setscl(cam, 0); if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 || w9966_i2c_wbyte(cam, reg) == -1 || w9966_i2c_wbyte(cam, data) == -1) return -1; w9966_i2c_setsda(cam, 0); if (w9966_i2c_setscl(cam, 1) == -1) return -1; w9966_i2c_setsda(cam, 1); return 0; } /* Find a good length for capture window (used both for W and H) A bit ugly but pretty functional. The capture length have to match the downscale */ static int w9966_findlen(int near, int size, int maxlen) { int bestlen = size; int besterr = abs(near - bestlen); int len; for (len = size + 1; len < maxlen; len++) { int err; if (((64 * size) % len) != 0) continue; err = abs(near - len); /* Only continue as long as we keep getting better values */ if (err > besterr) break; besterr = err; bestlen = len; } return bestlen; } /* Modify capture window (if necessary) and calculate downscaling Return -1 on error */ static int w9966_calcscale(int size, int min, int max, int *beg, int *end, unsigned char *factor) { int maxlen = max - min; int len = *end - *beg + 1; int newlen = w9966_findlen(len, size, maxlen); int err = newlen - len; /* Check for bad format */ if (newlen > maxlen || newlen < size) return -1; /* Set factor (6 bit fixed) */ *factor = (64 * size) / newlen; if (*factor == 64) *factor = 0x00; /* downscale is disabled */ else *factor |= 0x80; /* set downscale-enable bit */ /* Modify old beginning and end */ *beg -= err / 2; *end += err - (err / 2); /* Move window if outside borders */ if (*beg < min) { *end += min - *beg; *beg += min - *beg; } if (*end > max) { *beg -= *end - max; *end -= *end - max; } return 0; } /* Setup the cameras capture window etc. 
Expects a claimed pdev return -1 on error */ static int w9966_setup(struct w9966 *cam, int x1, int y1, int x2, int y2, int w, int h) { unsigned int i; unsigned int enh_s, enh_e; unsigned char scale_x, scale_y; unsigned char regs[0x1c]; unsigned char saa7111_regs[] = { 0x21, 0x00, 0xd8, 0x23, 0x00, 0x80, 0x80, 0x00, 0x88, 0x10, 0x80, 0x40, 0x40, 0x00, 0x01, 0x00, 0x48, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xe7, 0x00, 0x00, 0xc0 }; if (w * h * 2 > W9966_SRAMSIZE) { DPRINTF("capture window exceeds SRAM size!.\n"); w = 200; h = 160; /* Pick default values */ } w &= ~0x1; if (w < 2) w = 2; if (h < 1) h = 1; if (w > W9966_WND_MAX_W) w = W9966_WND_MAX_W; if (h > W9966_WND_MAX_H) h = W9966_WND_MAX_H; cam->width = w; cam->height = h; enh_s = 0; enh_e = w * h * 2; /* Modify capture window if necessary and calculate downscaling */ if (w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 || w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0) return -1; DPRINTF("%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n", w, h, x1, x2, y1, y2, scale_x & ~0x80, scale_y & ~0x80); /* Setup registers */ regs[0x00] = 0x00; /* Set normal operation */ regs[0x01] = 0x18; /* Capture mode */ regs[0x02] = scale_y; /* V-scaling */ regs[0x03] = scale_x; /* H-scaling */ /* Capture window */ regs[0x04] = (x1 & 0x0ff); /* X-start (8 low bits) */ regs[0x05] = (x1 & 0x300)>>8; /* X-start (2 high bits) */ regs[0x06] = (y1 & 0x0ff); /* Y-start (8 low bits) */ regs[0x07] = (y1 & 0x300)>>8; /* Y-start (2 high bits) */ regs[0x08] = (x2 & 0x0ff); /* X-end (8 low bits) */ regs[0x09] = (x2 & 0x300)>>8; /* X-end (2 high bits) */ regs[0x0a] = (y2 & 0x0ff); /* Y-end (8 low bits) */ regs[0x0c] = W9966_SRAMID; /* SRAM-banks (1x 128kb) */ /* Enhancement layer */ regs[0x0d] = (enh_s & 0x000ff); /* Enh. start (0-7) */ regs[0x0e] = (enh_s & 0x0ff00) >> 8; /* Enh. start (8-15) */ regs[0x0f] = (enh_s & 0x70000) >> 16; /* Enh. 
start (16-17/18??) */ regs[0x10] = (enh_e & 0x000ff); /* Enh. end (0-7) */ regs[0x11] = (enh_e & 0x0ff00) >> 8; /* Enh. end (8-15) */ regs[0x12] = (enh_e & 0x70000) >> 16; /* Enh. end (16-17/18??) */ /* Misc */ regs[0x13] = 0x40; /* VEE control (raw 4:2:2) */ regs[0x17] = 0x00; /* ??? */ regs[0x18] = cam->i2c_state = 0x00; /* Serial bus */ regs[0x19] = 0xff; /* I/O port direction control */ regs[0x1a] = 0xff; /* I/O port data register */ regs[0x1b] = 0x10; /* ??? */ /* SAA7111 chip settings */ saa7111_regs[0x0a] = cam->brightness; saa7111_regs[0x0b] = cam->contrast; saa7111_regs[0x0c] = cam->color; saa7111_regs[0x0d] = cam->hue; /* Reset (ECP-fifo & serial-bus) */ if (w9966_write_reg(cam, 0x00, 0x03) == -1) return -1; /* Write regs to w9966cf chip */ for (i = 0; i < 0x1c; i++) if (w9966_write_reg(cam, i, regs[i]) == -1) return -1; /* Write regs to saa7111 chip */ for (i = 0; i < 0x20; i++) if (w9966_write_reg_i2c(cam, i, saa7111_regs[i]) == -1) return -1; return 0; } /* * Video4linux interfacing */ static int cam_querycap(struct file *file, void *priv, struct v4l2_capability *vcap) { struct w9966 *cam = video_drvdata(file); strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver)); strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card)); strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info)); vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int cam_enum_input(struct file *file, void *fh, struct v4l2_input *vin) { if (vin->index > 0) return -EINVAL; strlcpy(vin->name, "Camera", sizeof(vin->name)); vin->type = V4L2_INPUT_TYPE_CAMERA; vin->audioset = 0; vin->tuner = 0; vin->std = 0; vin->status = 0; return 0; } static int cam_g_input(struct file *file, void *fh, unsigned int *inp) { *inp = 0; return 0; } static int cam_s_input(struct file *file, void *fh, unsigned int inp) { return (inp > 0) ? 
-EINVAL : 0; } static int cam_s_ctrl(struct v4l2_ctrl *ctrl) { struct w9966 *cam = container_of(ctrl->handler, struct w9966, hdl); int ret = 0; mutex_lock(&cam->lock); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cam->brightness = ctrl->val; break; case V4L2_CID_CONTRAST: cam->contrast = ctrl->val; break; case V4L2_CID_SATURATION: cam->color = ctrl->val; break; case V4L2_CID_HUE: cam->hue = ctrl->val; break; default: ret = -EINVAL; break; } if (ret == 0) { w9966_pdev_claim(cam); if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 || w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 || w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 || w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1) { ret = -EIO; } w9966_pdev_release(cam); } mutex_unlock(&cam->lock); return ret; } static int cam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct w9966 *cam = video_drvdata(file); struct v4l2_pix_format *pix = &fmt->fmt.pix; pix->width = cam->width; pix->height = cam->height; pix->pixelformat = V4L2_PIX_FMT_YUYV; pix->field = V4L2_FIELD_NONE; pix->bytesperline = 2 * cam->width; pix->sizeimage = 2 * cam->width * cam->height; /* Just a guess */ pix->colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int cam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct v4l2_pix_format *pix = &fmt->fmt.pix; if (pix->width < 2) pix->width = 2; if (pix->height < 1) pix->height = 1; if (pix->width > W9966_WND_MAX_W) pix->width = W9966_WND_MAX_W; if (pix->height > W9966_WND_MAX_H) pix->height = W9966_WND_MAX_H; pix->pixelformat = V4L2_PIX_FMT_YUYV; pix->field = V4L2_FIELD_NONE; pix->bytesperline = 2 * pix->width; pix->sizeimage = 2 * pix->width * pix->height; /* Just a guess */ pix->colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int cam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct w9966 *cam = video_drvdata(file); struct v4l2_pix_format *pix = &fmt->fmt.pix; int ret = 
cam_try_fmt_vid_cap(file, fh, fmt); if (ret) return ret; mutex_lock(&cam->lock); /* Update camera regs */ w9966_pdev_claim(cam); ret = w9966_setup(cam, 0, 0, 1023, 1023, pix->width, pix->height); w9966_pdev_release(cam); mutex_unlock(&cam->lock); return ret; } static int cam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static struct v4l2_fmtdesc formats[] = { { 0, 0, 0, "YUV 4:2:2", V4L2_PIX_FMT_YUYV, { 0, 0, 0, 0 } }, }; enum v4l2_buf_type type = fmt->type; if (fmt->index > 0) return -EINVAL; *fmt = formats[fmt->index]; fmt->type = type; return 0; } /* Capture data */ static ssize_t w9966_v4l_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct w9966 *cam = video_drvdata(file); unsigned char addr = 0xa0; /* ECP, read, CCD-transfer, 00000 */ unsigned char __user *dest = (unsigned char __user *)buf; unsigned long dleft = count; unsigned char *tbuf; /* Why would anyone want more than this?? */ if (count > cam->width * cam->height * 2) return -EINVAL; mutex_lock(&cam->lock); w9966_pdev_claim(cam); w9966_write_reg(cam, 0x00, 0x02); /* Reset ECP-FIFO buffer */ w9966_write_reg(cam, 0x00, 0x00); /* Return to normal operation */ w9966_write_reg(cam, 0x01, 0x98); /* Enable capture */ /* write special capture-addr and negotiate into data transfer */ if ((parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0) || (parport_write(cam->pport, &addr, 1) != 1) || (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0)) { w9966_pdev_release(cam); mutex_unlock(&cam->lock); return -EFAULT; } tbuf = kmalloc(W9966_RBUFFER, GFP_KERNEL); if (tbuf == NULL) { count = -ENOMEM; goto out; } while (dleft > 0) { unsigned long tsize = (dleft > W9966_RBUFFER) ? 
W9966_RBUFFER : dleft; if (parport_read(cam->pport, tbuf, tsize) < tsize) { count = -EFAULT; goto out; } if (copy_to_user(dest, tbuf, tsize) != 0) { count = -EFAULT; goto out; } dest += tsize; dleft -= tsize; } w9966_write_reg(cam, 0x01, 0x18); /* Disable capture */ out: kfree(tbuf); w9966_pdev_release(cam); mutex_unlock(&cam->lock); return count; } static const struct v4l2_file_operations w9966_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .poll = v4l2_ctrl_poll, .unlocked_ioctl = video_ioctl2, .read = w9966_v4l_read, }; static const struct v4l2_ioctl_ops w9966_ioctl_ops = { .vidioc_querycap = cam_querycap, .vidioc_g_input = cam_g_input, .vidioc_s_input = cam_s_input, .vidioc_enum_input = cam_enum_input, .vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static const struct v4l2_ctrl_ops cam_ctrl_ops = { .s_ctrl = cam_s_ctrl, }; /* Initialize camera device. Setup all internal flags, set a default video mode, setup ccd-chip, register v4l device etc.. Also used for 'probing' of hardware. 
-1 on error */ static int w9966_init(struct w9966 *cam, struct parport *port) { struct v4l2_device *v4l2_dev = &cam->v4l2_dev; if (cam->dev_state != 0) return -1; strlcpy(v4l2_dev->name, "w9966", sizeof(v4l2_dev->name)); if (v4l2_device_register(NULL, v4l2_dev) < 0) { v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); return -1; } v4l2_ctrl_handler_init(&cam->hdl, 4); v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops, V4L2_CID_CONTRAST, -64, 64, 1, 64); v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops, V4L2_CID_SATURATION, -64, 64, 1, 64); v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); if (cam->hdl.error) { v4l2_err(v4l2_dev, "couldn't register controls\n"); return -1; } cam->pport = port; cam->brightness = 128; cam->contrast = 64; cam->color = 64; cam->hue = 0; /* Select requested transfer mode */ switch (parmode) { default: /* Auto-detect (priority: hw-ecp, hw-epp, sw-ecp) */ case 0: if (port->modes & PARPORT_MODE_ECP) cam->ppmode = IEEE1284_MODE_ECP; else if (port->modes & PARPORT_MODE_EPP) cam->ppmode = IEEE1284_MODE_EPP; else cam->ppmode = IEEE1284_MODE_ECP; break; case 1: /* hw- or sw-ecp */ cam->ppmode = IEEE1284_MODE_ECP; break; case 2: /* hw- or sw-epp */ cam->ppmode = IEEE1284_MODE_EPP; break; } /* Tell the parport driver that we exists */ cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL); if (cam->pdev == NULL) { DPRINTF("parport_register_device() failed\n"); return -1; } w9966_set_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV); w9966_pdev_claim(cam); /* Setup a default capture mode */ if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) { DPRINTF("w9966_setup() failed.\n"); return -1; } w9966_pdev_release(cam); /* Fill in the video_device struct and register us to v4l */ strlcpy(cam->vdev.name, W9966_DRIVERNAME, sizeof(cam->vdev.name)); cam->vdev.v4l2_dev = v4l2_dev; cam->vdev.fops = &w9966_fops; cam->vdev.ioctl_ops = 
&w9966_ioctl_ops; cam->vdev.release = video_device_release_empty; cam->vdev.ctrl_handler = &cam->hdl; set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags); video_set_drvdata(&cam->vdev, cam); mutex_init(&cam->lock); if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) return -1; w9966_set_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV); /* All ok */ v4l2_info(v4l2_dev, "Found and initialized a webcam on %s.\n", cam->pport->name); return 0; } /* Terminate everything gracefully */ static void w9966_term(struct w9966 *cam) { /* Unregister from v4l */ if (w9966_get_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) { video_unregister_device(&cam->vdev); w9966_set_state(cam, W9966_STATE_VDEV, 0); } v4l2_ctrl_handler_free(&cam->hdl); /* Terminate from IEEE1284 mode and release pdev block */ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) { w9966_pdev_claim(cam); parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT); w9966_pdev_release(cam); } /* Unregister from parport */ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) { parport_unregister_device(cam->pdev); w9966_set_state(cam, W9966_STATE_PDEV, 0); } memset(cam, 0, sizeof(*cam)); } /* Called once for every parport on init */ static void w9966_attach(struct parport *port) { int i; for (i = 0; i < W9966_MAXCAMS; i++) { if (w9966_cams[i].dev_state != 0) /* Cam is already assigned */ continue; if (strcmp(pardev[i], "aggressive") == 0 || strcmp(pardev[i], port->name) == 0) { if (w9966_init(&w9966_cams[i], port) != 0) w9966_term(&w9966_cams[i]); break; /* return */ } } } /* Called once for every parport on termination */ static void w9966_detach(struct parport *port) { int i; for (i = 0; i < W9966_MAXCAMS; i++) if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port) w9966_term(&w9966_cams[i]); } static struct parport_driver w9966_ppd = { .name = W9966_DRIVERNAME, .attach = w9966_attach, .detach = w9966_detach, }; /* Module entry point */ static int __init w9966_mod_init(void) { int 
i; for (i = 0; i < W9966_MAXCAMS; i++) w9966_cams[i].dev_state = 0; return parport_register_driver(&w9966_ppd); } /* Module cleanup */ static void __exit w9966_mod_term(void) { parport_unregister_driver(&w9966_ppd); } module_init(w9966_mod_init); module_exit(w9966_mod_term);
gpl-2.0
bbelos/rk3188-kernel
drivers/isdn/mISDN/layer1.c
3288
9535
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/mISDNhw.h> #include "core.h" #include "layer1.h" #include "fsm.h" static u_int *debug; struct layer1 { u_long Flags; struct FsmInst l1m; struct FsmTimer timer; int delay; struct dchannel *dch; dchannel_l1callback *dcb; }; #define TIMER3_VALUE 7000 static struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL}; enum { ST_L1_F2, ST_L1_F3, ST_L1_F4, ST_L1_F5, ST_L1_F6, ST_L1_F7, ST_L1_F8, }; #define L1S_STATE_COUNT (ST_L1_F8+1) static char *strL1SState[] = { "ST_L1_F2", "ST_L1_F3", "ST_L1_F4", "ST_L1_F5", "ST_L1_F6", "ST_L1_F7", "ST_L1_F8", }; enum { EV_PH_ACTIVATE, EV_PH_DEACTIVATE, EV_RESET_IND, EV_DEACT_CNF, EV_DEACT_IND, EV_POWER_UP, EV_ANYSIG_IND, EV_INFO2_IND, EV_INFO4_IND, EV_TIMER_DEACT, EV_TIMER_ACT, EV_TIMER3, }; #define L1_EVENT_COUNT (EV_TIMER3 + 1) static char *strL1Event[] = { "EV_PH_ACTIVATE", "EV_PH_DEACTIVATE", "EV_RESET_IND", "EV_DEACT_CNF", "EV_DEACT_IND", "EV_POWER_UP", "EV_ANYSIG_IND", "EV_INFO2_IND", "EV_INFO4_IND", "EV_TIMER_DEACT", "EV_TIMER_ACT", "EV_TIMER3", }; static void l1m_debug(struct FsmInst *fi, char *fmt, ...) 
{ struct layer1 *l1 = fi->userdata; struct va_format vaf; va_list va; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf); va_end(va); } static void l1_reset(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F3); } static void l1_deact_cnf(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F3); if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) l1->dcb(l1->dch, HW_POWERUP_REQ); } static void l1_deact_req_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F3); mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2); test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags); } static void l1_power_up_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) { mISDN_FsmChangeState(fi, ST_L1_F4); l1->dcb(l1->dch, INFO3_P8); } else mISDN_FsmChangeState(fi, ST_L1_F3); } static void l1_go_F5(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F5); } static void l1_go_F8(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F8); } static void l1_info2_ind(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F6); l1->dcb(l1->dch, INFO3_P8); } static void l1_info4_ind(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F7); l1->dcb(l1->dch, INFO3_P8); if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags)) mISDN_FsmDelTimer(&l1->timer, 4); if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) { if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags)) mISDN_FsmDelTimer(&l1->timer, 3); mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2); test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags); } } static void l1_timer3(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 
= fi->userdata; test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags); if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) { if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); } if (l1->l1m.state != ST_L1_F6) { mISDN_FsmChangeState(fi, ST_L1_F3); l1->dcb(l1->dch, HW_POWERUP_REQ); } } static void l1_timer_act(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags); test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags); l1->dcb(l1->dch, PH_ACTIVATE_IND); } static void l1_timer_deact(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags); test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags); if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); l1->dcb(l1->dch, HW_DEACT_REQ); } static void l1_activate_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2); test_and_set_bit(FLG_L1_T3RUN, &l1->Flags); l1->dcb(l1->dch, HW_RESET_REQ); } static void l1_activate_no(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) && (!test_bit(FLG_L1_T3RUN, &l1->Flags))) { test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags); if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); } } static struct FsmNode L1SFnList[] = { {ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s}, {ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no}, {ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no}, {ST_L1_F3, EV_RESET_IND, l1_reset}, {ST_L1_F4, EV_RESET_IND, l1_reset}, {ST_L1_F5, EV_RESET_IND, l1_reset}, {ST_L1_F6, EV_RESET_IND, l1_reset}, {ST_L1_F7, EV_RESET_IND, l1_reset}, {ST_L1_F8, EV_RESET_IND, l1_reset}, {ST_L1_F3, EV_DEACT_CNF, 
l1_deact_cnf}, {ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F6, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F7, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F8, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F3, EV_POWER_UP, l1_power_up_s}, {ST_L1_F4, EV_ANYSIG_IND, l1_go_F5}, {ST_L1_F6, EV_ANYSIG_IND, l1_go_F8}, {ST_L1_F7, EV_ANYSIG_IND, l1_go_F8}, {ST_L1_F3, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F4, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F5, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F7, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F8, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F3, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F4, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F5, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F6, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F8, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F3, EV_TIMER3, l1_timer3}, {ST_L1_F4, EV_TIMER3, l1_timer3}, {ST_L1_F5, EV_TIMER3, l1_timer3}, {ST_L1_F6, EV_TIMER3, l1_timer3}, {ST_L1_F8, EV_TIMER3, l1_timer3}, {ST_L1_F7, EV_TIMER_ACT, l1_timer_act}, {ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact}, }; static void release_l1(struct layer1 *l1) { mISDN_FsmDelTimer(&l1->timer, 0); if (l1->dch) l1->dch->l1 = NULL; module_put(THIS_MODULE); kfree(l1); } int l1_event(struct layer1 *l1, u_int event) { int err = 0; if (!l1) return -EINVAL; switch (event) { case HW_RESET_IND: mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL); break; case HW_DEACT_IND: mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL); break; case HW_POWERUP_IND: mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL); break; case HW_DEACT_CNF: mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL); break; case ANYSIGNAL: mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL); break; case LOSTFRAMING: 
mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL); break; case INFO2: mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL); break; case INFO4_P8: mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL); break; case INFO4_P10: mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL); break; case PH_ACTIVATE_REQ: if (test_bit(FLG_L1_ACTIVATED, &l1->Flags)) l1->dcb(l1->dch, PH_ACTIVATE_IND); else { test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags); mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL); } break; case CLOSE_CHANNEL: release_l1(l1); break; default: if (*debug & DEBUG_L1) printk(KERN_DEBUG "%s %x unhandled\n", __func__, event); err = -EINVAL; } return err; } EXPORT_SYMBOL(l1_event); int create_l1(struct dchannel *dch, dchannel_l1callback *dcb) { struct layer1 *nl1; nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC); if (!nl1) { printk(KERN_ERR "kmalloc struct layer1 failed\n"); return -ENOMEM; } nl1->l1m.fsm = &l1fsm_s; nl1->l1m.state = ST_L1_F3; nl1->Flags = 0; nl1->l1m.debug = *debug & DEBUG_L1_FSM; nl1->l1m.userdata = nl1; nl1->l1m.userint = 0; nl1->l1m.printdebug = l1m_debug; nl1->dch = dch; nl1->dcb = dcb; mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer); __module_get(THIS_MODULE); dch->l1 = nl1; return 0; } EXPORT_SYMBOL(create_l1); int l1_init(u_int *deb) { debug = deb; l1fsm_s.state_count = L1S_STATE_COUNT; l1fsm_s.event_count = L1_EVENT_COUNT; l1fsm_s.strEvent = strL1Event; l1fsm_s.strState = strL1SState; mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); return 0; } void l1_cleanup(void) { mISDN_FsmFree(&l1fsm_s); }
gpl-2.0
mifl/android_kernel_pantech_ef34k
arch/sparc/kernel/traps_32.c
4312
12547
/* * arch/sparc/kernel/traps.c * * Copyright 1995, 2008 David S. Miller (davem@davemloft.net) * Copyright 2000 Jakub Jelinek (jakub@redhat.com) */ /* * I hate traps on the sparc, grrr... */ #include <linux/sched.h> /* for jiffies */ #include <linux/kernel.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/kdebug.h> #include <asm/delay.h> #include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/traps.h> #include "entry.h" #include "kernel.h" /* #define TRAP_DEBUG */ static void instruction_dump(unsigned long *pc) { int i; if((((unsigned long) pc) & 3)) return; for(i = -3; i < 6; i++) printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>'); printk("\n"); } #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") void die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; /* Amuse the user. */ printk( " \\|/ ____ \\|/\n" " \"@'/ ,. \\`@\"\n" " /_| \\__/ |_\\\n" " \\__U_/\n"); printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); show_regs(regs); add_taint(TAINT_DIE); __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __RESTORE; __RESTORE; __RESTORE; __RESTORE; __RESTORE; __RESTORE; __RESTORE; __RESTORE; { struct reg_window32 *rw = (struct reg_window32 *)regs->u_regs[UREG_FP]; /* Stop the back trace when we hit userland or we * find some badly aligned kernel stack. Set an upper * bound in case our stack is trashed and we loop. 
*/ while(rw && count++ < 30 && (((unsigned long) rw) >= PAGE_OFFSET) && !(((unsigned long) rw) & 0x7)) { printk("Caller[%08lx]: %pS\n", rw->ins[7], (void *) rw->ins[7]); rw = (struct reg_window32 *)rw->ins[6]; } } printk("Instruction DUMP:"); instruction_dump ((unsigned long *) regs->pc); if(regs->psr & PSR_PS) do_exit(SIGKILL); do_exit(SIGSEGV); } void do_hw_interrupt(struct pt_regs *regs, unsigned long type) { siginfo_t info; if(type < 0x80) { /* Sun OS's puke from bad traps, Linux survives! */ printk("Unimplemented Sparc TRAP, type = %02lx\n", type); die_if_kernel("Whee... Hello Mr. Penguin", regs); } if(regs->psr & PSR_PS) die_if_kernel("Kernel bad trap", regs); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLTRP; info.si_addr = (void __user *)regs->pc; info.si_trapno = type - 0x80; force_sig_info(SIGILL, &info, current); } void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(psr & PSR_PS) die_if_kernel("Kernel illegal instruction", regs); #ifdef TRAP_DEBUG printk("Ill instr. at pc=%08lx instruction is %08lx\n", regs->pc, *(unsigned long *)regs->pc); #endif if (!do_user_muldiv (regs, pc)) return; info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGILL, &info, current); } void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(psr & PSR_PS) die_if_kernel("Penguin instruction from Penguin mode??!?!", regs); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_PRVOPC; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGILL, &info, current); } /* XXX User may want to be allowed to do this. 
XXX */ void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(regs->psr & PSR_PS) { printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc, regs->u_regs[UREG_RETPC]); die_if_kernel("BOGUS", regs); /* die_if_kernel("Kernel MNA access", regs); */ } #if 0 show_regs (regs); instruction_dump ((unsigned long *) regs->pc); printk ("do_MNA!\n"); #endif info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = /* FIXME: Should dig out mna address */ (void *)0; info.si_trapno = 0; send_sig_info(SIGBUS, &info, current); } static unsigned long init_fsr = 0x0UL; static unsigned long init_fregs[32] __attribute__ ((aligned (8))) = { ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL }; void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { /* Sanity check... */ if(psr & PSR_PS) die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs); put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */ regs->psr |= PSR_EF; #ifndef CONFIG_SMP if(last_task_used_math == current) return; if(last_task_used_math) { /* Other processes fpu state, save away */ struct task_struct *fptask = last_task_used_math; fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr, &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth); } last_task_used_math = current; if(used_math()) { fpload(&current->thread.float_regs[0], &current->thread.fsr); } else { /* Set initial sane state. 
*/ fpload(&init_fregs[0], &init_fsr); set_used_math(); } #else if(!used_math()) { fpload(&init_fregs[0], &init_fsr); set_used_math(); } else { fpload(&current->thread.float_regs[0], &current->thread.fsr); } set_thread_flag(TIF_USEDFPU); #endif } static unsigned long fake_regs[32] __attribute__ ((aligned (8))); static unsigned long fake_fsr; static unsigned long fake_queue[32] __attribute__ ((aligned (8))); static unsigned long fake_depth; extern int do_mathemu(struct pt_regs *, struct task_struct *); void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static int calls; siginfo_t info; unsigned long fsr; int ret = 0; #ifndef CONFIG_SMP struct task_struct *fpt = last_task_used_math; #else struct task_struct *fpt = current; #endif put_psr(get_psr() | PSR_EF); /* If nobody owns the fpu right now, just clear the * error into our fake static buffer and hope it don't * happen again. Thank you crashme... */ #ifndef CONFIG_SMP if(!fpt) { #else if (!test_tsk_thread_flag(fpt, TIF_USEDFPU)) { #endif fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); regs->psr &= ~PSR_EF; return; } fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr, &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth); #ifdef DEBUG_FPU printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr); #endif switch ((fpt->thread.fsr & 0x1c000)) { /* switch on the contents of the ftt [floating point trap type] field */ #ifdef DEBUG_FPU case (1 << 14): printk("IEEE_754_exception\n"); break; #endif case (2 << 14): /* unfinished_FPop (underflow & co) */ case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */ ret = do_mathemu(regs, fpt); break; #ifdef DEBUG_FPU case (4 << 14): printk("sequence_error (OS bug...)\n"); break; case (5 << 14): printk("hardware_error (uhoh!)\n"); break; case (6 << 14): printk("invalid_fp_register (user error)\n"); break; #endif /* DEBUG_FPU */ } /* If we successfully emulated the FPop, we pretend the trap never happened :-> */ if 
(ret) { fpload(&current->thread.float_regs[0], &current->thread.fsr); return; } /* nope, better SIGFPE the offending process... */ #ifdef CONFIG_SMP clear_tsk_thread_flag(fpt, TIF_USEDFPU); #endif if(psr & PSR_PS) { /* The first fsr store/load we tried trapped, * the second one will not (we hope). */ printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n", regs->pc); regs->pc = regs->npc; regs->npc += 4; calls++; if(calls > 2) die_if_kernel("Too many Penguin-FPU traps from kernel mode", regs); return; } fsr = fpt->thread.fsr; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void __user *)pc; info.si_trapno = 0; info.si_code = __SI_FAULT; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; else if (fsr & 0x08) info.si_code = FPE_FLTOVF; else if (fsr & 0x04) info.si_code = FPE_FLTUND; else if (fsr & 0x02) info.si_code = FPE_FLTDIV; else if (fsr & 0x01) info.si_code = FPE_FLTRES; } send_sig_info(SIGFPE, &info, fpt); #ifndef CONFIG_SMP last_task_used_math = NULL; #endif regs->psr &= ~PSR_EF; if(calls > 0) calls=0; } void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); info.si_signo = SIGEMT; info.si_errno = 0; info.si_code = EMT_TAGOVF; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGEMT, &info, current); } void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { #ifdef TRAP_DEBUG printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif if(psr & PSR_PS) panic("Tell me what a watchpoint trap is, and I'll then deal " "with such a beast..."); } void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; #ifdef TRAP_DEBUG printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif info.si_signo = 
SIGBUS; info.si_errno = 0; info.si_code = BUS_OBJERR; info.si_addr = (void __user *)pc; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_COPROC; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGILL, &info, current); } void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; #ifdef TRAP_DEBUG printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_COPROC; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGILL, &info, current); } void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = FPE_INTDIV; info.si_addr = (void __user *)pc; info.si_trapno = 0; send_sig_info(SIGFPE, &info, current); } #ifdef CONFIG_DEBUG_BUGVERBOSE void do_BUG(const char *file, int line) { // bust_spinlocks(1); XXX Not in our original BUG() printk("kernel BUG at %s:%d!\n", file, line); } EXPORT_SYMBOL(do_BUG); #endif /* Since we have our mappings set up, on multiprocessors we can spin them * up here so that timer interrupts work during initialization. 
*/ void trap_init(void) { extern void thread_info_offsets_are_bolixed_pete(void); /* Force linker to barf if mismatched */ if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) || TI_TASK != offsetof(struct thread_info, task) || TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) || TI_FLAGS != offsetof(struct thread_info, flags) || TI_CPU != offsetof(struct thread_info, cpu) || TI_PREEMPT != offsetof(struct thread_info, preempt_count) || TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) || TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) || TI_KSP != offsetof(struct thread_info, ksp) || TI_KPC != offsetof(struct thread_info, kpc) || TI_KPSR != offsetof(struct thread_info, kpsr) || TI_KWIM != offsetof(struct thread_info, kwim) || TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || TI_W_SAVED != offsetof(struct thread_info, w_saved)) thread_info_offsets_are_bolixed_pete(); /* Attach to the address space of init_task. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; /* NOTE: Other cpus have this done as they are started * up on SMP. */ }
gpl-2.0
UnORoms/SebastianFM-kernel
arch/arm/plat-s5p/sysmmu.c
4824
7462
/* linux/arch/arm/plat-s5p/sysmmu.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/export.h> #include <asm/pgtable.h> #include <mach/map.h> #include <mach/regs-sysmmu.h> #include <plat/sysmmu.h> #define CTRL_ENABLE 0x5 #define CTRL_BLOCK 0x7 #define CTRL_DISABLE 0x0 static struct device *dev; static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = { S5P_PAGE_FAULT_ADDR, S5P_AR_FAULT_ADDR, S5P_AW_FAULT_ADDR, S5P_DEFAULT_SLAVE_ADDR, S5P_AR_FAULT_ADDR, S5P_AR_FAULT_ADDR, S5P_AW_FAULT_ADDR, S5P_AW_FAULT_ADDR }; static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { "PAGE FAULT", "AR MULTI-HIT FAULT", "AW MULTI-HIT FAULT", "BUS ERROR", "AR SECURITY PROTECTION FAULT", "AR ACCESS PROTECTION FAULT", "AW SECURITY PROTECTION FAULT", "AW ACCESS PROTECTION FAULT" }; static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])( enum S5P_SYSMMU_INTERRUPT_TYPE itype, unsigned long pgtable_base, unsigned long fault_addr); /* * If adjacent 2 bits are true, the system MMU is enabled. * The system MMU is disabled, otherwise. 
*/ static unsigned long sysmmu_states; static inline void set_sysmmu_active(sysmmu_ips ips) { sysmmu_states |= 3 << (ips * 2); } static inline void set_sysmmu_inactive(sysmmu_ips ips) { sysmmu_states &= ~(3 << (ips * 2)); } static inline int is_sysmmu_active(sysmmu_ips ips) { return sysmmu_states & (3 << (ips * 2)); } static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM]; static inline void sysmmu_block(sysmmu_ips ips) { __raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL); dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]); } static inline void sysmmu_unblock(sysmmu_ips ips) { __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL); dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]); } static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips) { __raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH); dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]); } static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd) { if (unlikely(pgd == 0)) { pgd = (unsigned long)ZERO_PAGE(0); __raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */ } else { __raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */ } __raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR); dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n", sysmmu_ips_name[ips], pgd); __sysmmu_tlb_invalidate(ips); } void sysmmu_set_fault_handler(sysmmu_ips ips, int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype, unsigned long pgtable_base, unsigned long fault_addr)) { BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM))); fault_handlers[ips] = handler; } static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id) { /* SYSMMU is in blocked when interrupt occurred. 
*/ unsigned long base = 0; sysmmu_ips ips = (sysmmu_ips)dev_id; enum S5P_SYSMMU_INTERRUPT_TYPE itype; itype = (enum S5P_SYSMMU_INTERRUPT_TYPE) __ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS)); BUG_ON(!((itype >= 0) && (itype < 8))); dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype], sysmmu_ips_name[ips]); if (fault_handlers[ips]) { unsigned long addr; base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR); addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]); if (fault_handlers[ips](itype, base, addr)) { __raw_writel(1 << itype, sysmmusfrs[ips] + S5P_INT_CLEAR); dev_notice(dev, "%s from %s is resolved." " Retrying translation.\n", sysmmu_fault_name[itype], sysmmu_ips_name[ips]); } else { base = 0; } } sysmmu_unblock(ips); if (!base) dev_notice(dev, "%s from %s is not handled.\n", sysmmu_fault_name[itype], sysmmu_ips_name[ips]); return IRQ_HANDLED; } void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd) { if (is_sysmmu_active(ips)) { sysmmu_block(ips); __sysmmu_set_ptbase(ips, pgd); sysmmu_unblock(ips); } else { dev_dbg(dev, "%s is disabled. 
" "Skipping initializing page table base.\n", sysmmu_ips_name[ips]); } } void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd) { if (!is_sysmmu_active(ips)) { sysmmu_clk_enable(ips); __sysmmu_set_ptbase(ips, pgd); __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL); set_sysmmu_active(ips); dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]); } else { dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]); } } void s5p_sysmmu_disable(sysmmu_ips ips) { if (is_sysmmu_active(ips)) { __raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL); set_sysmmu_inactive(ips); sysmmu_clk_disable(ips); dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]); } else { dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]); } } void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips) { if (is_sysmmu_active(ips)) { sysmmu_block(ips); __sysmmu_tlb_invalidate(ips); sysmmu_unblock(ips); } else { dev_dbg(dev, "%s is disabled. " "Skipping invalidating TLB.\n", sysmmu_ips_name[ips]); } } static int s5p_sysmmu_probe(struct platform_device *pdev) { int i, ret; struct resource *res, *mem; dev = &pdev->dev; for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) { int irq; sysmmu_clk_init(dev, i); sysmmu_clk_disable(i); res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) { dev_err(dev, "Failed to get the resource of %s.\n", sysmmu_ips_name[i]); ret = -ENODEV; goto err_res; } mem = request_mem_region(res->start, resource_size(res), pdev->name); if (!mem) { dev_err(dev, "Failed to request the memory region of %s.\n", sysmmu_ips_name[i]); ret = -EBUSY; goto err_res; } sysmmusfrs[i] = ioremap(res->start, resource_size(res)); if (!sysmmusfrs[i]) { dev_err(dev, "Failed to ioremap() for %s.\n", sysmmu_ips_name[i]); ret = -ENXIO; goto err_reg; } irq = platform_get_irq(pdev, i); if (irq <= 0) { dev_err(dev, "Failed to get the IRQ resource of %s.\n", sysmmu_ips_name[i]); ret = -ENOENT; goto err_map; } if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED, pdev->name, (void *)i)) { 
dev_err(dev, "Failed to request IRQ for %s.\n", sysmmu_ips_name[i]); ret = -ENOENT; goto err_map; } } return 0; err_map: iounmap(sysmmusfrs[i]); err_reg: release_mem_region(mem->start, resource_size(mem)); err_res: return ret; } static int s5p_sysmmu_remove(struct platform_device *pdev) { return 0; } int s5p_sysmmu_runtime_suspend(struct device *dev) { return 0; } int s5p_sysmmu_runtime_resume(struct device *dev) { return 0; } const struct dev_pm_ops s5p_sysmmu_pm_ops = { .runtime_suspend = s5p_sysmmu_runtime_suspend, .runtime_resume = s5p_sysmmu_runtime_resume, }; static struct platform_driver s5p_sysmmu_driver = { .probe = s5p_sysmmu_probe, .remove = s5p_sysmmu_remove, .driver = { .owner = THIS_MODULE, .name = "s5p-sysmmu", .pm = &s5p_sysmmu_pm_ops, } }; static int __init s5p_sysmmu_init(void) { return platform_driver_register(&s5p_sysmmu_driver); } arch_initcall(s5p_sysmmu_init);
gpl-2.0
krizhanovsky/linux-3.10.10-sync_sockets
net/ceph/ceph_hash.c
4824
2855
#include <linux/ceph/types.h> #include <linux/module.h> /* * Robert Jenkin's hash function. * http://burtleburtle.net/bob/hash/evahash.html * This is in the public domain. */ #define mix(a, b, c) \ do { \ a = a - b; a = a - c; a = a ^ (c >> 13); \ b = b - c; b = b - a; b = b ^ (a << 8); \ c = c - a; c = c - b; c = c ^ (b >> 13); \ a = a - b; a = a - c; a = a ^ (c >> 12); \ b = b - c; b = b - a; b = b ^ (a << 16); \ c = c - a; c = c - b; c = c ^ (b >> 5); \ a = a - b; a = a - c; a = a ^ (c >> 3); \ b = b - c; b = b - a; b = b ^ (a << 10); \ c = c - a; c = c - b; c = c ^ (b >> 15); \ } while (0) unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) { const unsigned char *k = (const unsigned char *)str; __u32 a, b, c; /* the internal state */ __u32 len; /* how many key bytes still need mixing */ /* Set up the internal state */ len = length; a = 0x9e3779b9; /* the golden ratio; an arbitrary value */ b = a; c = 0; /* variable initialization of internal state */ /* handle most of the key */ while (len >= 12) { a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) + ((__u32)k[3] << 24)); b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) + ((__u32)k[7] << 24)); c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + ((__u32)k[11] << 24)); mix(a, b, c); k = k + 12; len = len - 12; } /* handle the last 11 bytes */ c = c + length; switch (len) { /* all the case statements fall through */ case 11: c = c + ((__u32)k[10] << 24); case 10: c = c + ((__u32)k[9] << 16); case 9: c = c + ((__u32)k[8] << 8); /* the first byte of c is reserved for the length */ case 8: b = b + ((__u32)k[7] << 24); case 7: b = b + ((__u32)k[6] << 16); case 6: b = b + ((__u32)k[5] << 8); case 5: b = b + k[4]; case 4: a = a + ((__u32)k[3] << 24); case 3: a = a + ((__u32)k[2] << 16); case 2: a = a + ((__u32)k[1] << 8); case 1: a = a + k[0]; /* case 0: nothing left to add */ } mix(a, b, c); return c; } /* * linux dcache hash */ unsigned int ceph_str_hash_linux(const char 
*str, unsigned int length) { unsigned long hash = 0; unsigned char c; while (length--) { c = *str++; hash = (hash + (c << 4) + (c >> 4)) * 11; } return hash; } unsigned int ceph_str_hash(int type, const char *s, unsigned int len) { switch (type) { case CEPH_STR_HASH_LINUX: return ceph_str_hash_linux(s, len); case CEPH_STR_HASH_RJENKINS: return ceph_str_hash_rjenkins(s, len); default: return -1; } } EXPORT_SYMBOL(ceph_str_hash); const char *ceph_str_hash_name(int type) { switch (type) { case CEPH_STR_HASH_LINUX: return "linux"; case CEPH_STR_HASH_RJENKINS: return "rjenkins"; default: return "unknown"; } } EXPORT_SYMBOL(ceph_str_hash_name);
gpl-2.0
AOKP/kernel_sony_common
drivers/watchdog/wdt977.c
7384
12117
/* * Wdt977 0.04: A Watchdog Device for Netwinder W83977AF chip * * (c) Copyright 1998 Rebel.com (Woody Suwalski <woody@netwinder.org>) * * ----------------------- * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * ----------------------- * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT * 19-Dec-2001 Woody Suwalski: Netwinder fixes, ioctl interface * 06-Jan-2002 Woody Suwalski: For compatibility, convert all timeouts * from minutes to seconds. * 07-Jul-2003 Daniele Bellucci: Audit return code of misc_register in * nwwatchdog_init. * 25-Oct-2005 Woody Suwalski: Convert addresses to #defs, add spinlocks * remove limitiation to be used on * Netwinders only */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/mach-types.h> #define WATCHDOG_VERSION "0.04" #define WATCHDOG_NAME "Wdt977" #define IO_INDEX_PORT 0x370 /* on some systems it can be 0x3F0 */ #define IO_DATA_PORT (IO_INDEX_PORT + 1) #define UNLOCK_DATA 0x87 #define LOCK_DATA 0xAA #define DEVICE_REGISTER 0x07 #define DEFAULT_TIMEOUT 60 /* default timeout in seconds */ static int timeout = DEFAULT_TIMEOUT; static int timeoutM; /* timeout in minutes */ static unsigned long timer_alive; static int testmode; static char expect_close; static DEFINE_SPINLOCK(spinlock); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default=" __MODULE_STRING(DEFAULT_TIMEOUT) 
")"); module_param(testmode, int, 0); MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Start the watchdog */ static int wdt977_start(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and set watchdog regs F2, F3 and F4 * F2 has the timeout in minutes * F3 could be set to the POWER LED blink (with GP17 set to PowerLed) * at timeout, and to reset timer on kbd/mouse activity (not impl.) * F4 is used to just clear the TIMEOUT'ed state (bit 0) */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutM, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* another setting is 0E for kbd/mouse/LED */ outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* At last select device Aux1 (dev=7) and set GP16 as a * watchdog output. 
In test mode watch the bit 1 on F4 to * indicate "triggered" */ if (!testmode) { outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); outb_p(0xE6, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); } /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("activated\n"); return 0; } /* * Stop the watchdog */ static int wdt977_stop(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and set watchdog regs F2,F3 and F4 * F3 is reset to its default state * F4 can clear the TIMEOUT'ed state (bit 0) - back to default * We can not use GP17 as a PowerLed, as we use its usage as a RedLed */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0xFF, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); outb_p(0xE6, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("shutdown\n"); return 0; } /* * Send a keepalive ping to the watchdog * This is done by simply re-writing the timeout to reg. 
0xF2 */ static int wdt977_keepalive(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and kicks watchdog reg F2 */ /* F2 has the timeout in minutes */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutM, IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); return 0; } /* * Set the watchdog timeout value */ static int wdt977_set_timeout(int t) { int tmrval; /* convert seconds to minutes, rounding up */ tmrval = (t + 59) / 60; if (machine_is_netwinder()) { /* we have a hw bug somewhere, so each 977 minute is actually * only 30sec. This limits the max timeout to half of device * max of 255 minutes... */ tmrval += tmrval; } if (tmrval < 1 || tmrval > 255) return -EINVAL; /* timeout is the timeout in seconds, timeoutM is the timeout in minutes) */ timeout = t; timeoutM = tmrval; return 0; } /* * Get the watchdog status */ static int wdt977_get_status(int *status) { int new_status; unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and read watchdog reg F4 */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); new_status = inb_p(IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); *status = 0; if (new_status & 1) *status |= WDIOF_CARDRESET; return 0; } /* * /dev/watchdog handling */ static int wdt977_open(struct inode *inode, struct file *file) { /* If the watchdog is alive we don't need to start it again */ if (test_and_set_bit(0, &timer_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); wdt977_start(); return nonseekable_open(inode, 
file); } static int wdt977_release(struct inode *inode, struct file *file) { /* * Shut off the timer. * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { wdt977_stop(); clear_bit(0, &timer_alive); } else { wdt977_keepalive(); pr_crit("Unexpected close, not stopping watchdog!\n"); } expect_close = 0; return 0; } /* * wdt977_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. */ static ssize_t wdt977_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should restart timer */ wdt977_keepalive(); } return count; } static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = WATCHDOG_NAME, }; /* * wdt977_ioctl: * @inode: inode of the device * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static long wdt977_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; int new_options, retval = -EINVAL; int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: wdt977_get_status(&status); return put_user(status, uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: if (get_user(new_options, uarg.i)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt977_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt977_start(); retval = 0; } return retval; case WDIOC_KEEPALIVE: wdt977_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; if (wdt977_set_timeout(new_timeout)) return -EINVAL; wdt977_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); default: return -ENOTTY; } } static int wdt977_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt977_stop(); return NOTIFY_DONE; } static const struct file_operations wdt977_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wdt977_write, .unlocked_ioctl = wdt977_ioctl, .open = wdt977_open, .release = wdt977_release, }; static struct miscdevice wdt977_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt977_fops, }; static struct notifier_block wdt977_notifier = { .notifier_call = wdt977_notify_sys, }; static int __init wd977_init(void) { int rc; pr_info("driver v%s\n", WATCHDOG_VERSION); /* Check that the timeout value is within its range; if not reset to the default */ if (wdt977_set_timeout(timeout)) { wdt977_set_timeout(DEFAULT_TIMEOUT); pr_info("timeout value must be 60 < timeout < 15300, using %d\n", DEFAULT_TIMEOUT); } /* on Netwinder the IOports are already reserved by * arch/arm/mach-footbridge/netwinder-hw.c */ if (!machine_is_netwinder()) { if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", IO_INDEX_PORT); rc = -EIO; goto err_out; } } rc = register_reboot_notifier(&wdt977_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out_region; } rc = 
misc_register(&wdt977_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", wdt977_miscdev.minor, rc); goto err_out_reboot; } pr_info("initialized. timeout=%d sec (nowayout=%d, testmode=%i)\n", timeout, nowayout, testmode); return 0; err_out_reboot: unregister_reboot_notifier(&wdt977_notifier); err_out_region: if (!machine_is_netwinder()) release_region(IO_INDEX_PORT, 2); err_out: return rc; } static void __exit wd977_exit(void) { wdt977_stop(); misc_deregister(&wdt977_miscdev); unregister_reboot_notifier(&wdt977_notifier); release_region(IO_INDEX_PORT, 2); } module_init(wd977_init); module_exit(wd977_exit); MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>"); MODULE_DESCRIPTION("W83977AF Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
guilhem/LGE975_G_Kitkat_Android_V20a_Kernel
drivers/watchdog/w83977f_wdt.c
7384
12122
/* * W83977F Watchdog Timer Driver for Winbond W83977F I/O Chip * * (c) Copyright 2005 Jose Goncalves <jose.goncalves@inov.pt> * * Based on w83877f_wdt.c by Scott Jennings, * and wdt977.c by Woody Suwalski * * ----------------------- * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/uaccess.h> #include <linux/io.h> #define WATCHDOG_VERSION "1.00" #define WATCHDOG_NAME "W83977F WDT" #define IO_INDEX_PORT 0x3F0 #define IO_DATA_PORT (IO_INDEX_PORT+1) #define UNLOCK_DATA 0x87 #define LOCK_DATA 0xAA #define DEVICE_REGISTER 0x07 #define DEFAULT_TIMEOUT 45 /* default timeout in seconds */ static int timeout = DEFAULT_TIMEOUT; static int timeoutW; /* timeout in watchdog counter units */ static unsigned long timer_alive; static int testmode; static char expect_close; static DEFINE_SPINLOCK(spinlock); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (15..7635), default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")"); module_param(testmode, int, 0); MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Start the watchdog */ static int wdt_start(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* Unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); 
outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4. * F2 has the timeout in watchdog counter units. * F3 is set to enable watchdog LED blink at timeout. * F4 is used to just clear the TIMEOUT'ed state (bit 0). */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutW, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* Set device Aux2 active */ outb_p(0x30, IO_INDEX_PORT); outb_p(0x01, IO_DATA_PORT); /* * Select device Aux1 (dev=7) to set GP16 as the watchdog output * (in reg E6) and GP13 as the watchdog LED output (in reg E3). * Map GP16 at pin 119. * In test mode watch the bit 0 on F4 to indicate "triggered" or * check watchdog LED on SBC. */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); if (!testmode) { unsigned pin_map; outb_p(0xE6, IO_INDEX_PORT); outb_p(0x0A, IO_DATA_PORT); outb_p(0x2C, IO_INDEX_PORT); pin_map = inb_p(IO_DATA_PORT); pin_map |= 0x10; pin_map &= ~(0x20); outb_p(0x2C, IO_INDEX_PORT); outb_p(pin_map, IO_DATA_PORT); } outb_p(0xE3, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); /* Set device Aux1 active */ outb_p(0x30, IO_INDEX_PORT); outb_p(0x01, IO_DATA_PORT); /* Lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("activated\n"); return 0; } /* * Stop the watchdog */ static int wdt_stop(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* Unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4. * F2 is reset to its default value (watchdog timer disabled). * F3 is reset to its default state. * F4 clears the TIMEOUT'ed state (bit 0) - back to default. 
*/ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0xFF, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* * Select device Aux1 (dev=7) to set GP16 (in reg E6) and * Gp13 (in reg E3) as inputs. */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); if (!testmode) { outb_p(0xE6, IO_INDEX_PORT); outb_p(0x01, IO_DATA_PORT); } outb_p(0xE3, IO_INDEX_PORT); outb_p(0x01, IO_DATA_PORT); /* Lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("shutdown\n"); return 0; } /* * Send a keepalive ping to the watchdog * This is done by simply re-writing the timeout to reg. 0xF2 */ static int wdt_keepalive(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* Unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* Select device Aux2 (device=8) to kick watchdog reg F2 */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutW, IO_DATA_PORT); /* Lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); return 0; } /* * Set the watchdog timeout value */ static int wdt_set_timeout(int t) { int tmrval; /* * Convert seconds to watchdog counter time units, rounding up. * On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup * value. This information is supplied in the PCM-5335 manual and was * checked by me on a real board. This is a bit strange because W83977f * datasheet says counter unit is in minutes! */ if (t < 15) return -EINVAL; tmrval = ((t + 15) + 29) / 30; if (tmrval > 255) return -EINVAL; /* * timeout is the timeout in seconds, * timeoutW is the timeout in watchdog counter units. 
*/ timeoutW = tmrval; timeout = (timeoutW * 30) - 15; return 0; } /* * Get the watchdog status */ static int wdt_get_status(int *status) { int new_status; unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* Unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* Select device Aux2 (device=8) to read watchdog reg F4 */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); new_status = inb_p(IO_DATA_PORT); /* Lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); *status = 0; if (new_status & 1) *status |= WDIOF_CARDRESET; return 0; } /* * /dev/watchdog handling */ static int wdt_open(struct inode *inode, struct file *file) { /* If the watchdog is alive we don't need to start it again */ if (test_and_set_bit(0, &timer_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); wdt_start(); return nonseekable_open(inode, file); } static int wdt_release(struct inode *inode, struct file *file) { /* * Shut off the timer. * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { wdt_stop(); clear_bit(0, &timer_alive); } else { wdt_keepalive(); pr_crit("unexpected close, not stopping watchdog!\n"); } expect_close = 0; return 0; } /* * wdt_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. 
*/ static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (count) { if (!nowayout) { size_t ofs; /* note: just in case someone wrote the magic character long ago */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (ofs = 0; ofs != count; ofs++) { char c; if (get_user(c, buf + ofs)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should restart timer */ wdt_keepalive(); } return count; } /* * wdt_ioctl: * @inode: inode of the device * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = WATCHDOG_NAME, }; static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; int new_options, retval = -EINVAL; int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: wdt_get_status(&status); return put_user(status, uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: if (get_user(new_options, uarg.i)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt_start(); retval = 0; } return retval; case WDIOC_KEEPALIVE: wdt_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; if (wdt_set_timeout(new_timeout)) return -EINVAL; wdt_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); default: return -ENOTTY; } } static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt_stop(); return NOTIFY_DONE; } static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wdt_write, .unlocked_ioctl = wdt_ioctl, .open = wdt_open, .release = wdt_release, }; static struct miscdevice wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt_fops, }; static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; static int __init w83977f_wdt_init(void) { int rc; pr_info("driver v%s\n", WATCHDOG_VERSION); /* * Check that the timeout value is within it's range; * if not reset to the default */ if (wdt_set_timeout(timeout)) { wdt_set_timeout(DEFAULT_TIMEOUT); pr_info("timeout value must be 15 <= timeout <= 7635, using %d\n", DEFAULT_TIMEOUT); } if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", IO_INDEX_PORT); rc = -EIO; goto err_out; } rc = register_reboot_notifier(&wdt_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out_region; } rc = misc_register(&wdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", wdt_miscdev.minor, rc); goto err_out_reboot; } pr_info("initialized. 
timeout=%d sec (nowayout=%d testmode=%d)\n", timeout, nowayout, testmode); return 0; err_out_reboot: unregister_reboot_notifier(&wdt_notifier); err_out_region: release_region(IO_INDEX_PORT, 2); err_out: return rc; } static void __exit w83977f_wdt_exit(void) { wdt_stop(); misc_deregister(&wdt_miscdev); unregister_reboot_notifier(&wdt_notifier); release_region(IO_INDEX_PORT, 2); } module_init(w83977f_wdt_init); module_exit(w83977f_wdt_exit); MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>"); MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
defconoi/Unleashed-Flo-Kernel
drivers/bluetooth/bfusb.c
7896
17035
/* * * AVM BlueFRITZ! USB driver * * Copyright (C) 2003-2006 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "1.2" static struct usb_driver bfusb_driver; static struct usb_device_id bfusb_table[] = { /* AVM BlueFRITZ! 
USB */ { USB_DEVICE(0x057c, 0x2200) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, bfusb_table); #define BFUSB_MAX_BLOCK_SIZE 256 #define BFUSB_BLOCK_TIMEOUT 3000 #define BFUSB_TX_PROCESS 1 #define BFUSB_TX_WAKEUP 2 #define BFUSB_MAX_BULK_TX 2 #define BFUSB_MAX_BULK_RX 2 struct bfusb_data { struct hci_dev *hdev; unsigned long state; struct usb_device *udev; unsigned int bulk_in_ep; unsigned int bulk_out_ep; unsigned int bulk_pkt_size; rwlock_t lock; struct sk_buff_head transmit_q; struct sk_buff *reassembly; atomic_t pending_tx; struct sk_buff_head pending_q; struct sk_buff_head completed_q; }; struct bfusb_data_scb { struct urb *urb; }; static void bfusb_tx_complete(struct urb *urb); static void bfusb_rx_complete(struct urb *urb); static struct urb *bfusb_get_completed(struct bfusb_data *data) { struct sk_buff *skb; struct urb *urb = NULL; BT_DBG("bfusb %p", data); skb = skb_dequeue(&data->completed_q); if (skb) { urb = ((struct bfusb_data_scb *) skb->cb)->urb; kfree_skb(skb); } return urb; } static void bfusb_unlink_urbs(struct bfusb_data *data) { struct sk_buff *skb; struct urb *urb; BT_DBG("bfusb %p", data); while ((skb = skb_dequeue(&data->pending_q))) { urb = ((struct bfusb_data_scb *) skb->cb)->urb; usb_kill_urb(urb); skb_queue_tail(&data->completed_q, skb); } while ((urb = bfusb_get_completed(data))) usb_free_urb(urb); } static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb) { struct bfusb_data_scb *scb = (void *) skb->cb; struct urb *urb = bfusb_get_completed(data); int err, pipe; BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len); if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) return -ENOMEM; pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bfusb_tx_complete, skb); scb->urb = urb; skb_queue_tail(&data->pending_q, skb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { BT_ERR("%s bulk tx submit failed urb %p err %d", data->hdev->name, urb, err); 
skb_unlink(skb, &data->pending_q); usb_free_urb(urb); } else atomic_inc(&data->pending_tx); return err; } static void bfusb_tx_wakeup(struct bfusb_data *data) { struct sk_buff *skb; BT_DBG("bfusb %p", data); if (test_and_set_bit(BFUSB_TX_PROCESS, &data->state)) { set_bit(BFUSB_TX_WAKEUP, &data->state); return; } do { clear_bit(BFUSB_TX_WAKEUP, &data->state); while ((atomic_read(&data->pending_tx) < BFUSB_MAX_BULK_TX) && (skb = skb_dequeue(&data->transmit_q))) { if (bfusb_send_bulk(data, skb) < 0) { skb_queue_head(&data->transmit_q, skb); break; } } } while (test_bit(BFUSB_TX_WAKEUP, &data->state)); clear_bit(BFUSB_TX_PROCESS, &data->state); } static void bfusb_tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct bfusb_data *data = (struct bfusb_data *) skb->dev; BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); atomic_dec(&data->pending_tx); if (!test_bit(HCI_RUNNING, &data->hdev->flags)) return; if (!urb->status) data->hdev->stat.byte_tx += skb->len; else data->hdev->stat.err_tx++; read_lock(&data->lock); skb_unlink(skb, &data->pending_q); skb_queue_tail(&data->completed_q, skb); bfusb_tx_wakeup(data); read_unlock(&data->lock); } static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb) { struct bfusb_data_scb *scb; struct sk_buff *skb; int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; BT_DBG("bfusb %p urb %p", data, urb); if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) return -ENOMEM; skb = bt_skb_alloc(size, GFP_ATOMIC); if (!skb) { usb_free_urb(urb); return -ENOMEM; } skb->dev = (void *) data; scb = (struct bfusb_data_scb *) skb->cb; scb->urb = urb; pipe = usb_rcvbulkpipe(data->udev, data->bulk_in_ep); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size, bfusb_rx_complete, skb); skb_queue_tail(&data->pending_q, skb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { BT_ERR("%s bulk rx submit failed urb %p err %d", data->hdev->name, urb, err); skb_unlink(skb, &data->pending_q); 
kfree_skb(skb); usb_free_urb(urb); } return err; } static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len) { BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len); if (hdr & 0x10) { BT_ERR("%s error in block", data->hdev->name); kfree_skb(data->reassembly); data->reassembly = NULL; return -EIO; } if (hdr & 0x04) { struct sk_buff *skb; unsigned char pkt_type; int pkt_len = 0; if (data->reassembly) { BT_ERR("%s unexpected start block", data->hdev->name); kfree_skb(data->reassembly); data->reassembly = NULL; } if (len < 1) { BT_ERR("%s no packet type found", data->hdev->name); return -EPROTO; } pkt_type = *buf++; len--; switch (pkt_type) { case HCI_EVENT_PKT: if (len >= HCI_EVENT_HDR_SIZE) { struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf; pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen; } else { BT_ERR("%s event block is too short", data->hdev->name); return -EILSEQ; } break; case HCI_ACLDATA_PKT: if (len >= HCI_ACL_HDR_SIZE) { struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf; pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen); } else { BT_ERR("%s data block is too short", data->hdev->name); return -EILSEQ; } break; case HCI_SCODATA_PKT: if (len >= HCI_SCO_HDR_SIZE) { struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf; pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen; } else { BT_ERR("%s audio block is too short", data->hdev->name); return -EILSEQ; } break; } skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); if (!skb) { BT_ERR("%s no memory for the packet", data->hdev->name); return -ENOMEM; } skb->dev = (void *) data->hdev; bt_cb(skb)->pkt_type = pkt_type; data->reassembly = skb; } else { if (!data->reassembly) { BT_ERR("%s unexpected continuation block", data->hdev->name); return -EIO; } } if (len > 0) memcpy(skb_put(data->reassembly, len), buf, len); if (hdr & 0x08) { hci_recv_frame(data->reassembly); data->reassembly = NULL; } return 0; } static void bfusb_rx_complete(struct urb *urb) { struct sk_buff *skb = (struct 
sk_buff *) urb->context; struct bfusb_data *data = (struct bfusb_data *) skb->dev; unsigned char *buf = urb->transfer_buffer; int count = urb->actual_length; int err, hdr, len; BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); read_lock(&data->lock); if (!test_bit(HCI_RUNNING, &data->hdev->flags)) goto unlock; if (urb->status || !count) goto resubmit; data->hdev->stat.byte_rx += count; skb_put(skb, count); while (count) { hdr = buf[0] | (buf[1] << 8); if (hdr & 0x4000) { len = 0; count -= 2; buf += 2; } else { len = (buf[2] == 0) ? 256 : buf[2]; count -= 3; buf += 3; } if (count < len) { BT_ERR("%s block extends over URB buffer ranges", data->hdev->name); } if ((hdr & 0xe1) == 0xc1) bfusb_recv_block(data, hdr, buf, len); count -= len; buf += len; } skb_unlink(skb, &data->pending_q); kfree_skb(skb); bfusb_rx_submit(data, urb); read_unlock(&data->lock); return; resubmit: urb->dev = data->udev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { BT_ERR("%s bulk resubmit failed urb %p err %d", data->hdev->name, urb, err); } unlock: read_unlock(&data->lock); } static int bfusb_open(struct hci_dev *hdev) { struct bfusb_data *data = hdev->driver_data; unsigned long flags; int i, err; BT_DBG("hdev %p bfusb %p", hdev, data); if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) return 0; write_lock_irqsave(&data->lock, flags); err = bfusb_rx_submit(data, NULL); if (!err) { for (i = 1; i < BFUSB_MAX_BULK_RX; i++) bfusb_rx_submit(data, NULL); } else { clear_bit(HCI_RUNNING, &hdev->flags); } write_unlock_irqrestore(&data->lock, flags); return err; } static int bfusb_flush(struct hci_dev *hdev) { struct bfusb_data *data = hdev->driver_data; BT_DBG("hdev %p bfusb %p", hdev, data); skb_queue_purge(&data->transmit_q); return 0; } static int bfusb_close(struct hci_dev *hdev) { struct bfusb_data *data = hdev->driver_data; unsigned long flags; BT_DBG("hdev %p bfusb %p", hdev, data); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; 
write_lock_irqsave(&data->lock, flags); write_unlock_irqrestore(&data->lock, flags); bfusb_unlink_urbs(data); bfusb_flush(hdev); return 0; } static int bfusb_send_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct bfusb_data *data; struct sk_buff *nskb; unsigned char buf[3]; int sent = 0, size, count; BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); if (!hdev) { BT_ERR("Frame for unknown HCI device (hdev=NULL)"); return -ENODEV; } if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; data = hdev->driver_data; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; }; /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); count = skb->len; /* Max HCI frame size seems to be 1511 + 1 */ nskb = bt_skb_alloc(count + 32, GFP_ATOMIC); if (!nskb) { BT_ERR("Can't allocate memory for new packet"); return -ENOMEM; } nskb->dev = (void *) data; while (count) { size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE); buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0); buf[1] = 0x00; buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 
0 : size; memcpy(skb_put(nskb, 3), buf, 3); skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); sent += size; count -= size; } /* Don't send frame with multiple size of bulk max packet */ if ((nskb->len % data->bulk_pkt_size) == 0) { buf[0] = 0xdd; buf[1] = 0x00; memcpy(skb_put(nskb, 2), buf, 2); } read_lock(&data->lock); skb_queue_tail(&data->transmit_q, nskb); bfusb_tx_wakeup(data); read_unlock(&data->lock); kfree_skb(skb); return 0; } static void bfusb_destruct(struct hci_dev *hdev) { struct bfusb_data *data = hdev->driver_data; BT_DBG("hdev %p bfusb %p", hdev, data); kfree(data); } static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } static int bfusb_load_firmware(struct bfusb_data *data, const unsigned char *firmware, int count) { unsigned char *buf; int err, pipe, len, size, sent = 0; BT_DBG("bfusb %p udev %p", data, data->udev); BT_INFO("BlueFRITZ! USB loading firmware"); pipe = usb_sndctrlpipe(data->udev, 0); if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { BT_ERR("Can't change to loading configuration"); return -EBUSY; } data->udev->toggle[0] = data->udev->toggle[1] = 0; buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC); if (!buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); while (count) { size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE + 3); memcpy(buf, firmware + sent, size); err = usb_bulk_msg(data->udev, pipe, buf, size, &len, BFUSB_BLOCK_TIMEOUT); if (err || (len != size)) { BT_ERR("Error in firmware loading"); goto error; } sent += size; count -= size; } err = usb_bulk_msg(data->udev, pipe, NULL, 0, &len, BFUSB_BLOCK_TIMEOUT); if (err < 0) { BT_ERR("Error in null packet request"); goto error; } pipe = usb_sndctrlpipe(data->udev, 0); err = usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 2, 0, NULL, 0, 
USB_CTRL_SET_TIMEOUT); if (err < 0) { BT_ERR("Can't change to running configuration"); goto error; } data->udev->toggle[0] = data->udev->toggle[1] = 0; BT_INFO("BlueFRITZ! USB device ready"); kfree(buf); return 0; error: kfree(buf); pipe = usb_sndctrlpipe(data->udev, 0); usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); return err; } static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_endpoint *bulk_out_ep; struct usb_host_endpoint *bulk_in_ep; struct hci_dev *hdev; struct bfusb_data *data; BT_DBG("intf %p id %p", intf, id); /* Check number of endpoints */ if (intf->cur_altsetting->desc.bNumEndpoints < 2) return -EIO; bulk_out_ep = &intf->cur_altsetting->endpoint[0]; bulk_in_ep = &intf->cur_altsetting->endpoint[1]; if (!bulk_out_ep || !bulk_in_ep) { BT_ERR("Bulk endpoints not found"); goto done; } /* Initialize control structure and load firmware */ data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL); if (!data) { BT_ERR("Can't allocate memory for control structure"); goto done; } data->udev = udev; data->bulk_in_ep = bulk_in_ep->desc.bEndpointAddress; data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); rwlock_init(&data->lock); data->reassembly = NULL; skb_queue_head_init(&data->transmit_q); skb_queue_head_init(&data->pending_q); skb_queue_head_init(&data->completed_q); if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { BT_ERR("Firmware request failed"); goto error; } BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { BT_ERR("Firmware loading failed"); goto release; } release_firmware(firmware); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI 
device"); goto error; } data->hdev = hdev; hdev->bus = HCI_USB; hdev->driver_data = data; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = bfusb_open; hdev->close = bfusb_close; hdev->flush = bfusb_flush; hdev->send = bfusb_send_frame; hdev->destruct = bfusb_destruct; hdev->ioctl = bfusb_ioctl; hdev->owner = THIS_MODULE; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); goto error; } usb_set_intfdata(intf, data); return 0; release: release_firmware(firmware); error: kfree(data); done: return -EIO; } static void bfusb_disconnect(struct usb_interface *intf) { struct bfusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev = data->hdev; BT_DBG("intf %p", intf); if (!hdev) return; usb_set_intfdata(intf, NULL); bfusb_close(hdev); if (hci_unregister_dev(hdev) < 0) BT_ERR("Can't unregister HCI device %s", hdev->name); hci_free_dev(hdev); } static struct usb_driver bfusb_driver = { .name = "bfusb", .probe = bfusb_probe, .disconnect = bfusb_disconnect, .id_table = bfusb_table, }; static int __init bfusb_init(void) { int err; BT_INFO("BlueFRITZ! USB driver ver %s", VERSION); err = usb_register(&bfusb_driver); if (err < 0) BT_ERR("Failed to register BlueFRITZ! USB driver"); return err; } static void __exit bfusb_exit(void) { usb_deregister(&bfusb_driver); } module_init(bfusb_init); module_exit(bfusb_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("bfubase.frm");
gpl-2.0
desaishivam26/android_kernel_motorola_msm8916
net/netfilter/xt_nfacct.c
7896
1966
/* * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org> * (C) 2011 Intra2net AG <http://www.intra2net.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 (or any * later at your option) as published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nfnetlink_acct.h> #include <linux/netfilter/xt_nfacct.h> MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_nfacct"); MODULE_ALIAS("ip6t_nfacct"); static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_nfacct_match_info *info = par->targinfo; nfnl_acct_update(skb, info->nfacct); return true; } static int nfacct_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_nfacct_match_info *info = par->matchinfo; struct nf_acct *nfacct; nfacct = nfnl_acct_find_get(info->name); if (nfacct == NULL) { pr_info("xt_nfacct: accounting object with name `%s' " "does not exists\n", info->name); return -ENOENT; } info->nfacct = nfacct; return 0; } static void nfacct_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_nfacct_match_info *info = par->matchinfo; nfnl_acct_put(info->nfacct); } static struct xt_match nfacct_mt_reg __read_mostly = { .name = "nfacct", .family = NFPROTO_UNSPEC, .checkentry = nfacct_mt_checkentry, .match = nfacct_mt, .destroy = nfacct_mt_destroy, .matchsize = sizeof(struct xt_nfacct_match_info), .me = THIS_MODULE, }; static int __init nfacct_mt_init(void) { return xt_register_match(&nfacct_mt_reg); } static void __exit nfacct_mt_exit(void) { xt_unregister_match(&nfacct_mt_reg); } module_init(nfacct_mt_init); module_exit(nfacct_mt_exit);
gpl-2.0
tiny4579/tinykernel-gnex
fs/proc/kmsg.c
9432
1489
/* * linux/fs/proc/kmsg.c * * Copyright (C) 1992 by Linus Torvalds * */ #include <linux/types.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/fs.h> #include <linux/syslog.h> #include <asm/uaccess.h> #include <asm/io.h> extern wait_queue_head_t log_wait; static int kmsg_open(struct inode * inode, struct file * file) { return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); } static int kmsg_release(struct inode * inode, struct file * file) { (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); return 0; } static ssize_t kmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { if ((file->f_flags & O_NONBLOCK) && !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return -EAGAIN; return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); } static unsigned int kmsg_poll(struct file *file, poll_table *wait) { poll_wait(file, &log_wait, wait); if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations proc_kmsg_operations = { .read = kmsg_read, .poll = kmsg_poll, .open = kmsg_open, .release = kmsg_release, .llseek = generic_file_llseek, }; static int __init proc_kmsg_init(void) { proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); return 0; } module_init(proc_kmsg_init);
gpl-2.0
Feche/android_kernel_motorola_olympus_oc
arch/cris/arch-v10/kernel/irq.c
9688
6470
/*
 * linux/arch/cris/kernel/irq.c
 *
 * Copyright (c) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 * This file contains the interrupt vectors and some
 * helper functions
 *
 */

#include <asm/irq.h>
#include <asm/current.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>

/*
 * Mask/unmask a single interrupt source by writing its bit to the
 * vector-mask clear/set registers.  NOTE(review): these macros carry a
 * trailing semicolon, so they must be used as bare statements only.
 */
#define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
#define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));

extern void kgdb_init(void);
extern void breakpoint(void);

/* don't use set_int_vector, it bypasses the linux interrupt handlers. it is
 * global just so that the kernel gdb can use it.
 */

void set_int_vector(int n, irqvectptr addr)
{
	/* Vector table entries 0x20..0x3f hold the hardware IRQ vectors. */
	etrax_irv->v[n + 0x20] = (irqvectptr)addr;
}

/* the breakpoint vector is obviously not made just like the normal irq handlers
 * but needs to contain _code_ to jump to addr.
 *
 * the BREAK n instruction jumps to IBR + n * 8
 */

void set_break_vector(int n, irqvectptr addr)
{
	/* Each break vector slot is 8 bytes: a 2-byte jump opcode followed
	 * by the 4-byte absolute target address written below. */
	unsigned short *jinstr = (unsigned short *)&etrax_irv->v[n*2];
	unsigned long *jaddr = (unsigned long *)(jinstr + 1);

	/* if you don't know what this does, do not touch it! */
	*jinstr = 0x0d3f;
	*jaddr = (unsigned long)addr;

	/* 00000026 <clrlop+1a> 3f0d82000000     jump  0x82 */
}

/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that do all
 * the operations that are needed. They are also written to be fast - and to
 * disable interrupts as little as humanly possible.
 *
 */

/* IRQ0 and 1 are special traps */
void hwbreakpoint(void);
void IRQ1_interrupt(void);
BUILD_TIMER_IRQ(2, 0x04)       /* the timer interrupt is somewhat special */
BUILD_IRQ(3, 0x08)
BUILD_IRQ(4, 0x10)
BUILD_IRQ(5, 0x20)
BUILD_IRQ(6, 0x40)
BUILD_IRQ(7, 0x80)
BUILD_IRQ(8, 0x100)
BUILD_IRQ(9, 0x200)
BUILD_IRQ(10, 0x400)
BUILD_IRQ(11, 0x800)
BUILD_IRQ(12, 0x1000)
BUILD_IRQ(13, 0x2000)
void mmu_bus_fault(void);      /* IRQ 14 is the bus fault interrupt */
void multiple_interrupt(void); /* IRQ 15 is the multiple IRQ interrupt */
BUILD_IRQ(16, 0x10000 | 0x20000)  /* ethernet tx interrupt needs to block rx */
BUILD_IRQ(17, 0x20000 | 0x10000)  /* ...and vice versa */
BUILD_IRQ(18, 0x40000)
BUILD_IRQ(19, 0x80000)
BUILD_IRQ(20, 0x100000)
BUILD_IRQ(21, 0x200000)
BUILD_IRQ(22, 0x400000)
BUILD_IRQ(23, 0x800000)
BUILD_IRQ(24, 0x1000000)
BUILD_IRQ(25, 0x2000000)
/* IRQ 26-30 are reserved */
BUILD_IRQ(31, 0x80000000)

/*
 * Pointers to the low-level handlers.  NULL slots are the special
 * vectors (traps, bus fault, multiple-IRQ) installed individually in
 * init_IRQ(), plus the reserved IRQs 26-30.
 */

static void (*interrupt[NR_IRQS])(void) = {
	NULL, NULL, IRQ2_interrupt, IRQ3_interrupt,
	IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
	IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
	IRQ12_interrupt, IRQ13_interrupt, NULL, NULL,
	IRQ16_interrupt, IRQ17_interrupt, IRQ18_interrupt, IRQ19_interrupt,
	IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt,
	IRQ24_interrupt, IRQ25_interrupt, NULL, NULL, NULL, NULL, NULL,
	IRQ31_interrupt
};

/* irq_chip callback: unmask the IRQ in the vector mask register. */
static void enable_crisv10_irq(struct irq_data *data)
{
	crisv10_unmask_irq(data->irq);
}

/* irq_chip callback: mask the IRQ in the vector mask register. */
static void disable_crisv10_irq(struct irq_data *data)
{
	crisv10_mask_irq(data->irq);
}

static struct irq_chip crisv10_irq_type = {
	.name		= "CRISv10",
	.irq_shutdown	= disable_crisv10_irq,
	.irq_enable	= enable_crisv10_irq,
	.irq_disable	= disable_crisv10_irq,
};

void weird_irq(void);
void system_call(void);          /* from entry.S */
void do_sigtrap(void);           /* from entry.S */
void gdb_handle_breakpoint(void); /* from entry.S */

extern void do_IRQ(int irq, struct pt_regs * regs);

/* Handle multiple IRQs */
void do_multiple_IRQ(struct pt_regs* regs)
{
	int bit;
	unsigned masked;
	unsigned mask;
	unsigned ethmask = 0;

	/* Get interrupts to mask and handle */
	mask = masked = *R_VECT_MASK_RD;

	/* Never mask timer IRQ */
	mask &= ~(IO_MASK(R_VECT_MASK_RD, timer0));

	/*
	 * If either ethernet interrupt (rx or tx) is active then block
	 * the other one too. Unblock afterwards also.
	 */
	if (mask & (IO_STATE(R_VECT_MASK_RD, dma0, active) |
		    IO_STATE(R_VECT_MASK_RD, dma1, active))) {
		ethmask = (IO_MASK(R_VECT_MASK_RD, dma0) |
			   IO_MASK(R_VECT_MASK_RD, dma1));
	}

	/* Block them */
	*R_VECT_MASK_CLR = (mask | ethmask);

	/* An extra irq_enter here to prevent softIRQs to run after
	 * each do_IRQ. This will decrease the interrupt latency.
	 */
	irq_enter();

	/* Handle all IRQs (bits 0/1 are the special traps, never pending here) */
	for (bit = 2; bit < 32; bit++) {
		if (masked & (1 << bit)) {
			do_IRQ(bit, regs);
		}
	}

	/* This irq_exit() will trigger the soft IRQs. */
	irq_exit();

	/* Unblock the IRQs again */
	*R_VECT_MASK_SET = (masked | ethmask);
}

/* init_IRQ() is called by start_kernel and is responsible for fixing IRQ masks
   and setting the irq vector table.
*/

void __init init_IRQ(void)
{
	int i;

	/* clear all interrupt masks */
#ifndef CONFIG_SVINTO_SIM
	*R_IRQ_MASK0_CLR = 0xffffffff;
	*R_IRQ_MASK1_CLR = 0xffffffff;
	*R_IRQ_MASK2_CLR = 0xffffffff;
#endif
	*R_VECT_MASK_CLR = 0xffffffff;

	/* Point every vector at the catch-all handler first. */
	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	/* Initialize IRQ handler descriptors. */
	for(i = 2; i < NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &crisv10_irq_type,
					 handle_simple_irq);
		set_int_vector(i, interrupt[i]);
	}

	/* the entries in the break vector contain actual code to be
	   executed by the associated break handler, rather than just a jump
	   address. therefore we need to setup a default breakpoint handler
	   for all breakpoints */
	for (i = 0; i < 16; i++)
		set_break_vector(i, do_sigtrap);

	/* except IRQ 15 which is the multiple-IRQ handler on Etrax100 */
	set_int_vector(15, multiple_interrupt);

	/* 0 and 1 which are special breakpoint/NMI traps */
	set_int_vector(0, hwbreakpoint);
	set_int_vector(1, IRQ1_interrupt);

	/* and irq 14 which is the mmu bus fault handler */
	set_int_vector(14, mmu_bus_fault);

	/* setup the system-call trap, which is reached by BREAK 13 */
	set_break_vector(13, system_call);

	/* setup a breakpoint handler for debugging used for both user and
	   kernel mode debugging  (which is why it is not inside an ifdef
	   CONFIG_ETRAX_KGDB) */
	set_break_vector(8, gdb_handle_breakpoint);

#ifdef CONFIG_ETRAX_KGDB
	/* setup kgdb if its enabled, and break into the debugger */
	kgdb_init();
	breakpoint();
#endif
}
gpl-2.0
pjsports/kernel-2.6.32.9-A88
drivers/usb/host/whci/wusb.c
12504
5554
/* * Wireless Host Controller (WHC) WUSB operations. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" static int whc_update_di(struct whc *whc, int idx) { int offset = idx / 32; u32 bit = 1 << (idx % 32); le_writel(bit, whc->base + WUSBDIBUPDATED + offset); return whci_wait_for(&whc->umc->dev, whc->base + WUSBDIBUPDATED + offset, bit, 0, 100, "DI update"); } /* * WHCI starts MMCs based on there being a valid GTK so these need * only start/stop the asynchronous and periodic schedules and send a * channel stop command. 
*/ int whc_wusbhc_start(struct wusbhc *wusbhc) { struct whc *whc = wusbhc_to_whc(wusbhc); asl_start(whc); pzl_start(whc); return 0; } void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay) { struct whc *whc = wusbhc_to_whc(wusbhc); u32 stop_time, now_time; int ret; pzl_stop(whc); asl_stop(whc); now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK; stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff; ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0); if (ret == 0) msleep(delay); } int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, u8 handle, struct wuie_hdr *wuie) { struct whc *whc = wusbhc_to_whc(wusbhc); u32 params; params = (interval << 24) | (repeat_cnt << 16) | (wuie->bLength << 8) | handle; return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength); } int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle) { struct whc *whc = wusbhc_to_whc(wusbhc); u32 params; params = handle; return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0); } int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm) { struct whc *whc = wusbhc_to_whc(wusbhc); if (stream_index >= 0) whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index)); return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm)); } int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct whc *whc = wusbhc_to_whc(wusbhc); int idx = wusb_dev->port_idx; struct di_buf_entry *di = &whc->di_buf[idx]; int ret; mutex_lock(&whc->mutex); uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability); di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK); di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr); ret = whc_update_di(whc, idx); mutex_unlock(&whc->mutex); return ret; } /* * Set the number of Device Notification Time Slots (DNTS) and enable * device notifications. 
*/ int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) { struct whc *whc = wusbhc_to_whc(wusbhc); u32 dntsctrl; dntsctrl = WUSBDNTSCTRL_ACTIVE | WUSBDNTSCTRL_INTERVAL(interval) | WUSBDNTSCTRL_SLOTS(slots); le_writel(dntsctrl, whc->base + WUSBDNTSCTRL); return 0; } static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, const void *key, size_t key_size, bool is_gtk) { uint32_t setkeycmd; uint32_t seckey[4]; int i; int ret; memcpy(seckey, key, key_size); setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index); if (is_gtk) setkeycmd |= WUSBSETSECKEYCMD_GTK; le_writel(tkid, whc->base + WUSBTKID); for (i = 0; i < 4; i++) le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i); le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD); ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD, WUSBSETSECKEYCMD_SET, 0, 100, "set key"); return ret; } /** * whc_set_ptk - set the PTK to use for a device. * * The index into the key table for this PTK is the same as the * device's port index. */ int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *ptk, size_t key_size) { struct whc *whc = wusbhc_to_whc(wusbhc); struct di_buf_entry *di = &whc->di_buf[port_idx]; int ret; mutex_lock(&whc->mutex); if (ptk) { ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false); if (ret) goto out; di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK; di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx); } else di->addr_sec_info &= ~WHC_DI_SECURE; ret = whc_update_di(whc, port_idx); out: mutex_unlock(&whc->mutex); return ret; } /** * whc_set_gtk - set the GTK for subsequent broadcast packets * * The GTK is stored in the last entry in the key table (the previous * N_DEVICES entries are for the per-device PTKs). 
*/ int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, const void *gtk, size_t key_size) { struct whc *whc = wusbhc_to_whc(wusbhc); int ret; mutex_lock(&whc->mutex); ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true); mutex_unlock(&whc->mutex); return ret; } int whc_set_cluster_id(struct whc *whc, u8 bcid) { whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid)); return 0; }
gpl-2.0
Vegaviet-Dev/Kernel_N4_N910SLK
net/rmnet_data/rmnet_data_config.c
217
29841
/* * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * RMNET Data configuration engine * */ #include <net/sock.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/rmnet_data.h> #include "rmnet_data_config.h" #include "rmnet_data_handlers.h" #include "rmnet_data_vnd.h" #include "rmnet_data_private.h" RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG); /* ***************** Local Definitions and Declarations ********************* */ static struct sock *nl_socket_handle; #ifndef RMNET_KERNEL_PRE_3_8 static struct netlink_kernel_cfg rmnet_netlink_cfg = { .input = rmnet_config_netlink_msg_handler }; #endif static struct notifier_block rmnet_dev_notifier = { .notifier_call = rmnet_config_notify_cb, .next = 0, .priority = 0 }; #define RMNET_NL_MSG_SIZE(Y) (sizeof(((struct rmnet_nl_msg_s *)0)->Y)) struct rmnet_free_vnd_work { struct work_struct work; int vnd_id; }; /* ***************** Init and Cleanup *************************************** */ #ifdef RMNET_KERNEL_PRE_3_8 static struct sock *_rmnet_config_start_netlink(void) { return netlink_kernel_create(&init_net, RMNET_NETLINK_PROTO, 0, rmnet_config_netlink_msg_handler, NULL, THIS_MODULE); } #else static struct sock *_rmnet_config_start_netlink(void) { return netlink_kernel_create(&init_net, RMNET_NETLINK_PROTO, &rmnet_netlink_cfg); } #endif /* RMNET_KERNEL_PRE_3_8 */ /** * rmnet_config_init() - Startup init * * Registers netlink protocol with kernel and opens 
socket. Netlink handler is * registered with kernel. */ int rmnet_config_init(void) { int rc; nl_socket_handle = _rmnet_config_start_netlink(); if (!nl_socket_handle) { LOGE("%s", "Failed to init netlink socket"); return RMNET_INIT_ERROR; } rc = register_netdevice_notifier(&rmnet_dev_notifier); if (rc != 0) { LOGE("Failed to register device notifier; rc=%d", rc); /* TODO: Cleanup the nl socket */ return RMNET_INIT_ERROR; } return 0; } /** * rmnet_config_exit() - Cleans up all netlink related resources */ void rmnet_config_exit(void) { netlink_kernel_release(nl_socket_handle); } /* ***************** Helper Functions *************************************** */ /** * _rmnet_is_physical_endpoint_associated() - Determines if device is associated * @dev: Device to get check * * Compares device rx_handler callback pointer against known funtion * * Return: * - 1 if associated * - 0 if NOT associated */ static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev) { rx_handler_func_t *rx_handler; rx_handler = rcu_dereference(dev->rx_handler); if (rx_handler == rmnet_rx_handler) return 1; else return 0; } /** * _rmnet_get_phys_ep_config() - Get physical ep config for an associated device * @dev: Device to get endpoint configuration from * * Return: * - pointer to configuration if successful * - 0 (null) if device is not associated */ static inline struct rmnet_phys_ep_conf_s *_rmnet_get_phys_ep_config (struct net_device *dev) { if (_rmnet_is_physical_endpoint_associated(dev)) return (struct rmnet_phys_ep_conf_s *) rcu_dereference(dev->rx_handler_data); else return 0; } /** * _rmnet_get_logical_ep() - Gets the logical end point configuration * structure for a network device * @dev: Device to get endpoint configuration from * @config_id: Logical endpoint id on device * Retrieves the logical_endpoint_config structure. 
* * Return: * - End point configuration structure * - NULL in case of an error */ struct rmnet_logical_ep_conf_s *_rmnet_get_logical_ep(struct net_device *dev, int config_id) { struct rmnet_phys_ep_conf_s *config; struct rmnet_logical_ep_conf_s *epconfig_l; if (rmnet_vnd_is_vnd(dev)) epconfig_l = rmnet_vnd_get_le_config(dev); else { config = _rmnet_get_phys_ep_config(dev); if (!config) return NULL; if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT) epconfig_l = &config->local_ep; else epconfig_l = &config->muxed_ep[config_id]; } return epconfig_l; } /* ***************** Netlink Handler **************************************** */ #define _RMNET_NETLINK_NULL_CHECKS() do { if (!rmnet_header || !resp_rmnet) \ BUG(); \ } while (0) static void _rmnet_netlink_set_link_egress_data_format (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } resp_rmnet->return_code = rmnet_set_egress_data_format(dev, rmnet_header->data_format.flags, rmnet_header->data_format.agg_size, rmnet_header->data_format.agg_count ); dev_put(dev); } static void _rmnet_netlink_set_link_ingress_data_format (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } resp_rmnet->return_code = rmnet_set_ingress_data_format( dev, rmnet_header->data_format.flags, rmnet_header->data_format.tail_spacing); dev_put(dev); } static void _rmnet_netlink_set_logical_ep_config (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev, *dev2; 
_RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; if (rmnet_header->local_ep_config.ep_id < -1 || rmnet_header->local_ep_config.ep_id > 254) { resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS; return; } dev = dev_get_by_name(&init_net, rmnet_header->local_ep_config.dev); dev2 = dev_get_by_name(&init_net, rmnet_header->local_ep_config.next_dev); if (dev != 0 && dev2 != 0) resp_rmnet->return_code = rmnet_set_logical_endpoint_config( dev, rmnet_header->local_ep_config.ep_id, rmnet_header->local_ep_config.operating_mode, dev2); else resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; if (dev != 0) dev_put(dev); if (dev2 != 0) dev_put(dev2); } static void _rmnet_netlink_unset_logical_ep_config (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; if (rmnet_header->local_ep_config.ep_id < -1 || rmnet_header->local_ep_config.ep_id > 254) { resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS; return; } dev = dev_get_by_name(&init_net, rmnet_header->local_ep_config.dev); if (dev != 0) { resp_rmnet->return_code = rmnet_unset_logical_endpoint_config( dev, rmnet_header->local_ep_config.ep_id); dev_put(dev); } else { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; } } static void _rmnet_netlink_associate_network_device (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } resp_rmnet->return_code = rmnet_associate_network_device(dev); dev_put(dev); } static void _rmnet_netlink_unassociate_network_device (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = 
RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } resp_rmnet->return_code = rmnet_unassociate_network_device(dev); dev_put(dev); } static void _rmnet_netlink_get_link_egress_data_format (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; struct rmnet_phys_ep_conf_s *config; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } config = _rmnet_get_phys_ep_config(dev); if (!config) { resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; dev_put(dev); return; } /* Begin Data */ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA; resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format); resp_rmnet->data_format.flags = config->egress_data_format; resp_rmnet->data_format.agg_count = config->egress_agg_count; resp_rmnet->data_format.agg_size = config->egress_agg_size; dev_put(dev); } static void _rmnet_netlink_get_link_ingress_data_format (struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { struct net_device *dev; struct rmnet_phys_ep_conf_s *config; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); if (!dev) { resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; return; } config = _rmnet_get_phys_ep_config(dev); if (!config) { resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; dev_put(dev); return; } /* Begin Data */ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA; resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format); resp_rmnet->data_format.flags = config->ingress_data_format; resp_rmnet->data_format.tail_spacing = config->tail_spacing; dev_put(dev); } static void _rmnet_netlink_get_vnd_name (struct rmnet_nl_msg_s *rmnet_header, 
struct rmnet_nl_msg_s *resp_rmnet) { int r; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; r = rmnet_vnd_get_name(rmnet_header->vnd.id, resp_rmnet->vnd.vnd_name, RMNET_MAX_STR_LEN); if (r != 0) { resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; return; } /* Begin Data */ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA; resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(vnd); } static void _rmnet_netlink_add_del_vnd_tc_flow (uint32_t command, struct rmnet_nl_msg_s *rmnet_header, struct rmnet_nl_msg_s *resp_rmnet) { uint32_t id; uint32_t map_flow_id; uint32_t tc_flow_id; _RMNET_NETLINK_NULL_CHECKS(); resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; id = rmnet_header->flow_control.id; map_flow_id = rmnet_header->flow_control.map_flow_id; tc_flow_id = rmnet_header->flow_control.tc_flow_id; switch (command) { case RMNET_NETLINK_ADD_VND_TC_FLOW: resp_rmnet->return_code = rmnet_vnd_add_tc_flow(id, map_flow_id, tc_flow_id); break; case RMNET_NETLINK_DEL_VND_TC_FLOW: resp_rmnet->return_code = rmnet_vnd_del_tc_flow(id, map_flow_id, tc_flow_id); break; default: LOGM("Called with unhandled command %d", command); resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; break; } } /** * rmnet_config_netlink_msg_handler() - Netlink message handler callback * @skb: Packet containing netlink messages * * Standard kernel-expected format for a netlink message handler. Processes SKBs * which contain RmNet data specific netlink messages. 
*/ void rmnet_config_netlink_msg_handler(struct sk_buff *skb) { struct nlmsghdr *nlmsg_header, *resp_nlmsg; struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet; int return_pid, response_data_length; struct sk_buff *skb_response; response_data_length = 0; nlmsg_header = (struct nlmsghdr *) skb->data; rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header); LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d", nlmsg_header->nlmsg_pid, nlmsg_header->nlmsg_seq, nlmsg_header->nlmsg_len, rmnet_header->message_type); return_pid = nlmsg_header->nlmsg_pid; skb_response = nlmsg_new(sizeof(struct nlmsghdr) + sizeof(struct rmnet_nl_msg_s), GFP_KERNEL); if (!skb_response) { LOGH("%s", "Failed to allocate response buffer"); return; } resp_nlmsg = nlmsg_put(skb_response, 0, nlmsg_header->nlmsg_seq, NLMSG_DONE, sizeof(struct rmnet_nl_msg_s), 0); resp_rmnet = nlmsg_data(resp_nlmsg); if (!resp_rmnet) BUG(); resp_rmnet->message_type = rmnet_header->message_type; rtnl_lock(); switch (rmnet_header->message_type) { case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_associate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE: _rmnet_netlink_unassociate_network_device (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_set_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT: _rmnet_netlink_get_link_egress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT: _rmnet_netlink_set_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT: _rmnet_netlink_get_link_ingress_data_format (rmnet_header, resp_rmnet); break; case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG: _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG: _rmnet_netlink_unset_logical_ep_config(rmnet_header, 
resp_rmnet); break; case RMNET_NETLINK_NEW_VND: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = rmnet_create_vnd(rmnet_header->vnd.id); break; case RMNET_NETLINK_NEW_VND_WITH_PREFIX: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = rmnet_create_vnd_prefix( rmnet_header->vnd.id, rmnet_header->vnd.vnd_name); break; case RMNET_NETLINK_FREE_VND: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; /* Please check rmnet_vnd_free_dev documentation regarding the below locking sequence */ rtnl_unlock(); resp_rmnet->return_code = rmnet_free_vnd(rmnet_header->vnd.id); rtnl_lock(); break; case RMNET_NETLINK_GET_VND_NAME: _rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet); break; case RMNET_NETLINK_DEL_VND_TC_FLOW: case RMNET_NETLINK_ADD_VND_TC_FLOW: _rmnet_netlink_add_del_vnd_tc_flow(rmnet_header->message_type, rmnet_header, resp_rmnet); break; default: resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE; break; } rtnl_unlock(); nlmsg_unicast(nl_socket_handle, skb_response, return_pid); LOGD("%s", "Done processing command"); } /* ***************** Configuration API ************************************** */ /** * rmnet_unassociate_network_device() - Unassociate network device * @dev: Device to unassociate * * Frees all structures generate for device. Unregisters rx_handler * todo: needs to do some sanity verification first (is device in use, etc...) 
* * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null */ int rmnet_unassociate_network_device(struct net_device *dev) { struct rmnet_phys_ep_conf_s *config; int config_id = RMNET_LOCAL_LOGICAL_ENDPOINT; struct rmnet_logical_ep_conf_s *epconfig_l; ASSERT_RTNL(); LOGL("(%s);", dev->name); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; if (!_rmnet_is_physical_endpoint_associated(dev)) return RMNET_CONFIG_INVALID_REQUEST; for (; config_id < RMNET_DATA_MAX_LOGICAL_EP; config_id++) { epconfig_l = _rmnet_get_logical_ep(dev, config_id); if (epconfig_l && epconfig_l->refcount) return RMNET_CONFIG_DEVICE_IN_USE; } config = (struct rmnet_phys_ep_conf_s *) rcu_dereference(dev->rx_handler_data); if (!config) return RMNET_CONFIG_UNKNOWN_ERROR; kfree(config); netdev_rx_handler_unregister(dev); /* Explicitly release the reference from the device */ dev_put(dev); return RMNET_CONFIG_OK; } /** * rmnet_set_ingress_data_format() - Set ingress data format on network device * @dev: Device to ingress data format on * @egress_data_format: 32-bit unsigned bitmask of ingress format * * Network device must already have association with RmNet Data driver * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null */ int rmnet_set_ingress_data_format(struct net_device *dev, uint32_t ingress_data_format, uint8_t tail_spacing) { struct rmnet_phys_ep_conf_s *config; ASSERT_RTNL(); LOGL("(%s,0x%08X);", dev->name, ingress_data_format); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; config = _rmnet_get_phys_ep_config(dev); if (!config) return RMNET_CONFIG_INVALID_REQUEST; config->ingress_data_format = ingress_data_format; config->tail_spacing = tail_spacing; return 
RMNET_CONFIG_OK; } /** * rmnet_set_egress_data_format() - Set egress data format on network device * @dev: Device to egress data format on * @egress_data_format: 32-bit unsigned bitmask of egress format * * Network device must already have association with RmNet Data driver * todo: Bounds check on agg_* * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null */ int rmnet_set_egress_data_format(struct net_device *dev, uint32_t egress_data_format, uint16_t agg_size, uint16_t agg_count) { struct rmnet_phys_ep_conf_s *config; ASSERT_RTNL(); LOGL("(%s,0x%08X, %d, %d);", dev->name, egress_data_format, agg_size, agg_count); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; config = _rmnet_get_phys_ep_config(dev); if (!config) return RMNET_CONFIG_UNKNOWN_ERROR; config->egress_data_format = egress_data_format; config->egress_agg_size = agg_size; config->egress_agg_count = agg_count; return RMNET_CONFIG_OK; } /** * rmnet_associate_network_device() - Associate network device * @dev: Device to register with RmNet data * * Typically used on physical network devices. Registers RX handler and private * metadata structures. 
* * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_INVALID_REQUEST if the device to be associated is a vnd * - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled * - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails */ int rmnet_associate_network_device(struct net_device *dev) { struct rmnet_phys_ep_conf_s *config; int rc; ASSERT_RTNL(); LOGL("(%s);\n", dev->name); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; if (_rmnet_is_physical_endpoint_associated(dev)) { LOGM("%s is already regestered", dev->name); return RMNET_CONFIG_DEVICE_IN_USE; } if (rmnet_vnd_is_vnd(dev)) { LOGM("%s is a vnd", dev->name); return RMNET_CONFIG_INVALID_REQUEST; } config = (struct rmnet_phys_ep_conf_s *) kmalloc(sizeof(struct rmnet_phys_ep_conf_s), GFP_ATOMIC); if (!config) return RMNET_CONFIG_NOMEM; memset(config, 0, sizeof(struct rmnet_phys_ep_conf_s)); config->dev = dev; spin_lock_init(&config->agg_lock); rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config); if (rc) { LOGM("netdev_rx_handler_register returns %d", rc); kfree(config); return RMNET_CONFIG_DEVICE_IN_USE; } /* Explicitly hold a reference to the device */ dev_hold(dev); return RMNET_CONFIG_OK; } /** * _rmnet_set_logical_endpoint_config() - Set logical endpoing config on device * @dev: Device to set endpoint configuration on * @config_id: logical endpoint id on device * @epconfig: endpoing configuration structure to set * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null * - RMNET_CONFIG_DEVICE_IN_USE if device already has a logical ep * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range */ int _rmnet_set_logical_endpoint_config(struct net_device *dev, int config_id, struct rmnet_logical_ep_conf_s *epconfig) { struct rmnet_logical_ep_conf_s *epconfig_l; ASSERT_RTNL(); if (!dev) return 
RMNET_CONFIG_NO_SUCH_DEVICE; if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT || config_id >= RMNET_DATA_MAX_LOGICAL_EP) return RMNET_CONFIG_BAD_ARGUMENTS; epconfig_l = _rmnet_get_logical_ep(dev, config_id); if (!epconfig_l) return RMNET_CONFIG_UNKNOWN_ERROR; if (epconfig_l->refcount) return RMNET_CONFIG_DEVICE_IN_USE; memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s)); if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT) epconfig_l->mux_id = 0; else epconfig_l->mux_id = config_id; /* Explicitly hold a reference to the egress device */ dev_hold(epconfig_l->egress_dev); return RMNET_CONFIG_OK; } /** * _rmnet_unset_logical_endpoint_config() - Un-set the logical endpoing config * on device * @dev: Device to set endpoint configuration on * @config_id: logical endpoint id on device * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range */ int _rmnet_unset_logical_endpoint_config(struct net_device *dev, int config_id) { struct rmnet_logical_ep_conf_s *epconfig_l = 0; ASSERT_RTNL(); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT || config_id >= RMNET_DATA_MAX_LOGICAL_EP) return RMNET_CONFIG_BAD_ARGUMENTS; epconfig_l = _rmnet_get_logical_ep(dev, config_id); if (!epconfig_l || !epconfig_l->refcount) return RMNET_CONFIG_NO_SUCH_DEVICE; /* Explicitly release the reference from the egress device */ dev_put(epconfig_l->egress_dev); memset(epconfig_l, 0, sizeof(struct rmnet_logical_ep_conf_s)); return RMNET_CONFIG_OK; } /** * rmnet_set_logical_endpoint_config() - Set logical endpoint config on a device * @dev: Device to set endpoint configuration on * @config_id: logical endpoint id on device * @rmnet_mode: endpoint mode. 
Values from: rmnet_config_endpoint_modes_e * @egress_device: device node to forward packet to once done processing in * ingress/egress handlers * * Creates a logical_endpoint_config structure and fills in the information from * function arguments. Calls _rmnet_set_logical_endpoint_config() to finish * configuration. Network device must already have association with RmNet Data * driver * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by * RmNet data module * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range */ int rmnet_set_logical_endpoint_config(struct net_device *dev, int config_id, uint8_t rmnet_mode, struct net_device *egress_dev) { struct rmnet_logical_ep_conf_s epconfig; LOGL("(%s, %d, %d, %s);", dev->name, config_id, rmnet_mode, egress_dev->name); if (!egress_dev || ((!_rmnet_is_physical_endpoint_associated(egress_dev)) && (!rmnet_vnd_is_vnd(egress_dev)))) { return RMNET_CONFIG_BAD_EGRESS_DEVICE; } memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s)); epconfig.refcount = 1; epconfig.rmnet_mode = rmnet_mode; epconfig.egress_dev = egress_dev; return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig); } /** * rmnet_unset_logical_endpoint_config() - Un-set logical endpoing configuration * on a device * @dev: Device to set endpoint configuration on * @config_id: logical endpoint id on device * * Retrieves the logical_endpoint_config structure and frees the egress device. 
* Network device must already have association with RmNet Data driver * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null * - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range */ int rmnet_unset_logical_endpoint_config(struct net_device *dev, int config_id) { LOGL("(%s, %d);", dev->name, config_id); if (!dev || ((!_rmnet_is_physical_endpoint_associated(dev)) && (!rmnet_vnd_is_vnd(dev)))) { return RMNET_CONFIG_NO_SUCH_DEVICE; } return _rmnet_unset_logical_endpoint_config(dev, config_id); } /** * rmnet_create_vnd() - Create virtual network device node * @id: RmNet virtual device node id * * Return: * - result of rmnet_vnd_create_dev() */ int rmnet_create_vnd(int id) { struct net_device *dev; ASSERT_RTNL(); LOGL("(%d);", id); pr_info("[MIF] %s\n", __func__); return rmnet_vnd_create_dev(id, &dev, NULL); } /** * rmnet_create_vnd() - Create virtual network device node * @id: RmNet virtual device node id * @prefix: String prefix for device name * * Return: * - result of rmnet_vnd_create_dev() */ int rmnet_create_vnd_prefix(int id, const char *prefix) { struct net_device *dev; ASSERT_RTNL(); LOGL("(%d, \"%s\");", id, prefix); pr_info("[MIF] %s\n", __func__); return rmnet_vnd_create_dev(id, &dev, prefix); } /** * rmnet_free_vnd() - Free virtual network device node * @id: RmNet virtual device node id * * Return: * - result of rmnet_vnd_free_dev() */ int rmnet_free_vnd(int id) { LOGL("(%d);", id); return rmnet_vnd_free_dev(id); } static void _rmnet_free_vnd_later(struct work_struct *work) { struct rmnet_free_vnd_work *fwork; fwork = (struct rmnet_free_vnd_work *) work; rmnet_free_vnd(fwork->vnd_id); kfree(work); } /** * rmnet_free_vnd_later() - Schedule a work item to free virtual network device * @id: RmNet virtual device node id * * Schedule the VND to be freed at a later time. 
We need to do this if the * rtnl lock is already held as to prevent a deadlock. */ static void rmnet_free_vnd_later(int id) { struct rmnet_free_vnd_work *work; LOGL("(%d);", id); work = (struct rmnet_free_vnd_work *) kmalloc(sizeof(struct rmnet_free_vnd_work), GFP_KERNEL); if (!work) { LOGH("Failed to queue removal of VND:%d", id); return; } INIT_WORK((struct work_struct *)work, _rmnet_free_vnd_later); work->vnd_id = id; schedule_work((struct work_struct *)work); } /** * rmnet_force_unassociate_device() - Force a device to unassociate * @dev: Device to unassociate * * Return: * - void */ static void rmnet_force_unassociate_device(struct net_device *dev) { int i; struct net_device *vndev; struct rmnet_logical_ep_conf_s *cfg; if (!dev) BUG(); if (!_rmnet_is_physical_endpoint_associated(dev)) { LOGM("%s", "Called on unassociated device, skipping"); return; } /* Check the VNDs for offending mappings */ for (i = 0; i < RMNET_DATA_MAX_VND; i++) { vndev = rmnet_vnd_get_by_id(i); if (!vndev) { LOGL("VND %d not in use; skipping", i); continue; } cfg = rmnet_vnd_get_le_config(vndev); if (!cfg) { LOGH("Got NULL config from VND %d", i); BUG(); continue; } if (cfg->refcount && (cfg->egress_dev == dev)) { rmnet_unset_logical_endpoint_config(vndev, RMNET_LOCAL_LOGICAL_ENDPOINT); rmnet_free_vnd_later(i); } } /* Clear on the mappings on the phys ep */ rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT); for (i = 0; i < RMNET_DATA_MAX_LOGICAL_EP; i++) rmnet_unset_logical_endpoint_config(dev, i); rmnet_unassociate_network_device(dev); } /** * rmnet_config_notify_cb() - Callback for netdevice notifier chain * @nb: Notifier block data * @event: Netdevice notifier event ID * @data: Contains a net device for which we are getting notified * * Return: * - result of NOTIFY_DONE() */ int rmnet_config_notify_cb(struct notifier_block *nb, unsigned long event, void *data) { struct net_device *dev = data; if (!dev) BUG(); LOGL("(..., %lu, %s)", event, dev->name); switch 
(event) { case NETDEV_UNREGISTER_FINAL: case NETDEV_UNREGISTER: if (_rmnet_is_physical_endpoint_associated(dev)) { LOGH("Kernel is trying to unregister %s", dev->name); rmnet_force_unassociate_device(dev); } break; default: LOGD("Unhandeled event [%lu]", event); break; } return NOTIFY_DONE; }
gpl-2.0
lexi6725/linux-3.17.0
drivers/watchdog/ts72xx_wdt.c
217
10065
/* * Watchdog driver for Technologic Systems TS-72xx based SBCs * (TS-7200, TS-7250 and TS-7260). These boards have external * glue logic CPLD chip, which includes programmable watchdog * timer. * * Copyright (c) 2009 Mika Westerberg <mika.westerberg@iki.fi> * * This driver is based on ep93xx_wdt and wm831x_wdt drivers. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/fs.h> #include <linux/io.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #define TS72XX_WDT_FEED_VAL 0x05 #define TS72XX_WDT_DEFAULT_TIMEOUT 8 static int timeout = TS72XX_WDT_DEFAULT_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. " "(1 <= timeout <= 8, default=" __MODULE_STRING(TS72XX_WDT_DEFAULT_TIMEOUT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); /** * struct ts72xx_wdt - watchdog control structure * @lock: lock that protects this structure * @regval: watchdog timeout value suitable for control register * @flags: flags controlling watchdog device state * @control_reg: watchdog control register * @feed_reg: watchdog feed register * @pdev: back pointer to platform dev */ struct ts72xx_wdt { struct mutex lock; int regval; #define TS72XX_WDT_BUSY_FLAG 1 #define TS72XX_WDT_EXPECT_CLOSE_FLAG 2 int flags; void __iomem *control_reg; void __iomem *feed_reg; struct platform_device *pdev; }; static struct platform_device *ts72xx_wdt_pdev; /* * TS-72xx Watchdog supports following timeouts (value written * to control register): * value description * ------------------------- * 0x00 watchdog disabled * 0x01 250ms * 0x02 500ms * 0x03 
1s * 0x04 reserved * 0x05 2s * 0x06 4s * 0x07 8s * * Timeouts below 1s are not very usable so we don't * allow them at all. * * We provide two functions that convert between these: * timeout_to_regval() and regval_to_timeout(). */ static const struct { int timeout; int regval; } ts72xx_wdt_map[] = { { 1, 3 }, { 2, 5 }, { 4, 6 }, { 8, 7 }, }; /** * timeout_to_regval() - converts given timeout to control register value * @new_timeout: timeout in seconds to be converted * * Function converts given @new_timeout into valid value that can * be programmed into watchdog control register. When conversion is * not possible, function returns %-EINVAL. */ static int timeout_to_regval(int new_timeout) { int i; /* first limit it to 1 - 8 seconds */ new_timeout = clamp_val(new_timeout, 1, 8); for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) { if (ts72xx_wdt_map[i].timeout >= new_timeout) return ts72xx_wdt_map[i].regval; } return -EINVAL; } /** * regval_to_timeout() - converts control register value to timeout * @regval: control register value to be converted * * Function converts given @regval to timeout in seconds (1, 2, 4 or 8). * If @regval cannot be converted, function returns %-EINVAL. */ static int regval_to_timeout(int regval) { int i; for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) { if (ts72xx_wdt_map[i].regval == regval) return ts72xx_wdt_map[i].timeout; } return -EINVAL; } /** * ts72xx_wdt_kick() - kick the watchdog * @wdt: watchdog to be kicked * * Called with @wdt->lock held. */ static inline void ts72xx_wdt_kick(struct ts72xx_wdt *wdt) { __raw_writeb(TS72XX_WDT_FEED_VAL, wdt->feed_reg); } /** * ts72xx_wdt_start() - starts the watchdog timer * @wdt: watchdog to be started * * This function programs timeout to watchdog timer * and starts it. * * Called with @wdt->lock held. */ static void ts72xx_wdt_start(struct ts72xx_wdt *wdt) { /* * To program the wdt, it first must be "fed" and * only after that (within 30 usecs) the configuration * can be changed. 
*/ ts72xx_wdt_kick(wdt); __raw_writeb((u8)wdt->regval, wdt->control_reg); } /** * ts72xx_wdt_stop() - stops the watchdog timer * @wdt: watchdog to be stopped * * Called with @wdt->lock held. */ static void ts72xx_wdt_stop(struct ts72xx_wdt *wdt) { ts72xx_wdt_kick(wdt); __raw_writeb(0, wdt->control_reg); } static int ts72xx_wdt_open(struct inode *inode, struct file *file) { struct ts72xx_wdt *wdt = platform_get_drvdata(ts72xx_wdt_pdev); int regval; /* * Try to convert default timeout to valid register * value first. */ regval = timeout_to_regval(timeout); if (regval < 0) { dev_err(&wdt->pdev->dev, "failed to convert timeout (%d) to register value\n", timeout); return regval; } if (mutex_lock_interruptible(&wdt->lock)) return -ERESTARTSYS; if ((wdt->flags & TS72XX_WDT_BUSY_FLAG) != 0) { mutex_unlock(&wdt->lock); return -EBUSY; } wdt->flags = TS72XX_WDT_BUSY_FLAG; wdt->regval = regval; file->private_data = wdt; ts72xx_wdt_start(wdt); mutex_unlock(&wdt->lock); return nonseekable_open(inode, file); } static int ts72xx_wdt_release(struct inode *inode, struct file *file) { struct ts72xx_wdt *wdt = file->private_data; if (mutex_lock_interruptible(&wdt->lock)) return -ERESTARTSYS; if ((wdt->flags & TS72XX_WDT_EXPECT_CLOSE_FLAG) != 0) { ts72xx_wdt_stop(wdt); } else { dev_warn(&wdt->pdev->dev, "TS-72XX WDT device closed unexpectly. " "Watchdog timer will not stop!\n"); /* * Kick it one more time, to give userland some time * to recover (for example, respawning the kicker * daemon). */ ts72xx_wdt_kick(wdt); } wdt->flags = 0; mutex_unlock(&wdt->lock); return 0; } static ssize_t ts72xx_wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { struct ts72xx_wdt *wdt = file->private_data; if (!len) return 0; if (mutex_lock_interruptible(&wdt->lock)) return -ERESTARTSYS; ts72xx_wdt_kick(wdt); /* * Support for magic character closing. User process * writes 'V' into the device, just before it is closed. 
* This means that we know that the wdt timer can be * stopped after user closes the device. */ if (!nowayout) { int i; for (i = 0; i < len; i++) { char c; /* In case it was set long ago */ wdt->flags &= ~TS72XX_WDT_EXPECT_CLOSE_FLAG; if (get_user(c, data + i)) { mutex_unlock(&wdt->lock); return -EFAULT; } if (c == 'V') { wdt->flags |= TS72XX_WDT_EXPECT_CLOSE_FLAG; break; } } } mutex_unlock(&wdt->lock); return len; } static const struct watchdog_info winfo = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "TS-72XX WDT", }; static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ts72xx_wdt *wdt = file->private_data; void __user *argp = (void __user *)arg; int __user *p = (int __user *)argp; int error = 0; if (mutex_lock_interruptible(&wdt->lock)) return -ERESTARTSYS; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &winfo, sizeof(winfo))) error = -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: error = put_user(0, p); break; case WDIOC_KEEPALIVE: ts72xx_wdt_kick(wdt); break; case WDIOC_SETOPTIONS: { int options; error = get_user(options, p); if (error) break; error = -EINVAL; if ((options & WDIOS_DISABLECARD) != 0) { ts72xx_wdt_stop(wdt); error = 0; } if ((options & WDIOS_ENABLECARD) != 0) { ts72xx_wdt_start(wdt); error = 0; } break; } case WDIOC_SETTIMEOUT: { int new_timeout; int regval; error = get_user(new_timeout, p); if (error) break; regval = timeout_to_regval(new_timeout); if (regval < 0) { error = regval; break; } ts72xx_wdt_stop(wdt); wdt->regval = regval; ts72xx_wdt_start(wdt); /*FALLTHROUGH*/ } case WDIOC_GETTIMEOUT: error = put_user(regval_to_timeout(wdt->regval), p); break; default: error = -ENOTTY; break; } mutex_unlock(&wdt->lock); return error; } static const struct file_operations ts72xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = ts72xx_wdt_open, .release = ts72xx_wdt_release, .write = ts72xx_wdt_write, 
.unlocked_ioctl = ts72xx_wdt_ioctl, }; static struct miscdevice ts72xx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ts72xx_wdt_fops, }; static int ts72xx_wdt_probe(struct platform_device *pdev) { struct ts72xx_wdt *wdt; struct resource *r1, *r2; int error = 0; wdt = devm_kzalloc(&pdev->dev, sizeof(struct ts72xx_wdt), GFP_KERNEL); if (!wdt) return -ENOMEM; r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0); wdt->control_reg = devm_ioremap_resource(&pdev->dev, r1); if (IS_ERR(wdt->control_reg)) return PTR_ERR(wdt->control_reg); r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); wdt->feed_reg = devm_ioremap_resource(&pdev->dev, r2); if (IS_ERR(wdt->feed_reg)) return PTR_ERR(wdt->feed_reg); platform_set_drvdata(pdev, wdt); ts72xx_wdt_pdev = pdev; wdt->pdev = pdev; mutex_init(&wdt->lock); /* make sure that the watchdog is disabled */ ts72xx_wdt_stop(wdt); error = misc_register(&ts72xx_wdt_miscdev); if (error) { dev_err(&pdev->dev, "failed to register miscdev\n"); return error; } dev_info(&pdev->dev, "TS-72xx Watchdog driver\n"); return 0; } static int ts72xx_wdt_remove(struct platform_device *pdev) { int error; error = misc_deregister(&ts72xx_wdt_miscdev); return error; } static struct platform_driver ts72xx_wdt_driver = { .probe = ts72xx_wdt_probe, .remove = ts72xx_wdt_remove, .driver = { .name = "ts72xx-wdt", .owner = THIS_MODULE, }, }; module_platform_driver(ts72xx_wdt_driver); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); MODULE_DESCRIPTION("TS-72xx SBC Watchdog"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ts72xx-wdt");
gpl-2.0
devmapal/linux
drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
473
3354
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "priv.h" #include <core/client.h> #include <engine/fifo.h> #include <nvif/class.h> static int nvkm_dma_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct nvkm_dma *dma = nvkm_dma(oclass->engine); struct nvkm_dmaobj *dmaobj = NULL; int ret; ret = dma->func->class_new(dma, oclass, data, size, &dmaobj); if (dmaobj) *pobject = &dmaobj->object; return ret; } static const struct nvkm_device_oclass nvkm_dma_oclass_base = { .ctor = nvkm_dma_oclass_new, }; static int nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { return nvkm_dma_oclass_new(oclass->engine->subdev.device, oclass, data, size, pobject); } static const struct nvkm_sclass nvkm_dma_sclass[] = { { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new }, { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new }, { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new }, }; static int nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index, const struct nvkm_device_oclass **class) { const int count = ARRAY_SIZE(nvkm_dma_sclass); if (index < count) { const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index]; sclass->base = oclass[0]; sclass->engn = oclass; *class = &nvkm_dma_oclass_base; return index; } return count; } static int nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index) { const int count = ARRAY_SIZE(nvkm_dma_sclass); if (index < count) { oclass->base = nvkm_dma_sclass[index]; return index; } return count; } static void * nvkm_dma_dtor(struct nvkm_engine *engine) { return nvkm_dma(engine); } static const struct nvkm_engine_func nvkm_dma = { .dtor = nvkm_dma_dtor, .base.sclass = nvkm_dma_oclass_base_get, .fifo.sclass = nvkm_dma_oclass_fifo_get, }; int nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device, int index, struct nvkm_dma **pdma) { struct nvkm_dma *dma; if (!(dma = *pdma 
= kzalloc(sizeof(*dma), GFP_KERNEL))) return -ENOMEM; dma->func = func; return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine); }
gpl-2.0
crseanpaul/kernel
drivers/crypto/bfin_crc.c
473
19344
/* * Cryptographic API. * * Support Blackfin CRC HW acceleration. * * Copyright 2012 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/err.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/unaligned/access_ok.h> #include <linux/crypto.h> #include <linux/cryptohash.h> #include <crypto/scatterwalk.h> #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> #include <asm/dma.h> #include <asm/portmux.h> #include <asm/io.h> #include "bfin_crc.h" #define CRC_CCRYPTO_QUEUE_LENGTH 5 #define DRIVER_NAME "bfin-hmac-crc" #define CHKSUM_DIGEST_SIZE 4 #define CHKSUM_BLOCK_SIZE 1 #define CRC_MAX_DMA_DESC 100 #define CRC_CRYPTO_STATE_UPDATE 1 #define CRC_CRYPTO_STATE_FINALUPDATE 2 #define CRC_CRYPTO_STATE_FINISH 3 struct bfin_crypto_crc { struct list_head list; struct device *dev; spinlock_t lock; int irq; int dma_ch; u32 poly; struct crc_register *regs; struct ahash_request *req; /* current request in operation */ struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */ dma_addr_t sg_dma; /* phy addr of sg dma descriptors */ u8 *sg_mid_buf; dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */ struct tasklet_struct done_task; struct crypto_queue queue; /* waiting requests */ u8 busy:1; /* crc device in operation flag */ }; static struct bfin_crypto_crc_list { struct list_head dev_list; spinlock_t lock; } crc_list; struct bfin_crypto_crc_reqctx { struct bfin_crypto_crc *crc; unsigned int total; /* total request bytes */ size_t sg_buflen; /* bytes for this update */ unsigned int sg_nents; struct scatterlist *sg; /* sg list head for this update*/ struct scatterlist bufsl[2]; /* chained sg list */ size_t bufnext_len; size_t buflast_len; 
u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next udpate */ u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last udpate */ u8 flag; }; struct bfin_crypto_crc_ctx { struct bfin_crypto_crc *crc; u32 key; }; /* * derive number of elements in scatterlist */ static int sg_count(struct scatterlist *sg_list) { struct scatterlist *sg = sg_list; int sg_nents = 1; if (sg_list == NULL) return 0; while (!sg_is_last(sg)) { sg_nents++; sg = scatterwalk_sg_next(sg); } return sg_nents; } /* * get element in scatter list by given index */ static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents, unsigned int index) { struct scatterlist *sg = NULL; int i; for_each_sg(sg_list, sg, nents, i) if (i == index) break; return sg; } static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key) { writel(0, &crc->regs->datacntrld); writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control); writel(key, &crc->regs->curresult); /* setup CRC interrupts */ writel(CMPERRI | DCNTEXPI, &crc->regs->status); writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset); return 0; } static int bfin_crypto_crc_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); struct bfin_crypto_crc *crc; dev_dbg(ctx->crc->dev, "crc_init\n"); spin_lock_bh(&crc_list.lock); list_for_each_entry(crc, &crc_list.dev_list, list) { crc_ctx->crc = crc; break; } spin_unlock_bh(&crc_list.lock); if (sg_count(req->src) > CRC_MAX_DMA_DESC) { dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", CRC_MAX_DMA_DESC); return -EINVAL; } ctx->crc = crc; ctx->bufnext_len = 0; ctx->buflast_len = 0; ctx->sg_buflen = 0; ctx->total = 0; ctx->flag = 0; /* init crc results */ put_unaligned_le32(crc_ctx->key, req->result); dev_dbg(ctx->crc->dev, "init: digest size: %d\n", crypto_ahash_digestsize(tfm)); return bfin_crypto_crc_init_hw(crc, 
crc_ctx->key); } static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc) { struct scatterlist *sg; struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req); int i = 0, j = 0; unsigned long dma_config; unsigned int dma_count; unsigned int dma_addr; unsigned int mid_dma_count = 0; int dma_mod; dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { dma_addr = sg_dma_address(sg); /* deduce extra bytes in last sg */ if (sg_is_last(sg)) dma_count = sg_dma_len(sg) - ctx->bufnext_len; else dma_count = sg_dma_len(sg); if (mid_dma_count) { /* Append last middle dma buffer to 4 bytes with first bytes in current sg buffer. Move addr of current sg and deduce the length of current sg. */ memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count, sg_virt(sg), CHKSUM_DIGEST_SIZE - mid_dma_count); dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count; dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count; dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32; /* setup new dma descriptor for next middle dma */ crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2); crc->sg_cpu[i].cfg = dma_config; crc->sg_cpu[i].x_count = 1; crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", i, crc->sg_cpu[i].start_addr, crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, crc->sg_cpu[i].x_modify); i++; } dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32; /* chop current sg dma len to multiple of 32 bits */ mid_dma_count = dma_count % 4; dma_count &= ~0x3; if (dma_addr % 4 == 0) { dma_config |= WDSIZE_32; dma_count >>= 2; dma_mod = 4; } else if (dma_addr % 2 == 0) { dma_config |= WDSIZE_16; dma_count >>= 1; dma_mod = 2; } else { dma_config |= WDSIZE_8; dma_mod = 1; } crc->sg_cpu[i].start_addr = dma_addr; crc->sg_cpu[i].cfg = dma_config; crc->sg_cpu[i].x_count = dma_count; crc->sg_cpu[i].x_modify = dma_mod; dev_dbg(crc->dev, 
"%d: crc_dma: start_addr:0x%lx, " "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", i, crc->sg_cpu[i].start_addr, crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, crc->sg_cpu[i].x_modify); i++; if (mid_dma_count) { /* copy extra bytes to next middle dma buffer */ memcpy(crc->sg_mid_buf + (i << 2), (u8*)sg_virt(sg) + (dma_count << 2), mid_dma_count); } } dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32; /* For final update req, append the buffer for next update as well*/ if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || ctx->flag == CRC_CRYPTO_STATE_FINISH)) { crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext, CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); crc->sg_cpu[i].cfg = dma_config; crc->sg_cpu[i].x_count = 1; crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", i, crc->sg_cpu[i].start_addr, crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, crc->sg_cpu[i].x_modify); i++; } if (i == 0) return; /* Set the last descriptor to stop mode */ crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE); crc->sg_cpu[i - 1].cfg |= DI_EN; set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma); set_dma_x_count(crc->dma_ch, 0); set_dma_x_modify(crc->dma_ch, 0); set_dma_config(crc->dma_ch, dma_config); } static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc, struct ahash_request *req) { struct crypto_async_request *async_req, *backlog; struct bfin_crypto_crc_reqctx *ctx; struct scatterlist *sg; int ret = 0; int nsg, i, j; unsigned int nextlen; unsigned long flags; u32 reg; spin_lock_irqsave(&crc->lock, flags); if (req) ret = ahash_enqueue_request(&crc->queue, req); if (crc->busy) { spin_unlock_irqrestore(&crc->lock, flags); return ret; } backlog = crypto_get_backlog(&crc->queue); async_req = crypto_dequeue_request(&crc->queue); if (async_req) crc->busy = 1; spin_unlock_irqrestore(&crc->lock, flags); if (!async_req) return ret; if 
(backlog) backlog->complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); crc->req = req; ctx = ahash_request_ctx(req); ctx->sg = NULL; ctx->sg_buflen = 0; ctx->sg_nents = 0; dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n", ctx->flag, req->nbytes); if (ctx->flag == CRC_CRYPTO_STATE_FINISH) { if (ctx->bufnext_len == 0) { crc->busy = 0; return 0; } /* Pack last crc update buffer to 32bit */ memset(ctx->bufnext + ctx->bufnext_len, 0, CHKSUM_DIGEST_SIZE - ctx->bufnext_len); } else { /* Pack small data which is less than 32bit to buffer for next update. */ if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) { memcpy(ctx->bufnext + ctx->bufnext_len, sg_virt(req->src), req->nbytes); ctx->bufnext_len += req->nbytes; if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE && ctx->bufnext_len) { goto finish_update; } else { crc->busy = 0; return 0; } } if (ctx->bufnext_len) { /* Chain in extra bytes of last update */ ctx->buflast_len = ctx->bufnext_len; memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len); nsg = ctx->sg_buflen ? 
2 : 1; sg_init_table(ctx->bufsl, nsg); sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); if (nsg > 1) scatterwalk_sg_chain(ctx->bufsl, nsg, req->src); ctx->sg = ctx->bufsl; } else ctx->sg = req->src; /* Chop crc buffer size to multiple of 32 bit */ nsg = ctx->sg_nents = sg_count(ctx->sg); ctx->sg_buflen = ctx->buflast_len + req->nbytes; ctx->bufnext_len = ctx->sg_buflen % 4; ctx->sg_buflen &= ~0x3; if (ctx->bufnext_len) { /* copy extra bytes to buffer for next update */ memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE); nextlen = ctx->bufnext_len; for (i = nsg - 1; i >= 0; i--) { sg = sg_get(ctx->sg, nsg, i); j = min(nextlen, sg_dma_len(sg)); memcpy(ctx->bufnext + nextlen - j, sg_virt(sg) + sg_dma_len(sg) - j, j); if (j == sg_dma_len(sg)) ctx->sg_nents--; nextlen -= j; if (nextlen == 0) break; } } } finish_update: if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || ctx->flag == CRC_CRYPTO_STATE_FINISH)) ctx->sg_buflen += CHKSUM_DIGEST_SIZE; /* set CRC data count before start DMA */ writel(ctx->sg_buflen >> 2, &crc->regs->datacnt); /* setup and enable CRC DMA */ bfin_crypto_crc_config_dma(crc); /* finally kick off CRC operation */ reg = readl(&crc->regs->control); writel(reg | BLKEN, &crc->regs->control); return -EINPROGRESS; } static int bfin_crypto_crc_update(struct ahash_request *req) { struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); if (!req->nbytes) return 0; dev_dbg(ctx->crc->dev, "crc_update\n"); ctx->total += req->nbytes; ctx->flag = CRC_CRYPTO_STATE_UPDATE; return bfin_crypto_crc_handle_queue(ctx->crc, req); } static int bfin_crypto_crc_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); dev_dbg(ctx->crc->dev, "crc_final\n"); ctx->flag = CRC_CRYPTO_STATE_FINISH; crc_ctx->key = 0; return bfin_crypto_crc_handle_queue(ctx->crc, req); } static int bfin_crypto_crc_finup(struct 
ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); dev_dbg(ctx->crc->dev, "crc_finishupdate\n"); ctx->total += req->nbytes; ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE; crc_ctx->key = 0; return bfin_crypto_crc_handle_queue(ctx->crc, req); } static int bfin_crypto_crc_digest(struct ahash_request *req) { int ret; ret = bfin_crypto_crc_init(req); if (ret) return ret; return bfin_crypto_crc_finup(req); } static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); dev_dbg(crc_ctx->crc->dev, "crc_setkey\n"); if (keylen != CHKSUM_DIGEST_SIZE) { crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } crc_ctx->key = get_unaligned_le32(key); return 0; } static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm) { struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm); crc_ctx->key = 0; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct bfin_crypto_crc_reqctx)); return 0; } static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm) { } static struct ahash_alg algs = { .init = bfin_crypto_crc_init, .update = bfin_crypto_crc_update, .final = bfin_crypto_crc_final, .finup = bfin_crypto_crc_finup, .digest = bfin_crypto_crc_digest, .setkey = bfin_crypto_crc_setkey, .halg.digestsize = CHKSUM_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(crc32)", .cra_driver_name = DRIVER_NAME, .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_init = bfin_crypto_crc_cra_init, .cra_exit = bfin_crypto_crc_cra_exit, } }; static void bfin_crypto_crc_done_task(unsigned long data) { struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data; 
bfin_crypto_crc_handle_queue(crc, NULL); } static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id) { struct bfin_crypto_crc *crc = dev_id; u32 reg; if (readl(&crc->regs->status) & DCNTEXP) { writel(DCNTEXP, &crc->regs->status); /* prepare results */ put_unaligned_le32(readl(&crc->regs->result), crc->req->result); reg = readl(&crc->regs->control); writel(reg & ~BLKEN, &crc->regs->control); crc->busy = 0; if (crc->req->base.complete) crc->req->base.complete(&crc->req->base, 0); tasklet_schedule(&crc->done_task); return IRQ_HANDLED; } else return IRQ_NONE; } #ifdef CONFIG_PM /** * bfin_crypto_crc_suspend - suspend crc device * @pdev: device being suspended * @state: requested suspend state */ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state) { struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); int i = 100000; while ((readl(&crc->regs->control) & BLKEN) && --i) cpu_relax(); if (i == 0) return -EBUSY; return 0; } #else # define bfin_crypto_crc_suspend NULL #endif #define bfin_crypto_crc_resume NULL /** * bfin_crypto_crc_probe - Initialize module * */ static int bfin_crypto_crc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct bfin_crypto_crc *crc; unsigned int timeout = 100000; int ret; crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); if (!crc) { dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n"); return -ENOMEM; } crc->dev = dev; INIT_LIST_HEAD(&crc->list); spin_lock_init(&crc->lock); tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc); crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); return -ENOENT; } crc->regs = devm_ioremap_resource(dev, res); if (IS_ERR((void *)crc->regs)) { dev_err(&pdev->dev, "Cannot map CRC IO\n"); return PTR_ERR((void *)crc->regs); } crc->irq = platform_get_irq(pdev, 0); if 
(crc->irq < 0) { dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n"); return -ENOENT; } ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc); if (ret) { dev_err(&pdev->dev, "Unable to request blackfin crc irq\n"); return ret; } res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res == NULL) { dev_err(&pdev->dev, "No CRC DMA channel specified\n"); return -ENOENT; } crc->dma_ch = res->start; ret = request_dma(crc->dma_ch, dev_name(dev)); if (ret) { dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n"); return ret; } crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL); if (crc->sg_cpu == NULL) { ret = -ENOMEM; goto out_error_dma; } /* * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle + * 1 last + 1 next dma descriptors */ crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1)); crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array) * ((CRC_MAX_DMA_DESC + 1) << 1); writel(0, &crc->regs->control); crc->poly = (u32)pdev->dev.platform_data; writel(crc->poly, &crc->regs->poly); while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0) cpu_relax(); if (timeout == 0) dev_info(&pdev->dev, "init crc poly timeout\n"); platform_set_drvdata(pdev, crc); spin_lock(&crc_list.lock); list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); if (list_is_singular(&crc_list.dev_list)) { ret = crypto_register_ahash(&algs); if (ret) { dev_err(&pdev->dev, "Can't register crypto ahash device\n"); goto out_error_dma; } } dev_info(&pdev->dev, "initialized\n"); return 0; out_error_dma: if (crc->sg_cpu) dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); free_dma(crc->dma_ch); return ret; } /** * bfin_crypto_crc_remove - Initialize module * */ static int bfin_crypto_crc_remove(struct platform_device *pdev) { struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); if (!crc) return -ENODEV; spin_lock(&crc_list.lock); 
list_del(&crc->list); spin_unlock(&crc_list.lock); crypto_unregister_ahash(&algs); tasklet_kill(&crc->done_task); free_dma(crc->dma_ch); return 0; } static struct platform_driver bfin_crypto_crc_driver = { .probe = bfin_crypto_crc_probe, .remove = bfin_crypto_crc_remove, .suspend = bfin_crypto_crc_suspend, .resume = bfin_crypto_crc_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; /** * bfin_crypto_crc_mod_init - Initialize module * * Checks the module params and registers the platform driver. * Real work is in the platform probe function. */ static int __init bfin_crypto_crc_mod_init(void) { int ret; pr_info("Blackfin hardware CRC crypto driver\n"); INIT_LIST_HEAD(&crc_list.dev_list); spin_lock_init(&crc_list.lock); ret = platform_driver_register(&bfin_crypto_crc_driver); if (ret) { pr_info(KERN_ERR "unable to register driver\n"); return ret; } return 0; } /** * bfin_crypto_crc_mod_exit - Deinitialize module */ static void __exit bfin_crypto_crc_mod_exit(void) { platform_driver_unregister(&bfin_crypto_crc_driver); } module_init(bfin_crypto_crc_mod_init); module_exit(bfin_crypto_crc_mod_exit); MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver"); MODULE_LICENSE("GPL");
gpl-2.0
faux123/kernel-MB860
net/netfilter/ipvs/ip_vs_core.c
473
41482
/* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the Netfilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Peter Kese <peter.kese@ijs.si> * Julian Anastasov <ja@ssi.bg> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese, * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms * and others. * * Changes: * Paul `Rusty' Russell properly handle non-linear skbs * Harald Welte don't use nfcache * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/icmp.h> #include <net/ip.h> #include <net/tcp.h> #include <net/udp.h> #include <net/icmp.h> /* for icmp_send */ #include <net/route.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #ifdef CONFIG_IP_VS_IPV6 #include <net/ipv6.h> #include <linux/netfilter_ipv6.h> #endif #include <net/ip_vs.h> EXPORT_SYMBOL(register_ip_vs_scheduler); EXPORT_SYMBOL(unregister_ip_vs_scheduler); EXPORT_SYMBOL(ip_vs_skb_replace); EXPORT_SYMBOL(ip_vs_proto_name); EXPORT_SYMBOL(ip_vs_conn_new); EXPORT_SYMBOL(ip_vs_conn_in_get); EXPORT_SYMBOL(ip_vs_conn_out_get); #ifdef CONFIG_IP_VS_PROTO_TCP EXPORT_SYMBOL(ip_vs_tcp_conn_listen); #endif EXPORT_SYMBOL(ip_vs_conn_put); #ifdef CONFIG_IP_VS_DEBUG EXPORT_SYMBOL(ip_vs_get_debug_level); #endif /* ID used in ICMP lookups */ #define icmp_id(icmph) (((icmph)->un).echo.id) #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) const char 
*ip_vs_proto_name(unsigned proto) { static char buf[20]; switch (proto) { case IPPROTO_IP: return "IP"; case IPPROTO_UDP: return "UDP"; case IPPROTO_TCP: return "TCP"; case IPPROTO_ICMP: return "ICMP"; #ifdef CONFIG_IP_VS_IPV6 case IPPROTO_ICMPV6: return "ICMPv6"; #endif default: sprintf(buf, "IP_%d", proto); return buf; } } void ip_vs_init_hash_table(struct list_head *table, int rows) { while (--rows >= 0) INIT_LIST_HEAD(&table[rows]); } static inline void ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_dest *dest = cp->dest; if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { spin_lock(&dest->stats.lock); dest->stats.ustats.inpkts++; dest->stats.ustats.inbytes += skb->len; spin_unlock(&dest->stats.lock); spin_lock(&dest->svc->stats.lock); dest->svc->stats.ustats.inpkts++; dest->svc->stats.ustats.inbytes += skb->len; spin_unlock(&dest->svc->stats.lock); spin_lock(&ip_vs_stats.lock); ip_vs_stats.ustats.inpkts++; ip_vs_stats.ustats.inbytes += skb->len; spin_unlock(&ip_vs_stats.lock); } } static inline void ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_dest *dest = cp->dest; if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { spin_lock(&dest->stats.lock); dest->stats.ustats.outpkts++; dest->stats.ustats.outbytes += skb->len; spin_unlock(&dest->stats.lock); spin_lock(&dest->svc->stats.lock); dest->svc->stats.ustats.outpkts++; dest->svc->stats.ustats.outbytes += skb->len; spin_unlock(&dest->svc->stats.lock); spin_lock(&ip_vs_stats.lock); ip_vs_stats.ustats.outpkts++; ip_vs_stats.ustats.outbytes += skb->len; spin_unlock(&ip_vs_stats.lock); } } static inline void ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) { spin_lock(&cp->dest->stats.lock); cp->dest->stats.ustats.conns++; spin_unlock(&cp->dest->stats.lock); spin_lock(&svc->stats.lock); svc->stats.ustats.conns++; spin_unlock(&svc->stats.lock); spin_lock(&ip_vs_stats.lock); ip_vs_stats.ustats.conns++; spin_unlock(&ip_vs_stats.lock); } static 
inline int ip_vs_set_state(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_protocol *pp) { if (unlikely(!pp->state_transition)) return 0; return pp->state_transition(cp, direction, skb, pp); } /* * IPVS persistent scheduling function * It creates a connection entry according to its template if exists, * or selects a server and creates a connection entry plus a template. * Locking: we are svc user (svc->refcnt), so we hold all dests too * Protocols supported: TCP, UDP */ static struct ip_vs_conn * ip_vs_sched_persist(struct ip_vs_service *svc, const struct sk_buff *skb, __be16 ports[2]) { struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; struct ip_vs_dest *dest; struct ip_vs_conn *ct; __be16 dport; /* destination port to forward */ union nf_inet_addr snet; /* source network of the client, after masking */ ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); /* Mask saddr with the netmask to adjust template granularity */ #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); else #endif snet.ip = iph.saddr.ip & svc->netmask; IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " "mnet %s\n", IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]), IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]), IP_VS_DBG_ADDR(svc->af, &snet)); /* * As far as we know, FTP is a very complicated network protocol, and * it uses control connection and data connections. For active FTP, * FTP server initialize data connection to the client, its source port * is often 20. For passive FTP, FTP server tells the clients the port * that it passively listens to, and the client issues the data * connection. In the tunneling or direct routing mode, the load * balancer is on the client-to-server half of connection, the port * number is unknown to the load balancer. 
So, a conn template like * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP * service, and a template like <caddr, 0, vaddr, vport, daddr, dport> * is created for other persistent services. */ if (ports[1] == svc->port) { /* Check if a template already exists */ if (svc->port != FTPPORT) ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, ports[1]); else ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0); if (!ct || !ip_vs_check_template(ct)) { /* * No template found or the dest of the connection * template is not available. */ dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); return NULL; } /* * Create a template like <protocol,caddr,0, * vaddr,vport,daddr,dport> for non-ftp service, * and <protocol,caddr,0,vaddr,0,daddr,0> * for ftp service. */ if (svc->port != FTPPORT) ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, ports[1], &dest->addr, dest->port, IP_VS_CONN_F_TEMPLATE, dest); else ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); if (ct == NULL) return NULL; ct->timeout = svc->timeout; } else { /* set destination with the found template */ dest = ct->dest; } dport = dest->port; } else { /* * Note: persistent fwmark-based services and persistent * port zero service are handled here. * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> */ if (svc->fwmark) { union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, &fwmark, 0); } else ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0); if (!ct || !ip_vs_check_template(ct)) { /* * If it is not persistent port zero, return NULL, * otherwise create a connection template. 
*/ if (svc->port) return NULL; dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); return NULL; } /* * Create a template according to the service */ if (svc->fwmark) { union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; ct = ip_vs_conn_new(svc->af, IPPROTO_IP, &snet, 0, &fwmark, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); } else ct = ip_vs_conn_new(svc->af, iph.protocol, &snet, 0, &iph.daddr, 0, &dest->addr, 0, IP_VS_CONN_F_TEMPLATE, dest); if (ct == NULL) return NULL; ct->timeout = svc->timeout; } else { /* set destination with the found template */ dest = ct->dest; } dport = ports[1]; } /* * Create a new connection according to the template */ cp = ip_vs_conn_new(svc->af, iph.protocol, &iph.saddr, ports[0], &iph.daddr, ports[1], &dest->addr, dport, 0, dest); if (cp == NULL) { ip_vs_conn_put(ct); return NULL; } /* * Add its control */ ip_vs_control_add(cp, ct); ip_vs_conn_put(ct); ip_vs_conn_stats(cp, svc); return cp; } /* * IPVS main scheduling function * It selects a server according to the virtual service, and * creates a connection entry. * Protocols supported: TCP, UDP */ struct ip_vs_conn * ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; struct ip_vs_dest *dest; __be16 _ports[2], *pptr; ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); if (pptr == NULL) return NULL; /* * Persistent service */ if (svc->flags & IP_VS_SVC_F_PERSISTENT) return ip_vs_sched_persist(svc, skb, pptr); /* * Non-persistent service */ if (!svc->fwmark && pptr[1] != svc->port) { if (!svc->port) pr_err("Schedule: port zero only supported " "in persistent services, " "check your ipvs configuration\n"); return NULL; } dest = svc->scheduler->schedule(svc, skb); if (dest == NULL) { IP_VS_DBG(1, "Schedule: no dest found.\n"); return NULL; } /* * Create a connection entry. 
*/ cp = ip_vs_conn_new(svc->af, iph.protocol, &iph.saddr, pptr[0], &iph.daddr, pptr[1], &dest->addr, dest->port ? dest->port : pptr[1], 0, dest); if (cp == NULL) return NULL; IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u " "d:%s:%u conn->flags:%X conn->refcnt:%d\n", ip_vs_fwd_tag(cp), IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport), cp->flags, atomic_read(&cp->refcnt)); ip_vs_conn_stats(cp, svc); return cp; } /* * Pass or drop the packet. * Called by ip_vs_in, when the virtual service is available but * no destination is available for a new connection. */ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_protocol *pp) { __be16 _ports[2], *pptr; struct ip_vs_iphdr iph; int unicast; ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); if (pptr == NULL) { ip_vs_service_put(svc); return NF_DROP; } #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; else #endif unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST); /* if it is fwmark-based service, the cache_bypass sysctl is up and the destination is a non-local unicast, then create a cache_bypass connection entry */ if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { int ret, cs; struct ip_vs_conn *cp; union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; ip_vs_service_put(svc); /* create a new connection entry */ IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); cp = ip_vs_conn_new(svc->af, iph.protocol, &iph.saddr, pptr[0], &iph.daddr, pptr[1], &daddr, 0, IP_VS_CONN_F_BYPASS, NULL); if (cp == NULL) return NF_DROP; /* statistics */ ip_vs_in_stats(cp, skb); /* set state */ cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); /* transmit the first SYN packet */ ret = cp->packet_xmit(skb, cp, pp); /* do not touch skb 
anymore */ atomic_inc(&cp->in_pkts); ip_vs_conn_put(cp); return ret; } /* * When the virtual ftp service is presented, packets destined * for other services on the VIP may get here (except services * listed in the ipvs table), pass the packets, because it is * not ipvs job to decide to drop the packets. */ if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) { ip_vs_service_put(svc); return NF_ACCEPT; } ip_vs_service_put(svc); /* * Notify the client that the destination is unreachable, and * release the socket buffer. * Since it is in IP layer, the TCP socket is not actually * created, the TCP RST packet cannot be sent, instead that * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ */ #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); else #endif icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); return NF_DROP; } /* * It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING * chain, and is used for VS/NAT. * It detects packets for VS/NAT connections and sends the packets * immediately. This can avoid that iptable_nat mangles the packets * for VS/NAT. 
*/ static unsigned int ip_vs_post_routing(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { if (!skb->ipvs_property) return NF_ACCEPT; /* The packet was sent from IPVS, exit this chain */ return NF_STOP; } __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) { return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); } static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) { int err = ip_defrag(skb, user); if (!err) ip_send_check(ip_hdr(skb)); return err; } #ifdef CONFIG_IP_VS_IPV6 static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user) { /* TODO IPv6: Find out what to do here for IPv6 */ return 0; } #endif /* * Packet has been made sufficiently writable in caller * - inout: 1=in->out, 0=out->in */ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int inout) { struct iphdr *iph = ip_hdr(skb); unsigned int icmp_offset = iph->ihl*4; struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) + icmp_offset); struct iphdr *ciph = (struct iphdr *)(icmph + 1); if (inout) { iph->saddr = cp->vaddr.ip; ip_send_check(iph); ciph->daddr = cp->vaddr.ip; ip_send_check(ciph); } else { iph->daddr = cp->daddr.ip; ip_send_check(iph); ciph->saddr = cp->daddr.ip; ip_send_check(ciph); } /* the TCP/UDP port */ if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) { __be16 *ports = (void *)ciph + ciph->ihl*4; if (inout) ports[1] = cp->vport; else ports[0] = cp->dport; } /* And finally the ICMP checksum */ icmph->checksum = 0; icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset); skb->ip_summed = CHECKSUM_UNNECESSARY; if (inout) IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, "Forwarding altered outgoing ICMP"); else IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, "Forwarding altered incoming ICMP"); } #ifdef CONFIG_IP_VS_IPV6 void ip_vs_nat_icmp_v6(struct sk_buff 
*skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int inout) { struct ipv6hdr *iph = ipv6_hdr(skb); unsigned int icmp_offset = sizeof(struct ipv6hdr); struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset); struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1); if (inout) { iph->saddr = cp->vaddr.in6; ciph->daddr = cp->vaddr.in6; } else { iph->daddr = cp->daddr.in6; ciph->saddr = cp->daddr.in6; } /* the TCP/UDP port */ if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); if (inout) ports[1] = cp->vport; else ports[0] = cp->dport; } /* And finally the ICMP checksum */ icmph->icmp6_cksum = 0; /* TODO IPv6: is this correct for ICMPv6? */ ip_vs_checksum_complete(skb, icmp_offset); skb->ip_summed = CHECKSUM_UNNECESSARY; if (inout) IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, "Forwarding altered outgoing ICMPv6"); else IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, "Forwarding altered incoming ICMPv6"); } #endif /* Handle relevant response ICMP messages - forward to the right * destination host. Used for NAT and local client. */ static int handle_response_icmp(int af, struct sk_buff *skb, union nf_inet_addr *snet, __u8 protocol, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, unsigned int offset, unsigned int ihl) { unsigned int verdict = NF_DROP; if (IP_VS_FWD_METHOD(cp) != 0) { pr_err("shouldn't reach here, because the box is on the " "half connection in the tun/dr module.\n"); } /* Ensure the checksum is correct */ if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { /* Failed checksum! 
*/ IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n", IP_VS_DBG_ADDR(af, snet)); goto out; } if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) offset += 2 * sizeof(__u16); if (!skb_make_writable(skb, offset)) goto out; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_nat_icmp_v6(skb, pp, cp, 1); else #endif ip_vs_nat_icmp(skb, pp, cp, 1); /* do the statistics and put it back */ ip_vs_out_stats(cp, skb); skb->ipvs_property = 1; verdict = NF_ACCEPT; out: __ip_vs_conn_put(cp); return verdict; } /* * Handle ICMP messages in the inside-to-outside direction (outgoing). * Find any that might be relevant, check against existing connections. * Currently handles error types - unreachable, quench, ttl exceeded. */ static int ip_vs_out_icmp(struct sk_buff *skb, int *related) { struct iphdr *iph; struct icmphdr _icmph, *ic; struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; unsigned int offset, ihl; union nf_inet_addr snet; *related = 1; /* reassemble IP fragments */ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) return NF_STOLEN; } iph = ip_hdr(skb); offset = ihl = iph->ihl * 4; ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n", ic->type, ntohs(icmp_id(ic)), &iph->saddr, &iph->daddr); /* * Work through seeing if this is for us. * These checks are supposed to be in an order that means easy * things are checked first to speed up processing.... however * this means that some packets will manage to get a long way * down this stack and then be rejected, but that's life. 
*/ if ((ic->type != ICMP_DEST_UNREACH) && (ic->type != ICMP_SOURCE_QUENCH) && (ic->type != ICMP_TIME_EXCEEDED)) { *related = 0; return NF_ACCEPT; } /* Now find the contained IP header */ offset += sizeof(_icmph); cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ pp = ip_vs_proto_get(cih->protocol); if (!pp) return NF_ACCEPT; /* Is the embedded protocol header present? */ if (unlikely(cih->frag_off & htons(IP_OFFSET) && pp->dont_defrag)) return NF_ACCEPT; IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMP for"); offset += cih->ihl * 4; ip_vs_fill_iphdr(AF_INET, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); if (!cp) return NF_ACCEPT; snet.ip = iph->saddr; return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, pp, offset, ihl); } #ifdef CONFIG_IP_VS_IPV6 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related) { struct ipv6hdr *iph; struct icmp6hdr _icmph, *ic; struct ipv6hdr _ciph, *cih; /* The ip header contained within the ICMP */ struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; unsigned int offset; union nf_inet_addr snet; *related = 1; /* reassemble IP fragments */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT)) return NF_STOLEN; } iph = ipv6_hdr(skb); offset = sizeof(struct ipv6hdr); ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n", ic->icmp6_type, ntohs(icmpv6_id(ic)), &iph->saddr, &iph->daddr); /* * Work through seeing if this is for us. * These checks are supposed to be in an order that means easy * things are checked first to speed up processing.... however * this means that some packets will manage to get a long way * down this stack and then be rejected, but that's life. 
*/ if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { *related = 0; return NF_ACCEPT; } /* Now find the contained IP header */ offset += sizeof(_icmph); cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ pp = ip_vs_proto_get(cih->nexthdr); if (!pp) return NF_ACCEPT; /* Is the embedded protocol header present? */ /* TODO: we don't support fragmentation at the moment anyways */ if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) return NF_ACCEPT; IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for"); offset += sizeof(struct ipv6hdr); ip_vs_fill_iphdr(AF_INET6, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); if (!cp) return NF_ACCEPT; ipv6_addr_copy(&snet.in6, &iph->saddr); return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, pp, offset, sizeof(struct ipv6hdr)); } #endif static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) { struct tcphdr _tcph, *th; th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph); if (th == NULL) return 0; return th->rst; } /* Handle response packets: rewrite addresses and send away... * Used for NAT and local client. */ static unsigned int handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int ihl) { IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); if (!skb_make_writable(skb, ihl)) goto drop; /* mangle the packet */ if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) goto drop; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ipv6_hdr(skb)->saddr = cp->vaddr.in6; else #endif { ip_hdr(skb)->saddr = cp->vaddr.ip; ip_send_check(ip_hdr(skb)); } /* For policy routing, packets originating from this * machine itself may be routed differently to packets * passing through. 
We want this packet to be routed as * if it came from this machine itself. So re-compute * the routing information. */ #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (ip6_route_me_harder(skb) != 0) goto drop; } else #endif if (ip_route_me_harder(skb, RTN_LOCAL) != 0) goto drop; IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); ip_vs_out_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); ip_vs_conn_put(cp); skb->ipvs_property = 1; LeaveFunction(11); return NF_ACCEPT; drop: ip_vs_conn_put(cp); kfree_skb(skb); return NF_STOLEN; } /* * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. * Check if outgoing packet belongs to the established ip_vs_conn. */ static unsigned int ip_vs_out(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; struct ip_vs_conn *cp; int af; EnterFunction(11); af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6; if (skb->ipvs_property) return NF_ACCEPT; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { int related, verdict = ip_vs_out_icmp_v6(skb, &related); if (related) return verdict; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } } else #endif if (unlikely(iph.protocol == IPPROTO_ICMP)) { int related, verdict = ip_vs_out_icmp(skb, &related); if (related) return verdict; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } pp = ip_vs_proto_get(iph.protocol); if (unlikely(!pp)) return NF_ACCEPT; /* reassemble IP fragments */ #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { int related, verdict = ip_vs_out_icmp_v6(skb, &related); if (related) return verdict; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } } else #endif if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) && !pp->dont_defrag)) { if 
(ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) return NF_STOLEN; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } /* * Check if the packet belongs to an existing entry */ cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); if (unlikely(!cp)) { if (sysctl_ip_vs_nat_icmp_send && (pp->protocol == IPPROTO_TCP || pp->protocol == IPPROTO_UDP)) { __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); if (pptr == NULL) return NF_ACCEPT; /* Not for me */ if (ip_vs_lookup_real_service(af, iph.protocol, &iph.saddr, pptr[0])) { /* * Notify the real server: there is no * existing entry if it is not RST * packet or not TCP packet. */ if (iph.protocol != IPPROTO_TCP || !is_tcp_reset(skb, iph.len)) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); else #endif icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); return NF_DROP; } } } IP_VS_DBG_PKT(12, pp, skb, 0, "packet continues traversal as normal"); return NF_ACCEPT; } return handle_response(af, skb, pp, cp, iph.len); } /* * Handle ICMP messages in the outside-to-inside direction (incoming). * Find any that might be relevant, check against existing connections, * forward to the right destination host if relevant. * Currently handles error types - unreachable, quench, ttl exceeded. */ static int ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) { struct iphdr *iph; struct icmphdr _icmph, *ic; struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; unsigned int offset, ihl, verdict; union nf_inet_addr snet; *related = 1; /* reassemble IP fragments */ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { if (ip_vs_gather_frags(skb, hooknum == NF_INET_LOCAL_IN ? 
IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD)) return NF_STOLEN; } iph = ip_hdr(skb); offset = ihl = iph->ihl * 4; ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n", ic->type, ntohs(icmp_id(ic)), &iph->saddr, &iph->daddr); /* * Work through seeing if this is for us. * These checks are supposed to be in an order that means easy * things are checked first to speed up processing.... however * this means that some packets will manage to get a long way * down this stack and then be rejected, but that's life. */ if ((ic->type != ICMP_DEST_UNREACH) && (ic->type != ICMP_SOURCE_QUENCH) && (ic->type != ICMP_TIME_EXCEEDED)) { *related = 0; return NF_ACCEPT; } /* Now find the contained IP header */ offset += sizeof(_icmph); cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ pp = ip_vs_proto_get(cih->protocol); if (!pp) return NF_ACCEPT; /* Is the embedded protocol header present? */ if (unlikely(cih->frag_off & htons(IP_OFFSET) && pp->dont_defrag)) return NF_ACCEPT; IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMP for"); offset += cih->ihl * 4; ip_vs_fill_iphdr(AF_INET, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1); if (!cp) { /* The packet could also belong to a local client */ cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); if (cp) { snet.ip = iph->saddr; return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, pp, offset, ihl); } return NF_ACCEPT; } verdict = NF_DROP; /* Ensure the checksum is correct */ if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { /* Failed checksum! 
*/ IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n", &iph->saddr); goto out; } /* do the statistics and put it back */ ip_vs_in_stats(cp, skb); if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) offset += 2 * sizeof(__u16); verdict = ip_vs_icmp_xmit(skb, cp, pp, offset); /* do not touch skb anymore */ out: __ip_vs_conn_put(cp); return verdict; } #ifdef CONFIG_IP_VS_IPV6 static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) { struct ipv6hdr *iph; struct icmp6hdr _icmph, *ic; struct ipv6hdr _ciph, *cih; /* The ip header contained within the ICMP */ struct ip_vs_iphdr ciph; struct ip_vs_conn *cp; struct ip_vs_protocol *pp; unsigned int offset, verdict; union nf_inet_addr snet; *related = 1; /* reassemble IP fragments */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ? IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD)) return NF_STOLEN; } iph = ipv6_hdr(skb); offset = sizeof(struct ipv6hdr); ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n", ic->icmp6_type, ntohs(icmpv6_id(ic)), &iph->saddr, &iph->daddr); /* * Work through seeing if this is for us. * These checks are supposed to be in an order that means easy * things are checked first to speed up processing.... however * this means that some packets will manage to get a long way * down this stack and then be rejected, but that's life. */ if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { *related = 0; return NF_ACCEPT; } /* Now find the contained IP header */ offset += sizeof(_icmph); cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ pp = ip_vs_proto_get(cih->nexthdr); if (!pp) return NF_ACCEPT; /* Is the embedded protocol header present? 
*/ /* TODO: we don't support fragmentation at the moment anyways */ if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) return NF_ACCEPT; IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for"); offset += sizeof(struct ipv6hdr); ip_vs_fill_iphdr(AF_INET6, cih, &ciph); /* The embedded headers contain source and dest in reverse order */ cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1); if (!cp) { /* The packet could also belong to a local client */ cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); if (cp) { ipv6_addr_copy(&snet.in6, &iph->saddr); return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, pp, offset, sizeof(struct ipv6hdr)); } return NF_ACCEPT; } verdict = NF_DROP; /* do the statistics and put it back */ ip_vs_in_stats(cp, skb); if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) offset += 2 * sizeof(__u16); verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); /* do not touch skb anymore */ __ip_vs_conn_put(cp); return verdict; } #endif /* * Check if it's for virtual services, look it up, * and send it on its way... */ static unsigned int ip_vs_in(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; struct ip_vs_conn *cp; int ret, restart, af, pkts; af = (skb->protocol == htons(ETH_P_IP)) ? 
AF_INET : AF_INET6; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); /* * Big tappo: only PACKET_HOST, including loopback for local client * Don't handle local packets on IPv6 for now */ if (unlikely(skb->pkt_type != PACKET_HOST)) { IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n", skb->pkt_type, iph.protocol, IP_VS_DBG_ADDR(af, &iph.daddr)); return NF_ACCEPT; } #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { int related, verdict = ip_vs_in_icmp_v6(skb, &related, hooknum); if (related) return verdict; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } } else #endif if (unlikely(iph.protocol == IPPROTO_ICMP)) { int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); if (related) return verdict; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } /* Protocol supported? */ pp = ip_vs_proto_get(iph.protocol); if (unlikely(!pp)) return NF_ACCEPT; /* * Check if the packet belongs to an existing connection entry */ cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0); if (unlikely(!cp)) { int v; /* For local client packets, it could be a response */ cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); if (cp) return handle_response(af, skb, pp, cp, iph.len); if (!pp->conn_schedule(af, skb, pp, &v, &cp)) return v; } if (unlikely(!cp)) { /* sorry, all this trouble for a no-hit :) */ IP_VS_DBG_PKT(12, pp, skb, 0, "packet continues traversal as normal"); return NF_ACCEPT; } IP_VS_DBG_PKT(11, pp, skb, 0, "Incoming packet"); /* Check the server status */ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ if (sysctl_ip_vs_expire_nodest_conn) { /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); } /* don't restart its timer, and silently drop the packet. 
*/ __ip_vs_conn_put(cp); return NF_DROP; } ip_vs_in_stats(cp, skb); restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp); if (cp->packet_xmit) ret = cp->packet_xmit(skb, cp, pp); /* do not touch skb anymore */ else { IP_VS_DBG_RL("warning: packet_xmit is null"); ret = NF_ACCEPT; } /* Increase its packet counter and check if it is needed * to be synchronized * * Sync connection if it is about to close to * encorage the standby servers to update the connections timeout */ pkts = atomic_add_return(1, &cp->in_pkts); if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) && (((cp->protocol != IPPROTO_TCP || cp->state == IP_VS_TCP_S_ESTABLISHED) && (pkts % sysctl_ip_vs_sync_threshold[1] == sysctl_ip_vs_sync_threshold[0])) || ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && ((cp->state == IP_VS_TCP_S_FIN_WAIT) || (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || (cp->state == IP_VS_TCP_S_TIME_WAIT))))) ip_vs_sync_conn(cp); cp->old_state = cp->state; ip_vs_conn_put(cp); return ret; } /* * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP * related packets destined for 0.0.0.0/0. * When fwmark-based virtual service is used, such as transparent * cache cluster, TCP packets can be marked and routed to ip_vs_in, * but ICMP destined for 0.0.0.0/0 cannot not be easily marked and * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain * and send them to ip_vs_in_icmp. 
*/ static unsigned int ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { int r; if (ip_hdr(skb)->protocol != IPPROTO_ICMP) return NF_ACCEPT; return ip_vs_in_icmp(skb, &r, hooknum); } #ifdef CONFIG_IP_VS_IPV6 static unsigned int ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { int r; if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) return NF_ACCEPT; return ip_vs_in_icmp_v6(skb, &r, hooknum); } #endif static struct nf_hook_ops ip_vs_ops[] __read_mostly = { /* After packet filtering, forward packet through VS/DR, VS/TUN, * or VS/NAT(change destination), so that filtering rules can be * applied to IPVS. */ { .hook = ip_vs_in, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_LOCAL_IN, .priority = 100, }, /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_out, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_FORWARD, .priority = 100, }, /* After packet filtering (but before ip_vs_out_icmp), catch icmp * destined for 0.0.0.0/0, which is for incoming IPVS connections */ { .hook = ip_vs_forward_icmp, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_FORWARD, .priority = 99, }, /* Before the netfilter connection tracking, exit from POST_ROUTING */ { .hook = ip_vs_post_routing, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP_PRI_NAT_SRC-1, }, #ifdef CONFIG_IP_VS_IPV6 /* After packet filtering, forward packet through VS/DR, VS/TUN, * or VS/NAT(change destination), so that filtering rules can be * applied to IPVS. 
*/ { .hook = ip_vs_in, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_LOCAL_IN, .priority = 100, }, /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_out, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_FORWARD, .priority = 100, }, /* After packet filtering (but before ip_vs_out_icmp), catch icmp * destined for 0.0.0.0/0, which is for incoming IPVS connections */ { .hook = ip_vs_forward_icmp_v6, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_FORWARD, .priority = 99, }, /* Before the netfilter connection tracking, exit from POST_ROUTING */ { .hook = ip_vs_post_routing, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP6_PRI_NAT_SRC-1, }, #endif }; /* * Initialize IP Virtual Server */ static int __init ip_vs_init(void) { int ret; ip_vs_estimator_init(); ret = ip_vs_control_init(); if (ret < 0) { pr_err("can't setup control.\n"); goto cleanup_estimator; } ip_vs_protocol_init(); ret = ip_vs_app_init(); if (ret < 0) { pr_err("can't setup application helper.\n"); goto cleanup_protocol; } ret = ip_vs_conn_init(); if (ret < 0) { pr_err("can't setup connection table.\n"); goto cleanup_app; } ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); if (ret < 0) { pr_err("can't register hooks.\n"); goto cleanup_conn; } pr_info("ipvs loaded.\n"); return ret; cleanup_conn: ip_vs_conn_cleanup(); cleanup_app: ip_vs_app_cleanup(); cleanup_protocol: ip_vs_protocol_cleanup(); ip_vs_control_cleanup(); cleanup_estimator: ip_vs_estimator_cleanup(); return ret; } static void __exit ip_vs_cleanup(void) { nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ip_vs_conn_cleanup(); ip_vs_app_cleanup(); ip_vs_protocol_cleanup(); ip_vs_control_cleanup(); ip_vs_estimator_cleanup(); pr_info("ipvs unloaded.\n"); } module_init(ip_vs_init); module_exit(ip_vs_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
Digilent/linux-Digilent-Dev
arch/arm/mach-omap2/cm44xx.c
473
1215
/* * OMAP4 CM1, CM2 module low-level functions * * Copyright (C) 2010 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * These functions are intended to be used only by the cminst44xx.c file. * XXX Perhaps we should just move them there and make them static. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "cm.h" #include "cm1_44xx.h" #include "cm2_44xx.h" /* CM1 hardware module low-level functions */ /* Read a register in CM1 */ u32 omap4_cm1_read_inst_reg(s16 inst, u16 reg) { return readl_relaxed(cm_base + inst + reg); } /* Write into a register in CM1 */ void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 reg) { writel_relaxed(val, cm_base + inst + reg); } /* Read a register in CM2 */ u32 omap4_cm2_read_inst_reg(s16 inst, u16 reg) { return readl_relaxed(cm2_base + inst + reg); } /* Write into a register in CM2 */ void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 reg) { writel_relaxed(val, cm2_base + inst + reg); }
gpl-2.0
LeeDroid-/Ace-2.6.35
arch/powerpc/platforms/pseries/xics.c
729
22166
/* * arch/powerpc/platforms/pseries/xics.c * * Copyright 2000 IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/radix-tree.h> #include <linux/cpu.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/percpu.h> #include <asm/firmware.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/smp.h> #include <asm/rtas.h> #include <asm/hvcall.h> #include <asm/machdep.h> #include "xics.h" #include "plpar_wrappers.h" static struct irq_host *xics_host; #define XICS_IPI 2 #define XICS_IRQ_SPURIOUS 0 /* Want a priority other than 0. Various HW issues require this. */ #define DEFAULT_PRIORITY 5 /* * Mark IPIs as higher priority so we can take them inside interrupts that * arent marked IRQF_DISABLED */ #define IPI_PRIORITY 4 /* The least favored priority */ #define LOWEST_PRIORITY 0xFF /* The number of priorities defined above */ #define MAX_NUM_PRIORITIES 3 static unsigned int default_server = 0xFF; static unsigned int default_distrib_server = 0; static unsigned int interrupt_server_size = 8; /* RTAS service tokens */ static int ibm_get_xive; static int ibm_set_xive; static int ibm_int_on; static int ibm_int_off; struct xics_cppr { unsigned char stack[MAX_NUM_PRIORITIES]; int index; }; static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); /* Direct hardware low level accessors */ /* The part of the interrupt presentation layer that we care about */ struct xics_ipl { union { u32 word; u8 bytes[4]; } xirr_poll; union { u32 word; u8 bytes[4]; } xirr; u32 dummy; union { u32 word; u8 bytes[4]; } qirr; }; static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; 
static inline unsigned int direct_xirr_info_get(void) { int cpu = smp_processor_id(); return in_be32(&xics_per_cpu[cpu]->xirr.word); } static inline void direct_xirr_info_set(unsigned int value) { int cpu = smp_processor_id(); out_be32(&xics_per_cpu[cpu]->xirr.word, value); } static inline void direct_cppr_info(u8 value) { int cpu = smp_processor_id(); out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); } static inline void direct_qirr_info(int n_cpu, u8 value) { out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); } /* LPAR low level accessors */ static inline unsigned int lpar_xirr_info_get(unsigned char cppr) { unsigned long lpar_rc; unsigned long return_value; lpar_rc = plpar_xirr(&return_value, cppr); if (lpar_rc != H_SUCCESS) panic(" bad return code xirr - rc = %lx\n", lpar_rc); return (unsigned int)return_value; } static inline void lpar_xirr_info_set(unsigned int value) { unsigned long lpar_rc; lpar_rc = plpar_eoi(value); if (lpar_rc != H_SUCCESS) panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, value); } static inline void lpar_cppr_info(u8 value) { unsigned long lpar_rc; lpar_rc = plpar_cppr(value); if (lpar_rc != H_SUCCESS) panic("bad return code cppr - rc = %lx\n", lpar_rc); } static inline void lpar_qirr_info(int n_cpu , u8 value) { unsigned long lpar_rc; lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); if (lpar_rc != H_SUCCESS) panic("bad return code qirr - rc = %lx\n", lpar_rc); } /* Interface to generic irq subsystem */ #ifdef CONFIG_SMP /* * For the moment we only implement delivery to all cpus or one cpu. * * If the requested affinity is cpu_all_mask, we set global affinity. * If not we set it to the first cpu in the mask, even if multiple cpus * are set. This is so things like irqbalance (which set core and package * wide affinities) do the right thing. 
*/ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, unsigned int strict_check) { if (!distribute_irqs) return default_server; if (!cpumask_equal(cpumask, cpu_all_mask)) { int server = cpumask_first_and(cpu_online_mask, cpumask); if (server < nr_cpu_ids) return get_hard_smp_processor_id(server); if (strict_check) return -1; } /* * Workaround issue with some versions of JS20 firmware that * deliver interrupts to cpus which haven't been started. This * happens when using the maxcpus= boot option. */ if (cpumask_equal(cpu_online_mask, cpu_present_mask)) return default_distrib_server; return default_server; } #else #define get_irq_server(virq, cpumask, strict_check) (default_server) #endif static void xics_unmask_irq(unsigned int virq) { unsigned int irq; int call_status; int server; pr_devel("xics: unmask virq %d\n", virq); irq = (unsigned int)irq_map[virq].hwirq; pr_devel(" -> map to hwirq 0x%x\n", irq); if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) return; server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", __func__, irq, server, call_status); return; } /* Now unmask the interrupt (often a no-op) */ call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", __func__, irq, call_status); return; } } static unsigned int xics_startup(unsigned int virq) { /* * The generic MSI code returns with the interrupt disabled on the * card, using the MSI mask bits. Firmware doesn't appear to unmask * at that level, so we do it here by hand. 
*/ if (irq_to_desc(virq)->msi_desc) unmask_msi_irq(virq); /* unmask it */ xics_unmask_irq(virq); return 0; } static void xics_mask_real_irq(unsigned int irq) { int call_status; if (irq == XICS_IPI) return; call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", __func__, irq, call_status); return; } /* Have to set XIVE to 0xff to be able to remove a slot */ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", __func__, irq, call_status); return; } } static void xics_mask_irq(unsigned int virq) { unsigned int irq; pr_devel("xics: mask virq %d\n", virq); irq = (unsigned int)irq_map[virq].hwirq; if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) return; xics_mask_real_irq(irq); } static void xics_mask_unknown_vec(unsigned int vec) { printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); xics_mask_real_irq(vec); } static inline unsigned int xics_xirr_vector(unsigned int xirr) { /* * The top byte is the old cppr, to be restored on EOI. * The remaining 24 bits are the vector. */ return xirr & 0x00ffffff; } static void push_cppr(unsigned int vec) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) return; if (vec == XICS_IPI) os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; else os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; } static unsigned int xics_get_irq_direct(void) { unsigned int xirr = direct_xirr_info_get(); unsigned int vec = xics_xirr_vector(xirr); unsigned int irq; if (vec == XICS_IRQ_SPURIOUS) return NO_IRQ; irq = irq_radix_revmap_lookup(xics_host, vec); if (likely(irq != NO_IRQ)) { push_cppr(vec); return irq; } /* We don't have a linux mapping, so have rtas mask it. 
*/ xics_mask_unknown_vec(vec); /* We might learn about it later, so EOI it */ direct_xirr_info_set(xirr); return NO_IRQ; } static unsigned int xics_get_irq_lpar(void) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); unsigned int vec = xics_xirr_vector(xirr); unsigned int irq; if (vec == XICS_IRQ_SPURIOUS) return NO_IRQ; irq = irq_radix_revmap_lookup(xics_host, vec); if (likely(irq != NO_IRQ)) { push_cppr(vec); return irq; } /* We don't have a linux mapping, so have RTAS mask it. */ xics_mask_unknown_vec(vec); /* We might learn about it later, so EOI it */ lpar_xirr_info_set(xirr); return NO_IRQ; } static unsigned char pop_cppr(void) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); if (WARN_ON(os_cppr->index < 1)) return LOWEST_PRIORITY; return os_cppr->stack[--os_cppr->index]; } static void xics_eoi_direct(unsigned int virq) { unsigned int irq = (unsigned int)irq_map[virq].hwirq; iosync(); direct_xirr_info_set((pop_cppr() << 24) | irq); } static void xics_eoi_lpar(unsigned int virq) { unsigned int irq = (unsigned int)irq_map[virq].hwirq; iosync(); lpar_xirr_info_set((pop_cppr() << 24) | irq); } static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) { unsigned int irq; int status; int xics_status[2]; int irq_server; irq = (unsigned int)irq_map[virq].hwirq; if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) return -1; status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", __func__, irq, status); return -1; } irq_server = get_irq_server(virq, cpumask, 1); if (irq_server == -1) { char cpulist[128]; cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); printk(KERN_WARNING "%s: No online cpus in the mask %s for irq %d\n", __func__, cpulist, virq); return -1; } status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u 
returns %d\n", __func__, irq, status); return -1; } return 0; } static struct irq_chip xics_pic_direct = { .name = "XICS", .startup = xics_startup, .mask = xics_mask_irq, .unmask = xics_unmask_irq, .eoi = xics_eoi_direct, .set_affinity = xics_set_affinity }; static struct irq_chip xics_pic_lpar = { .name = "XICS", .startup = xics_startup, .mask = xics_mask_irq, .unmask = xics_unmask_irq, .eoi = xics_eoi_lpar, .set_affinity = xics_set_affinity }; /* Interface to arch irq controller subsystem layer */ /* Points to the irq_chip we're actually using */ static struct irq_chip *xics_irq_chip; static int xics_host_match(struct irq_host *h, struct device_node *node) { /* IBM machines have interrupt parents of various funky types for things * like vdevices, events, etc... The trick we use here is to match * everything here except the legacy 8259 which is compatible "chrp,iic" */ return !of_device_is_compatible(node, "chrp,iic"); } static int xics_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); /* Insert the interrupt mapping into the radix tree for fast lookup */ irq_radix_revmap_insert(xics_host, virq, hw); irq_to_desc(virq)->status |= IRQ_LEVEL; set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); return 0; } static int xics_host_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { /* Current xics implementation translates everything * to level. It is not technically right for MSIs but this * is irrelevant at this point. 
We might get smarter in the future */ *out_hwirq = intspec[0]; *out_flags = IRQ_TYPE_LEVEL_LOW; return 0; } static struct irq_host_ops xics_host_ops = { .match = xics_host_match, .map = xics_host_map, .xlate = xics_host_xlate, }; static void __init xics_init_host(void) { if (firmware_has_feature(FW_FEATURE_LPAR)) xics_irq_chip = &xics_pic_lpar; else xics_irq_chip = &xics_pic_direct; xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, XICS_IRQ_SPURIOUS); BUG_ON(xics_host == NULL); irq_set_default_host(xics_host); } /* Inter-processor interrupt support */ #ifdef CONFIG_SMP /* * XICS only has a single IPI, so encode the messages per CPU */ static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); static inline void smp_xics_do_message(int cpu, int msg) { unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); set_bit(msg, tgt); mb(); if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_qirr_info(cpu, IPI_PRIORITY); else direct_qirr_info(cpu, IPI_PRIORITY); } void smp_xics_message_pass(int target, int msg) { unsigned int i; if (target < NR_CPUS) { smp_xics_do_message(target, msg); } else { for_each_online_cpu(i) { if (target == MSG_ALL_BUT_SELF && i == smp_processor_id()) continue; smp_xics_do_message(i, msg); } } } static irqreturn_t xics_ipi_dispatch(int cpu) { unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); WARN_ON(cpu_is_offline(cpu)); mb(); /* order mmio clearing qirr */ while (*tgt) { if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { smp_message_recv(PPC_MSG_CALL_FUNCTION); } if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { smp_message_recv(PPC_MSG_RESCHEDULE); } if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); } #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { smp_message_recv(PPC_MSG_DEBUGGER_BREAK); } #endif } return IRQ_HANDLED; } static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) { int cpu = 
smp_processor_id(); direct_qirr_info(cpu, 0xff); return xics_ipi_dispatch(cpu); } static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) { int cpu = smp_processor_id(); lpar_qirr_info(cpu, 0xff); return xics_ipi_dispatch(cpu); } static void xics_request_ipi(void) { unsigned int ipi; int rc; ipi = irq_create_mapping(xics_host, XICS_IPI); BUG_ON(ipi == NO_IRQ); /* * IPIs are marked IRQF_DISABLED as they must run with irqs * disabled */ set_irq_handler(ipi, handle_percpu_irq); if (firmware_has_feature(FW_FEATURE_LPAR)) rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); else rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); BUG_ON(rc); } int __init smp_xics_probe(void) { xics_request_ipi(); return cpumask_weight(cpu_possible_mask); } #endif /* CONFIG_SMP */ /* Initialization */ static void xics_update_irq_servers(void) { int i, j; struct device_node *np; u32 ilen; const u32 *ireg; u32 hcpuid; /* Find the server numbers for the boot cpu. */ np = of_get_cpu_node(boot_cpuid, NULL); BUG_ON(!np); ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); if (!ireg) { of_node_put(np); return; } i = ilen / sizeof(int); hcpuid = get_hard_smp_processor_id(boot_cpuid); /* Global interrupt distribution server is specified in the last * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last * entry fom this property for current boot cpu id and use it as * default distribution server */ for (j = 0; j < i; j += 2) { if (ireg[j] == hcpuid) { default_server = hcpuid; default_distrib_server = ireg[j+1]; } } of_node_put(np); } static void __init xics_map_one_cpu(int hw_id, unsigned long addr, unsigned long size) { int i; /* This may look gross but it's good enough for now, we don't quite * have a hard -> linux processor id matching. 
*/ for_each_possible_cpu(i) { if (!cpu_present(i)) continue; if (hw_id == get_hard_smp_processor_id(i)) { xics_per_cpu[i] = ioremap(addr, size); return; } } } static void __init xics_init_one_node(struct device_node *np, unsigned int *indx) { unsigned int ilen; const u32 *ireg; /* This code does the theorically broken assumption that the interrupt * server numbers are the same as the hard CPU numbers. * This happens to be the case so far but we are playing with fire... * should be fixed one of these days. -BenH. */ ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); /* Do that ever happen ? we'll know soon enough... but even good'old * f80 does have that property .. */ WARN_ON(ireg == NULL); if (ireg) { /* * set node starting index for this node */ *indx = *ireg; } ireg = of_get_property(np, "reg", &ilen); if (!ireg) panic("xics_init_IRQ: can't find interrupt reg property"); while (ilen >= (4 * sizeof(u32))) { unsigned long addr, size; /* XXX Use proper OF parsing code here !!! 
*/ addr = (unsigned long)*ireg++ << 32; ilen -= sizeof(u32); addr |= *ireg++; ilen -= sizeof(u32); size = (unsigned long)*ireg++ << 32; ilen -= sizeof(u32); size |= *ireg++; ilen -= sizeof(u32); xics_map_one_cpu(*indx, addr, size); (*indx)++; } } void __init xics_init_IRQ(void) { struct device_node *np; u32 indx = 0; int found = 0; const u32 *isize; ppc64_boot_msg(0x20, "XICS Init"); ibm_get_xive = rtas_token("ibm,get-xive"); ibm_set_xive = rtas_token("ibm,set-xive"); ibm_int_on = rtas_token("ibm,int-on"); ibm_int_off = rtas_token("ibm,int-off"); for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { found = 1; if (firmware_has_feature(FW_FEATURE_LPAR)) { of_node_put(np); break; } xics_init_one_node(np, &indx); } if (found == 0) return; /* get the bit size of server numbers */ found = 0; for_each_compatible_node(np, NULL, "ibm,ppc-xics") { isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); if (!isize) continue; if (!found) { interrupt_server_size = *isize; found = 1; } else if (*isize != interrupt_server_size) { printk(KERN_WARNING "XICS: " "mismatched ibm,interrupt-server#-size\n"); interrupt_server_size = max(*isize, interrupt_server_size); } } xics_update_irq_servers(); xics_init_host(); if (firmware_has_feature(FW_FEATURE_LPAR)) ppc_md.get_irq = xics_get_irq_lpar; else ppc_md.get_irq = xics_get_irq_direct; xics_setup_cpu(); ppc64_boot_msg(0x21, "XICS Done"); } /* Cpu startup, shutdown, and hotplug */ static void xics_set_cpu_priority(unsigned char cppr) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); /* * we only really want to set the priority when there's * just one cppr value on the stack */ WARN_ON(os_cppr->index != 0); os_cppr->stack[0] = cppr; if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_cppr_info(cppr); else direct_cppr_info(cppr); iosync(); } /* Have the calling processor join or leave the specified global queue */ static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) { int index; int status; if 
(!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) return; index = (1UL << interrupt_server_size) - 1 - gserver; status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", GLOBAL_INTERRUPT_QUEUE, index, join, status); } void xics_setup_cpu(void) { xics_set_cpu_priority(LOWEST_PRIORITY); xics_set_cpu_giq(default_distrib_server, 1); } void xics_teardown_cpu(void) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); int cpu = smp_processor_id(); /* * we have to reset the cppr index to 0 because we're * not going to return from the IPI */ os_cppr->index = 0; xics_set_cpu_priority(0); /* Clear any pending IPI request */ if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_qirr_info(cpu, 0xff); else direct_qirr_info(cpu, 0xff); } void xics_kexec_teardown_cpu(int secondary) { xics_teardown_cpu(); /* * we take the ipi irq but and never return so we * need to EOI the IPI, but want to leave our priority 0 * * should we check all the other interrupts too? * should we be flagging idle loop instead? * or creating some task to be scheduled? */ if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_xirr_info_set((0x00 << 24) | XICS_IPI); else direct_xirr_info_set((0x00 << 24) | XICS_IPI); /* * Some machines need to have at least one cpu in the GIQ, * so leave the master cpu in the group. */ if (secondary) xics_set_cpu_giq(default_distrib_server, 0); } #ifdef CONFIG_HOTPLUG_CPU /* Interrupts are disabled. */ void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); unsigned int irq, virq; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == default_server) xics_update_irq_servers(); /* Reject any interrupt that was queued to us... */ xics_set_cpu_priority(0); /* Remove ourselves from the global interrupt queue */ xics_set_cpu_giq(default_distrib_server, 0); /* Allow IPIs again... 
*/ xics_set_cpu_priority(DEFAULT_PRIORITY); for_each_irq(virq) { struct irq_desc *desc; int xics_status[2]; int status; unsigned long flags; /* We cant set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; if (irq_map[virq].host != xics_host) continue; irq = (unsigned int)irq_map[virq].hwirq; /* We need to get IPIs still. */ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) continue; desc = irq_to_desc(virq); /* We only need to migrate enabled IRQS */ if (desc == NULL || desc->chip == NULL || desc->action == NULL || desc->chip->set_affinity == NULL) continue; raw_spin_lock_irqsave(&desc->lock, flags); status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", __func__, irq, status); goto unlock; } /* * We only support delivery to all cpus or to one cpu. * The irq has to be migrated only in the single cpu * case. */ if (xics_status[0] != hw_cpu) goto unlock; printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", virq, cpu); /* Reset affinity to all cpus */ cpumask_setall(irq_to_desc(virq)->affinity); desc->chip->set_affinity(virq, cpu_all_mask); unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif
gpl-2.0
mkasick/android_kernel_samsung_d2vzw
arch/arm/mach-s3c64xx/mach-smdk6410.c
1753
17706
/* linux/arch/arm/mach-s3c64xx/mach-smdk6410.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/leds.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/smsc911x.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/pwm_backlight.h> #ifdef CONFIG_SMDK6410_WM1190_EV1 #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/pmic.h> #endif #ifdef CONFIG_SMDK6410_WM1192_EV1 #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #endif #include <video/platform_lcd.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/regs-fb.h> #include <mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <plat/regs-serial.h> #include <mach/regs-modem.h> #include <mach/regs-gpio.h> #include <mach/regs-sys.h> #include <mach/regs-srom.h> #include <plat/ata.h> #include <plat/iic.h> #include <plat/fb.h> #include <plat/gpio-cfg.h> #include <mach/s3c6410.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/keypad.h> #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE static struct s3c2410_uartcfg smdk6410_uartcfgs[] __initdata = { [0] 
= { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [3] = { .hwport = 3, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, }; /* framebuffer and LCD setup. */ /* GPF15 = LCD backlight control * GPF13 => Panel power * GPN5 = LCD nRESET signal * PWM_TOUT1 => backlight brightness */ static void smdk6410_lcd_power_set(struct plat_lcd_data *pd, unsigned int power) { if (power) { gpio_direction_output(S3C64XX_GPF(13), 1); /* fire nRESET on power up */ gpio_direction_output(S3C64XX_GPN(5), 0); msleep(10); gpio_direction_output(S3C64XX_GPN(5), 1); msleep(1); } else { gpio_direction_output(S3C64XX_GPF(13), 0); } } static struct plat_lcd_data smdk6410_lcd_power_data = { .set_power = smdk6410_lcd_power_set, }; static struct platform_device smdk6410_lcd_powerdev = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6410_lcd_power_data, }; static struct s3c_fb_pd_win smdk6410_fb_win0 = { /* this is to ensure we use win0 */ .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, .virtual_y = 480 * 2, .virtual_x = 800, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &smdk6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; /* * Configuring Ethernet on SMDK6410 * * Both CS8900A and LAN9115 chips share one chip select mediated by CFG6. 
* The constant address below corresponds to nCS1 * * 1) Set CFGB2 p3 ON others off, no other CFGB selects "ethernet" * 2) CFG6 needs to be switched to "LAN9115" side */ static struct resource smdk6410_smsc911x_resources[] = { [0] = { .start = S3C64XX_PA_XM0CSN1, .end = S3C64XX_PA_XM0CSN1 + SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = S3C_EINT(10), .end = S3C_EINT(10), .flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW, }, }; static struct smsc911x_platform_config smdk6410_smsc911x_pdata = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_32BIT | SMSC911X_FORCE_INTERNAL_PHY, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smdk6410_smsc911x = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smdk6410_smsc911x_resources), .resource = &smdk6410_smsc911x_resources[0], .dev = { .platform_data = &smdk6410_smsc911x_pdata, }, }; #ifdef CONFIG_REGULATOR static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] = { { /* WM8580 */ .supply = "PVDD", .dev_name = "0-001b", }, { /* WM8580 */ .supply = "AVDD", .dev_name = "0-001b", }, }; static struct regulator_init_data smdk6410_b_pwr_5v_data = { .constraints = { .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(smdk6410_b_pwr_5v_consumers), .consumer_supplies = smdk6410_b_pwr_5v_consumers, }; static struct fixed_voltage_config smdk6410_b_pwr_5v_pdata = { .supply_name = "B_PWR_5V", .microvolts = 5000000, .init_data = &smdk6410_b_pwr_5v_data, .gpio = -EINVAL, }; static struct platform_device smdk6410_b_pwr_5v = { .name = "reg-fixed-voltage", .id = -1, .dev = { .platform_data = &smdk6410_b_pwr_5v_pdata, }, }; #endif static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = { .setup_gpio = s3c64xx_ide_setup_gpio, }; static uint32_t smdk6410_keymap[] __initdata = { /* KEY(row, col, keycode) */ KEY(0, 3, KEY_1), KEY(0, 4, KEY_2), KEY(0, 5, KEY_3), KEY(0, 6, KEY_4), KEY(0, 7, KEY_5), KEY(1, 3, KEY_A), KEY(1, 
4, KEY_B), KEY(1, 5, KEY_C), KEY(1, 6, KEY_D), KEY(1, 7, KEY_E) }; static struct matrix_keymap_data smdk6410_keymap_data __initdata = { .keymap = smdk6410_keymap, .keymap_size = ARRAY_SIZE(smdk6410_keymap), }; static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = { .keymap_data = &smdk6410_keymap_data, .rows = 2, .cols = 8, }; static int smdk6410_backlight_init(struct device *dev) { int ret; ret = gpio_request(S3C64XX_GPF(15), "Backlight"); if (ret) { printk(KERN_ERR "failed to request GPF for PWM-OUT1\n"); return ret; } /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */ s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2)); return 0; } static void smdk6410_backlight_exit(struct device *dev) { s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT); gpio_free(S3C64XX_GPF(15)); } static struct platform_pwm_backlight_data smdk6410_backlight_data = { .pwm_id = 1, .max_brightness = 255, .dft_brightness = 255, .pwm_period_ns = 78770, .init = smdk6410_backlight_init, .exit = smdk6410_backlight_exit, }; static struct platform_device smdk6410_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &s3c_device_timer[1].dev, .platform_data = &smdk6410_backlight_data, }, }; static struct map_desc smdk6410_iodesc[] = {}; static struct platform_device *smdk6410_devices[] __initdata = { #ifdef CONFIG_SMDK6410_SD_CH0 &s3c_device_hsmmc0, #endif #ifdef CONFIG_SMDK6410_SD_CH1 &s3c_device_hsmmc1, #endif &s3c_device_i2c0, &s3c_device_i2c1, &s3c_device_fb, &s3c_device_ohci, &s3c_device_usb_hsotg, &samsung_asoc_dma, &s3c64xx_device_iisv4, &samsung_device_keypad, #ifdef CONFIG_REGULATOR &smdk6410_b_pwr_5v, #endif &smdk6410_lcd_powerdev, &smdk6410_smsc911x, &s3c_device_adc, &s3c_device_cfcon, &s3c_device_rtc, &s3c_device_ts, &s3c_device_wdt, &s3c_device_timer[1], &smdk6410_backlight_device, }; #ifdef CONFIG_REGULATOR /* ARM core */ static struct regulator_consumer_supply smdk6410_vddarm_consumers[] = { { .supply = "vddarm", } }; /* VDDARM, BUCK1 on J5 */ static struct 
regulator_init_data smdk6410_vddarm = { .constraints = { .name = "PVDD_ARM", .min_uV = 1000000, .max_uV = 1300000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .num_consumer_supplies = ARRAY_SIZE(smdk6410_vddarm_consumers), .consumer_supplies = smdk6410_vddarm_consumers, }; /* VDD_INT, BUCK2 on J5 */ static struct regulator_init_data smdk6410_vddint = { .constraints = { .name = "PVDD_INT", .min_uV = 1000000, .max_uV = 1200000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, }; /* VDD_HI, LDO3 on J5 */ static struct regulator_init_data smdk6410_vddhi = { .constraints = { .name = "PVDD_HI", .always_on = 1, }, }; /* VDD_PLL, LDO2 on J5 */ static struct regulator_init_data smdk6410_vddpll = { .constraints = { .name = "PVDD_PLL", .always_on = 1, }, }; /* VDD_UH_MMC, LDO5 on J5 */ static struct regulator_init_data smdk6410_vdduh_mmc = { .constraints = { .name = "PVDD_UH+PVDD_MMC", .always_on = 1, }, }; /* VCCM3BT, LDO8 on J5 */ static struct regulator_init_data smdk6410_vccmc3bt = { .constraints = { .name = "PVCCM3BT", .always_on = 1, }, }; /* VCCM2MTV, LDO11 on J5 */ static struct regulator_init_data smdk6410_vccm2mtv = { .constraints = { .name = "PVCCM2MTV", .always_on = 1, }, }; /* VDD_LCD, LDO12 on J5 */ static struct regulator_init_data smdk6410_vddlcd = { .constraints = { .name = "PVDD_LCD", .always_on = 1, }, }; /* VDD_OTGI, LDO9 on J5 */ static struct regulator_init_data smdk6410_vddotgi = { .constraints = { .name = "PVDD_OTGI", .always_on = 1, }, }; /* VDD_OTG, LDO14 on J5 */ static struct regulator_init_data smdk6410_vddotg = { .constraints = { .name = "PVDD_OTG", .always_on = 1, }, }; /* VDD_ALIVE, LDO15 on J5 */ static struct regulator_init_data smdk6410_vddalive = { .constraints = { .name = "PVDD_ALIVE", .always_on = 1, }, }; /* VDD_AUDIO, VLDO_AUDIO on J5 */ static struct regulator_init_data smdk6410_vddaudio = { .constraints = { .name = "PVDD_AUDIO", .always_on = 1, }, }; #endif #ifdef CONFIG_SMDK6410_WM1190_EV1 /* S3C64xx 
internal logic & PLL */ static struct regulator_init_data wm8350_dcdc1_data = { .constraints = { .name = "PVDD_INT+PVDD_PLL", .min_uV = 1200000, .max_uV = 1200000, .always_on = 1, .apply_uV = 1, }, }; /* Memory */ static struct regulator_init_data wm8350_dcdc3_data = { .constraints = { .name = "PVDD_MEM", .min_uV = 1800000, .max_uV = 1800000, .always_on = 1, .state_mem = { .uV = 1800000, .mode = REGULATOR_MODE_NORMAL, .enabled = 1, }, .initial_state = PM_SUSPEND_MEM, }, }; /* USB, EXT, PCM, ADC/DAC, USB, MMC */ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = { { /* WM8580 */ .supply = "DVDD", .dev_name = "0-001b", }, }; static struct regulator_init_data wm8350_dcdc4_data = { .constraints = { .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV", .min_uV = 3000000, .max_uV = 3000000, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(wm8350_dcdc4_consumers), .consumer_supplies = wm8350_dcdc4_consumers, }; /* OTGi/1190-EV1 HPVDD & AVDD */ static struct regulator_init_data wm8350_ldo4_data = { .constraints = { .name = "PVDD_OTGI+HPVDD+AVDD", .min_uV = 1200000, .max_uV = 1200000, .apply_uV = 1, .always_on = 1, }, }; static struct { int regulator; struct regulator_init_data *initdata; } wm1190_regulators[] = { { WM8350_DCDC_1, &wm8350_dcdc1_data }, { WM8350_DCDC_3, &wm8350_dcdc3_data }, { WM8350_DCDC_4, &wm8350_dcdc4_data }, { WM8350_DCDC_6, &smdk6410_vddarm }, { WM8350_LDO_1, &smdk6410_vddalive }, { WM8350_LDO_2, &smdk6410_vddotg }, { WM8350_LDO_3, &smdk6410_vddlcd }, { WM8350_LDO_4, &wm8350_ldo4_data }, }; static int __init smdk6410_wm8350_init(struct wm8350 *wm8350) { int i; /* Configure the IRQ line */ s3c_gpio_setpull(S3C64XX_GPN(12), S3C_GPIO_PULL_UP); /* Instantiate the regulators */ for (i = 0; i < ARRAY_SIZE(wm1190_regulators); i++) wm8350_register_regulator(wm8350, wm1190_regulators[i].regulator, wm1190_regulators[i].initdata); return 0; } static struct wm8350_platform_data __initdata smdk6410_wm8350_pdata = { .init = smdk6410_wm8350_init, 
.irq_high = 1, .irq_base = IRQ_BOARD_START, }; #endif #ifdef CONFIG_SMDK6410_WM1192_EV1 static struct gpio_led wm1192_pmic_leds[] = { { .name = "PMIC:red:power", .gpio = GPIO_BOARD_START + 3, .default_state = LEDS_GPIO_DEFSTATE_ON, }, }; static struct gpio_led_platform_data wm1192_pmic_led = { .num_leds = ARRAY_SIZE(wm1192_pmic_leds), .leds = wm1192_pmic_leds, }; static struct platform_device wm1192_pmic_led_dev = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &wm1192_pmic_led, }, }; static int wm1192_pre_init(struct wm831x *wm831x) { int ret; /* Configure the IRQ line */ s3c_gpio_setpull(S3C64XX_GPN(12), S3C_GPIO_PULL_UP); ret = platform_device_register(&wm1192_pmic_led_dev); if (ret != 0) dev_err(wm831x->dev, "Failed to add PMIC LED: %d\n", ret); return 0; } static struct wm831x_backlight_pdata wm1192_backlight_pdata = { .isink = 1, .max_uA = 27554, }; static struct regulator_init_data wm1192_dcdc3 = { .constraints = { .name = "PVDD_MEM+PVDD_GPS", .always_on = 1, }, }; static struct regulator_consumer_supply wm1192_ldo1_consumers[] = { { .supply = "DVDD", .dev_name = "0-001b", }, /* WM8580 */ }; static struct regulator_init_data wm1192_ldo1 = { .constraints = { .name = "PVDD_LCD+PVDD_EXT", .always_on = 1, }, .consumer_supplies = wm1192_ldo1_consumers, .num_consumer_supplies = ARRAY_SIZE(wm1192_ldo1_consumers), }; static struct wm831x_status_pdata wm1192_led7_pdata = { .name = "LED7:green:", }; static struct wm831x_status_pdata wm1192_led8_pdata = { .name = "LED8:green:", }; static struct wm831x_pdata smdk6410_wm1192_pdata = { .pre_init = wm1192_pre_init, .irq_base = IRQ_BOARD_START, .backlight = &wm1192_backlight_pdata, .dcdc = { &smdk6410_vddarm, /* DCDC1 */ &smdk6410_vddint, /* DCDC2 */ &wm1192_dcdc3, }, .gpio_base = GPIO_BOARD_START, .ldo = { &wm1192_ldo1, /* LDO1 */ &smdk6410_vdduh_mmc, /* LDO2 */ NULL, /* LDO3 NC */ &smdk6410_vddotgi, /* LDO4 */ &smdk6410_vddotg, /* LDO5 */ &smdk6410_vddhi, /* LDO6 */ &smdk6410_vddaudio, /* LDO7 */ 
&smdk6410_vccm2mtv, /* LDO8 */ &smdk6410_vddpll, /* LDO9 */ &smdk6410_vccmc3bt, /* LDO10 */ &smdk6410_vddalive, /* LDO11 */ }, .status = { &wm1192_led7_pdata, &wm1192_led8_pdata, }, }; #endif static struct i2c_board_info i2c_devs0[] __initdata = { { I2C_BOARD_INFO("24c08", 0x50), }, { I2C_BOARD_INFO("wm8580", 0x1b), }, #ifdef CONFIG_SMDK6410_WM1192_EV1 { I2C_BOARD_INFO("wm8312", 0x34), .platform_data = &smdk6410_wm1192_pdata, .irq = S3C_EINT(12), }, #endif #ifdef CONFIG_SMDK6410_WM1190_EV1 { I2C_BOARD_INFO("wm8350", 0x1a), .platform_data = &smdk6410_wm8350_pdata, .irq = S3C_EINT(12), }, #endif }; static struct i2c_board_info i2c_devs1[] __initdata = { { I2C_BOARD_INFO("24c128", 0x57), }, /* Samsung S524AD0XD1 */ }; static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 2, }; static void __init smdk6410_map_io(void) { u32 tmp; s3c64xx_init_io(smdk6410_iodesc, ARRAY_SIZE(smdk6410_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(smdk6410_uartcfgs, ARRAY_SIZE(smdk6410_uartcfgs)); /* set the LCD type */ tmp = __raw_readl(S3C64XX_SPCON); tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK; tmp |= S3C64XX_SPCON_LCD_SEL_RGB; __raw_writel(tmp, S3C64XX_SPCON); /* remove the lcd bypass */ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON); tmp &= ~MIFPCON_LCD_BYPASS; __raw_writel(tmp, S3C64XX_MODEM_MIFPCON); } static void __init smdk6410_machine_init(void) { u32 cs1; s3c_i2c0_set_platdata(NULL); s3c_i2c1_set_platdata(NULL); s3c_fb_set_platdata(&smdk6410_lcd_pdata); samsung_keypad_set_platdata(&smdk6410_keypad_data); s3c24xx_ts_set_platdata(&s3c_ts_platform); /* configure nCS1 width to 16 bits */ cs1 = __raw_readl(S3C64XX_SROM_BW) & ~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT); cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) | (1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) | (1 << S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) << S3C64XX_SROM_BW__NCS1__SHIFT; __raw_writel(cs1, S3C64XX_SROM_BW); /* set timing for nCS1 suitable for 
ethernet chip */ __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | (0xe << S3C64XX_SROM_BCX__TACC__SHIFT) | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); gpio_request(S3C64XX_GPN(5), "LCD power"); gpio_request(S3C64XX_GPF(13), "LCD power"); i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0)); i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1)); s3c_ide_set_platdata(&smdk6410_ide_pdata); platform_add_devices(smdk6410_devices, ARRAY_SIZE(smdk6410_devices)); } MACHINE_START(SMDK6410, "SMDK6410") /* Maintainer: Ben Dooks <ben-linux@fluff.org> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = smdk6410_map_io, .init_machine = smdk6410_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
rafyvitto/HTC-Vivid-ICS-GPU-CPU-OC
drivers/net/wireless/iwlwifi/iwl-rx.c
1753
34986
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/sched.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" /****************************************************************************** * * RX path functions * ******************************************************************************/ /* * Rx theory of operation * * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), * each of which point to Receive Buffers to be filled by the NIC. 
These get * used not only for Rx frames, but for any command response or notification * from the NIC. The driver and NIC manage the Rx buffers by means * of indexes into the circular buffer. * * Rx Queue Indexes * The host/firmware share two index registers for managing the Rx buffers. * * The READ index maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ index is managed by the firmware once the card is enabled. * * The WRITE index maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. * * During initialization, the host sets up the READ queue position to the first * INDEX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ index * and fire the RX interrupt. The driver can then query the READ index and * process as many packets as possible, moving the WRITE index forward as it * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled * to replenish the iwl->rxq->rx_free. * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the * iwl->rxq is replenished and the READ INDEX is updated (updating the * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ * INDEX is not incremented and iwl->status(RX_STALLED) is set. 
If there * were enough free buffers and RX_STALLED is set it is cleared. * * * Driver sequence: * * iwl_rx_queue_alloc() Allocates rx_free * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls * iwl_rx_queue_restock * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE index. If insufficient rx_free buffers * are available, schedules iwl_rx_replenish * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Calls iwl_rx_queue_restock to refill any empty * slots. * ... * */ /** * iwl_rx_queue_space - Return number of free slots available in queue. */ int iwl_rx_queue_space(const struct iwl_rx_queue *q) { int s = q->read - q->write; if (s <= 0) s += RX_QUEUE_SIZE; /* keep some buffer to not confuse full and empty queue */ s -= 2; if (s < 0) s = 0; return s; } /** * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue */ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) { unsigned long flags; u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; u32 reg; spin_lock_irqsave(&q->lock, flags); if (q->need_update == 0) goto exit_unlock; if (priv->cfg->base_params->shadow_reg_enable) { /* shadow register enabled */ /* Device expects a multiple of 8 */ q->write_actual = (q->write & ~0x7); iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual); } else { /* If power-saving is in use, make sure device is awake */ if (test_bit(STATUS_POWER_PMI, &priv->status)) { reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup," " GP1 = 0x%x\n", reg); iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); goto exit_unlock; } q->write_actual = (q->write & ~0x7); iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); /* Else 
device is assumed to be awake */ } else { /* Device expects a multiple of 8 */ q->write_actual = (q->write & ~0x7); iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); } } q->need_update = 0; exit_unlock: spin_unlock_irqrestore(&q->lock, flags); } int iwl_rx_queue_alloc(struct iwl_priv *priv) { struct iwl_rx_queue *rxq = &priv->rxq; struct device *dev = &priv->pci_dev->dev; int i; spin_lock_init(&rxq->lock); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb; /* Fill the rx_used queue with _all_ of the Rx buffers */ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add_tail(&rxq->pool[i].list, &rxq->rx_used); /* Set us so that we have processed and used all buffers, but have * not restocked the Rx queue with fresh buffers */ rxq->read = rxq->write = 0; rxq->write_actual = 0; rxq->free_count = 0; rxq->need_update = 0; return 0; err_rb: dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); err_bd: return -ENOMEM; } /****************************************************************************** * * Generic RX handler implementations * ******************************************************************************/ static void iwl_rx_reply_error(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " "seq 0x%04X ser 0x%08X\n", le32_to_cpu(pkt->u.err_resp.error_type), get_cmd_string(pkt->u.err_resp.cmd_id), pkt->u.err_resp.cmd_id, le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), le32_to_cpu(pkt->u.err_resp.error_info)); } static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet 
*pkt = rxb_addr(rxb); struct iwl_csa_notification *csa = &(pkt->u.csa_notif); /* * MULTI-FIXME * See iwl_mac_channel_switch. */ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; struct iwl_rxon_cmd *rxon = (void *)&ctx->active; if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) return; if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { rxon->channel = csa->channel; ctx->staging.channel = csa->channel; IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", le16_to_cpu(csa->channel)); iwl_chswitch_done(priv, true); } else { IWL_ERR(priv, "CSA notif (fail) : channel %d\n", le16_to_cpu(csa->channel)); iwl_chswitch_done(priv, false); } } static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); if (!report->state) { IWL_DEBUG_11H(priv, "Spectrum Measure Notification: Start\n"); return; } memcpy(&priv->measure_report, report, sizeof(*report)); priv->measurement_status |= MEASUREMENT_READY; } static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", sleep->pm_sleep_mode, sleep->pm_wakeup_src); #endif } static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " "notification for %s:\n", len, get_cmd_string(pkt->hdr.cmd)); iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); } static void iwl_rx_beacon_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw; #ifdef 
CONFIG_IWLWIFI_DEBUG u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " "tsf:0x%.8x%.8x rate:%d\n", status & TX_STATUS_MSK, beacon->beacon_notify_hdr.failure_frame, le32_to_cpu(beacon->ibss_mgr_status), le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); #endif priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) queue_work(priv->workqueue, &priv->beacon_update); } /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ #define ACK_CNT_RATIO (50) #define BA_TIMEOUT_CNT (5) #define BA_TIMEOUT_MAX (16) /** * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. * * When the ACK count ratio is low and aggregated BA timeout retries exceeding * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal * operation state. */ static bool iwl_good_ack_health(struct iwl_priv *priv, struct statistics_tx *cur) { int actual_delta, expected_delta, ba_timeout_delta; struct statistics_tx *old; if (priv->_agn.agg_tids_count) return true; old = &priv->statistics.tx; actual_delta = le32_to_cpu(cur->actual_ack_cnt) - le32_to_cpu(old->actual_ack_cnt); expected_delta = le32_to_cpu(cur->expected_ack_cnt) - le32_to_cpu(old->expected_ack_cnt); /* Values should not be negative, but we do not trust the firmware */ if (actual_delta <= 0 || expected_delta <= 0) return true; ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) - le32_to_cpu(old->agg.ba_timeout); if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO && ba_timeout_delta > BA_TIMEOUT_CNT) { IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n", actual_delta, expected_delta, ba_timeout_delta); #ifdef CONFIG_IWLWIFI_DEBUGFS /* * This is ifdef'ed on DEBUGFS because otherwise the * statistics aren't available. 
If DEBUGFS is set but * DEBUG is not, these will just compile out. */ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n", priv->delta_stats.tx.rx_detected_cnt); IWL_DEBUG_RADIO(priv, "ack_or_ba_timeout_collision delta %d\n", priv->delta_stats.tx.ack_or_ba_timeout_collision); #endif if (ba_timeout_delta >= BA_TIMEOUT_MAX) return false; } return true; } /** * iwl_good_plcp_health - checks for plcp error. * * When the plcp error is exceeding the thresholds, reset the radio * to improve the throughput. */ static bool iwl_good_plcp_health(struct iwl_priv *priv, struct statistics_rx_phy *cur_ofdm, struct statistics_rx_ht_phy *cur_ofdm_ht, unsigned int msecs) { int delta; int threshold = priv->cfg->base_params->plcp_delta_threshold; if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); return true; } delta = le32_to_cpu(cur_ofdm->plcp_err) - le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) + le32_to_cpu(cur_ofdm_ht->plcp_err) - le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err); /* Can be negative if firmware reset statistics */ if (delta <= 0) return true; if ((delta * 100 / msecs) > threshold) { IWL_DEBUG_RADIO(priv, "plcp health threshold %u delta %d msecs %u\n", threshold, delta, msecs); return false; } return true; } static void iwl_recover_from_statistics(struct iwl_priv *priv, struct statistics_rx_phy *cur_ofdm, struct statistics_rx_ht_phy *cur_ofdm_ht, struct statistics_tx *tx, unsigned long stamp) { unsigned int msecs; if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); /* Only gather statistics and update time stamp when not associated */ if (!iwl_is_any_associated(priv)) return; /* Do not check/recover when do not have enough statistics data */ if (msecs < 99) return; if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) { IWL_ERR(priv, "low ack count detected, restart firmware\n"); if (!iwl_force_reset(priv, IWL_FW_RESET, false)) 
return; } if (iwlagn_mod_params.plcp_check && !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) iwl_force_reset(priv, IWL_RF_RESET, false); } /* Calculate noise level, based on measurements during network silence just * before arriving beacon. This measurement can be done only if we know * exactly when to expect beacons, therefore only when we're associated. */ static void iwl_rx_calc_noise(struct iwl_priv *priv) { struct statistics_rx_non_phy *rx_info; int num_active_rx = 0; int total_silence = 0; int bcn_silence_a, bcn_silence_b, bcn_silence_c; int last_rx_noise; rx_info = &priv->statistics.rx_non_phy; bcn_silence_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; bcn_silence_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; bcn_silence_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; if (bcn_silence_a) { total_silence += bcn_silence_a; num_active_rx++; } if (bcn_silence_b) { total_silence += bcn_silence_b; num_active_rx++; } if (bcn_silence_c) { total_silence += bcn_silence_c; num_active_rx++; } /* Average among active antennas */ if (num_active_rx) last_rx_noise = (total_silence / num_active_rx) - 107; else last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a, bcn_silence_b, bcn_silence_c, last_rx_noise); } #ifdef CONFIG_IWLWIFI_DEBUGFS /* * based on the assumption of all statistics counter are in DWORD * FIXME: This function is for debugging, do not deal with * the case of counters roll-over. 
*/ static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta, __le32 *max_delta, __le32 *accum, int size) { int i; for (i = 0; i < size / sizeof(__le32); i++, prev++, cur++, delta++, max_delta++, accum++) { if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) { *delta = cpu_to_le32( le32_to_cpu(*cur) - le32_to_cpu(*prev)); le32_add_cpu(accum, le32_to_cpu(*delta)); if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta)) *max_delta = *delta; } } } static void iwl_accumulative_statistics(struct iwl_priv *priv, struct statistics_general_common *common, struct statistics_rx_non_phy *rx_non_phy, struct statistics_rx_phy *rx_ofdm, struct statistics_rx_ht_phy *rx_ofdm_ht, struct statistics_rx_phy *rx_cck, struct statistics_tx *tx, struct statistics_bt_activity *bt_activity) { #define ACCUM(_name) \ accum_stats((__le32 *)&priv->statistics._name, \ (__le32 *)_name, \ (__le32 *)&priv->delta_stats._name, \ (__le32 *)&priv->max_delta_stats._name, \ (__le32 *)&priv->accum_stats._name, \ sizeof(*_name)); ACCUM(common); ACCUM(rx_non_phy); ACCUM(rx_ofdm); ACCUM(rx_ofdm_ht); ACCUM(rx_cck); ACCUM(tx); if (bt_activity) ACCUM(bt_activity); #undef ACCUM } #else static inline void iwl_accumulative_statistics(struct iwl_priv *priv, struct statistics_general_common *common, struct statistics_rx_non_phy *rx_non_phy, struct statistics_rx_phy *rx_ofdm, struct statistics_rx_ht_phy *rx_ofdm_ht, struct statistics_rx_phy *rx_cck, struct statistics_tx *tx, struct statistics_bt_activity *bt_activity) { } #endif static void iwl_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { unsigned long stamp = jiffies; const int reg_recalib_period = 60; int change; struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; __le32 *flag; struct statistics_general_common *common; struct statistics_rx_non_phy *rx_non_phy; struct statistics_rx_phy *rx_ofdm; struct statistics_rx_ht_phy *rx_ofdm_ht; struct statistics_rx_phy *rx_cck; struct statistics_tx 
*tx; struct statistics_bt_activity *bt_activity; len -= sizeof(struct iwl_cmd_header); /* skip header */ IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n", len); if (len == sizeof(struct iwl_bt_notif_statistics)) { struct iwl_bt_notif_statistics *stats; stats = &pkt->u.stats_bt; flag = &stats->flag; common = &stats->general.common; rx_non_phy = &stats->rx.general.common; rx_ofdm = &stats->rx.ofdm; rx_ofdm_ht = &stats->rx.ofdm_ht; rx_cck = &stats->rx.cck; tx = &stats->tx; bt_activity = &stats->general.activity; #ifdef CONFIG_IWLWIFI_DEBUGFS /* handle this exception directly */ priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills; le32_add_cpu(&priv->statistics.accum_num_bt_kills, le32_to_cpu(stats->rx.general.num_bt_kills)); #endif } else if (len == sizeof(struct iwl_notif_statistics)) { struct iwl_notif_statistics *stats; stats = &pkt->u.stats; flag = &stats->flag; common = &stats->general.common; rx_non_phy = &stats->rx.general; rx_ofdm = &stats->rx.ofdm; rx_ofdm_ht = &stats->rx.ofdm_ht; rx_cck = &stats->rx.cck; tx = &stats->tx; bt_activity = NULL; } else { WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n", len, sizeof(struct iwl_bt_notif_statistics), sizeof(struct iwl_notif_statistics)); return; } change = common->temperature != priv->statistics.common.temperature || (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) != (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK); iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm, rx_ofdm_ht, rx_cck, tx, bt_activity); iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp); priv->statistics.flag = *flag; memcpy(&priv->statistics.common, common, sizeof(*common)); memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy)); memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm)); memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht)); memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck)); memcpy(&priv->statistics.tx, tx, 
sizeof(*tx)); #ifdef CONFIG_IWLWIFI_DEBUGFS if (bt_activity) memcpy(&priv->statistics.bt_activity, bt_activity, sizeof(*bt_activity)); #endif priv->rx_statistics_jiffies = stamp; set_bit(STATUS_STATISTICS, &priv->status); /* Reschedule the statistics timer to occur in * reg_recalib_period seconds to ensure we get a * thermal update even if the uCode doesn't give * us one */ mod_timer(&priv->statistics_periodic, jiffies + msecs_to_jiffies(reg_recalib_period * 1000)); if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { iwl_rx_calc_noise(priv); queue_work(priv->workqueue, &priv->run_time_calib_work); } if (priv->cfg->ops->lib->temp_ops.temperature && change) priv->cfg->ops->lib->temp_ops.temperature(priv); } static void iwl_rx_reply_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { #ifdef CONFIG_IWLWIFI_DEBUGFS memset(&priv->accum_stats, 0, sizeof(priv->accum_stats)); memset(&priv->delta_stats, 0, sizeof(priv->delta_stats)); memset(&priv->max_delta_stats, 0, sizeof(priv->max_delta_stats)); #endif IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); } iwl_rx_statistics(priv, rxb); } /* Handle notification from uCode that card's power state is changing * due to software, hardware, or critical temperature RFKILL */ static void iwl_rx_card_state_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); unsigned long status = priv->status; IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", (flags & HW_CARD_DISABLED) ? "Kill" : "On", (flags & SW_CARD_DISABLED) ? "Kill" : "On", (flags & CT_CARD_DISABLED) ? 
"Reached" : "Not reached"); if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) { iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); iwl_write_direct32(priv, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); if (!(flags & RXON_CARD_DISABLED)) { iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); iwl_write_direct32(priv, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); } if (flags & CT_CARD_DISABLED) iwl_tt_enter_ct_kill(priv); } if (!(flags & CT_CARD_DISABLED)) iwl_tt_exit_ct_kill(priv); if (flags & HW_CARD_DISABLED) set_bit(STATUS_RF_KILL_HW, &priv->status); else clear_bit(STATUS_RF_KILL_HW, &priv->status); if (!(flags & RXON_CARD_DISABLED)) iwl_scan_cancel(priv); if ((test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))) wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); else wake_up(&priv->wait_command_queue); } static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacon_notif *missed_beacon; missed_beacon = &pkt->u.missed_beacon; if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > priv->missed_beacon_threshold) { IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n", le32_to_cpu(missed_beacon->consecutive_missed_beacons), le32_to_cpu(missed_beacon->total_missed_becons), le32_to_cpu(missed_beacon->num_recvd_beacons), le32_to_cpu(missed_beacon->num_expected_beacons)); if (!test_bit(STATUS_SCANNING, &priv->status)) iwl_init_sensitivity(priv); } } /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. 
*/ static void iwl_rx_reply_rx_phy(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); priv->_agn.last_phy_res_valid = true; memcpy(&priv->_agn.last_phy_res, pkt->u.raw, sizeof(struct iwl_rx_phy_res)); } /* * returns non-zero if packet should be dropped */ static int iwl_set_decrypted_flag(struct iwl_priv *priv, struct ieee80211_hdr *hdr, u32 decrypt_res, struct ieee80211_rx_status *stats) { u16 fc = le16_to_cpu(hdr->frame_control); /* * All contexts have the same setting here due to it being * a module parameter, so OK to check any context. */ if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) return 0; if (!(fc & IEEE80211_FCTL_PROTECTED)) return 0; IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { case RX_RES_STATUS_SEC_TYPE_TKIP: /* The uCode has got a bad phase 1 Key, pushes the packet. * Decryption will be done in SW. */ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_ICV_MIC) { /* bad ICV, the packet is destroyed since the * decryption is inplace, drop it */ IWL_DEBUG_RX(priv, "Packet destroyed\n"); return -1; } case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); stats->flag |= RX_FLAG_DECRYPTED; } break; default: break; } return 0; } static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, struct ieee80211_hdr *hdr, u16 len, u32 ampdu_status, struct iwl_rx_mem_buffer *rxb, struct ieee80211_rx_status *stats) { struct sk_buff *skb; __le16 fc = hdr->frame_control; struct iwl_rxon_context *ctx; /* We only process data packets if the interface is open */ if (unlikely(!priv->is_open)) { IWL_DEBUG_DROP_LIMIT(priv, "Dropping packet while interface is not 
open.\n"); return; } /* In case of HW accelerated crypto and bad decryption, drop */ if (!iwlagn_mod_params.sw_crypto && iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) return; skb = dev_alloc_skb(128); if (!skb) { IWL_ERR(priv, "dev_alloc_skb failed\n"); return; } skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); iwl_update_stats(priv, false, fc, len); /* * Wake any queues that were stopped due to a passive channel tx * failure. This can happen because the regulatory enforcement in * the device waits for a beacon before allowing transmission, * sometimes even after already having transmitted frames for the * association because the new RXON may reset the information. */ if (unlikely(ieee80211_is_beacon(fc))) { for_each_context(priv, ctx) { if (!ctx->last_tx_rejected) continue; if (compare_ether_addr(hdr->addr3, ctx->active.bssid_addr)) continue; ctx->last_tx_rejected = false; iwl_wake_any_queue(priv, ctx); } } memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx(priv->hw, skb); rxb->page = NULL; } static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) { u32 decrypt_out = 0; if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == RX_RES_STATUS_STATION_FOUND) decrypt_out |= (RX_RES_STATUS_STATION_FOUND | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); /* packet was not encrypted */ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == RX_RES_STATUS_SEC_TYPE_NONE) return decrypt_out; /* packet was encrypted with unknown alg */ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == RX_RES_STATUS_SEC_TYPE_ERR) return decrypt_out; /* decryption was not done in HW */ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != RX_MPDU_RES_STATUS_DEC_DONE_MSK) return decrypt_out; switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { case RX_RES_STATUS_SEC_TYPE_CCMP: /* alg is CCM: check MIC only */ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) /* Bad MIC */ decrypt_out |= 
RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}

/*
 * Handle an incoming frame notification from the uCode.
 *
 * Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames).  Validates the frame
 * (PHY-config size, CRC/FIFO status), fills an ieee80211_rx_status from
 * the PHY result, and hands the frame to mac80211.
 */
static void iwl_rx_reply_rx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/*
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 * REPLY_RX: physical layer info is in this buffer
	 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		/* PHY result precedes the 802.11 header in this buffer;
		 * the packet status word trails the frame payload. */
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);
		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		/* MPDU path: reuse the PHY result cached by the earlier
		 * REPLY_RX_PHY_CMD notification. */
		if (!priv->_agn.last_phy_res_valid) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &priv->_agn.last_phy_res;
		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwl_translate_rx_status(priv,
				le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		/* NOTE(review): "/n" in this format string looks like a typo
		 * for "\n" — debug-only output, left as-is. */
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
				phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames the hardware flagged as corrupt or overflowed. */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
				le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					       rx_status.band);
	rx_status.rate_idx =
		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);

	iwl_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
		rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				    rxb, &rx_status);
}

/**
 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
*/ void iwl_setup_rx_handlers(struct iwl_priv *priv) { void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); handlers = priv->rx_handlers; handlers[REPLY_ERROR] = iwl_rx_reply_error; handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif; handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif; handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; /* * The same handler is used for both the REPLY to a discrete * statistics request from the host as well as for the periodic * statistics notifications (after received beacons) from the uCode. */ handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics; handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; iwl_setup_rx_scan_handlers(priv); handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif; /* Rx handlers */ handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; /* block ack */ handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; /* Set up hardware specific Rx handlers */ priv->cfg->ops->lib->rx_handler_setup(priv); }
gpl-2.0
gq213/linux-3.10.72
arch/arm/mach-tegra/cpuidle.c
2009
1258
/* * arch/arm/mach-tegra/cpuidle.c * * CPU idle driver for Tegra CPUs * * Copyright (c) 2010-2012, NVIDIA Corporation. * Copyright (c) 2011 Google, Inc. * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> * * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/module.h> #include "fuse.h" #include "cpuidle.h" static int __init tegra_cpuidle_init(void) { int ret; switch (tegra_chip_id) { case TEGRA20: ret = tegra20_cpuidle_init(); break; case TEGRA30: ret = tegra30_cpuidle_init(); break; case TEGRA114: ret = tegra114_cpuidle_init(); break; default: ret = -ENODEV; break; } return ret; } device_initcall(tegra_cpuidle_init);
gpl-2.0
sleshepic/epic_touch_kernel
drivers/rtc/rtc-ds1286.c
2521
10604
/*
 * DS1286 Real Time Clock interface for Linux
 *
 * Copyright (C) 1998, 1999, 2000 Ralf Baechle
 * Copyright (C) 2008 Thomas Bogendoerfer
 *
 * Based on code written by Paul Gortmaker.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
#include <linux/ds1286.h>
#include <linux/io.h>
#include <linux/slab.h>

#define DRV_VERSION		"1.0"

/* Per-device driver state, attached to the platform device as drvdata. */
struct ds1286_priv {
	struct rtc_device *rtc;
	u32 __iomem *rtcregs;	/* chip registers, one per 32-bit word */
	size_t size;		/* length of the mapped MMIO region */
	unsigned long baseaddr;	/* physical base, kept for release */
	spinlock_t lock;	/* serializes all register accesses */
};

/* Read one 8-bit register; registers sit on 32-bit word boundaries. */
static inline u8 ds1286_rtc_read(struct ds1286_priv *priv, int reg)
{
	return __raw_readl(&priv->rtcregs[reg]) & 0xff;
}

/* Write one 8-bit register (upper bits of the word are don't-care). */
static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
{
	__raw_writel(data, &priv->rtcregs[reg]);
}

/* Enable (clear RTC_TDM) or mask (set RTC_TDM) the time-of-day alarm. */
static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned char val;

	/* Allow or mask alarm interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	val = ds1286_rtc_read(priv, RTC_CMD);
	if (enabled)
		val &= ~RTC_TDM;
	else
		val |= RTC_TDM;
	ds1286_rtc_write(priv, val, RTC_CMD);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

#ifdef CONFIG_RTC_INTF_DEV

/*
 * Driver-specific ioctls: only the watchdog interrupt enable/disable
 * pair is handled here; everything else falls back to the RTC core.
 */
static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned char val;

	switch (cmd) {
	case RTC_WIE_OFF:	/* Mask watchdog int. enab. bit */
		spin_lock_irqsave(&priv->lock, flags);
		val = ds1286_rtc_read(priv, RTC_CMD);
		val |= RTC_WAM;
		ds1286_rtc_write(priv, val, RTC_CMD);
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	case RTC_WIE_ON:	/* Allow watchdog interrupts. */
		spin_lock_irqsave(&priv->lock, flags);
		val = ds1286_rtc_read(priv, RTC_CMD);
		val &= ~RTC_WAM;
		ds1286_rtc_write(priv, val, RTC_CMD);
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

#else
#define ds1286_ioctl NULL
#endif

#ifdef CONFIG_PROC_FS

/* Dump chip configuration (oscillator, alarm mode, interrupt setup)
 * into /proc/driver/rtc. */
static int ds1286_proc(struct device *dev, struct seq_file *seq)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned char month, cmd, amode;
	const char *s;

	month = ds1286_rtc_read(priv, RTC_MONTH);
	seq_printf(seq,
		   "oscillator\t: %s\n"
		   "square_wave\t: %s\n",
		   (month & RTC_EOSC) ? "disabled" : "enabled",
		   (month & RTC_ESQW) ? "disabled" : "enabled");

	/* Combine the three alarm "mask" bits into a 3-bit mode value. */
	amode = ((ds1286_rtc_read(priv, RTC_MINUTES_ALARM) & 0x80) >> 5) |
		((ds1286_rtc_read(priv, RTC_HOURS_ALARM) & 0x80) >> 6) |
		((ds1286_rtc_read(priv, RTC_DAY_ALARM) & 0x80) >> 7);
	switch (amode) {
	case 7:
		s = "each minute";
		break;
	case 3:
		s = "minutes match";
		break;
	case 1:
		s = "hours and minutes match";
		break;
	case 0:
		s = "days, hours and minutes match";
		break;
	default:
		s = "invalid";
		break;
	}
	seq_printf(seq, "alarm_mode\t: %s\n", s);

	cmd = ds1286_rtc_read(priv, RTC_CMD);
	seq_printf(seq,
		   "alarm_enable\t: %s\n"
		   "wdog_alarm\t: %s\n"
		   "alarm_mask\t: %s\n"
		   "wdog_alarm_mask\t: %s\n"
		   "interrupt_mode\t: %s\n"
		   "INTB_mode\t: %s_active\n"
		   "interrupt_pins\t: %s\n",
		   (cmd & RTC_TDF) ? "yes" : "no",
		   (cmd & RTC_WAF) ? "yes" : "no",
		   (cmd & RTC_TDM) ? "disabled" : "enabled",
		   (cmd & RTC_WAM) ? "disabled" : "enabled",
		   (cmd & RTC_PU_LVL) ? "pulse" : "level",
		   (cmd & RTC_IBH_LO) ? "low" : "high",
		   (cmd & RTC_IPSW) ? "unswapped" : "swapped");
	return 0;
}

#else
#define ds1286_proc NULL
#endif

/* Read the current time from the chip into @tm (always in BCD on-chip). */
static int ds1286_read_time(struct device *dev, struct rtc_time *tm)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned char save_control;
	unsigned long flags;
	unsigned long uip_watchdog = jiffies;

	/*
	 * read RTC once any update in progress is done. The update
	 * can take just over 2ms. We wait 10 to 20ms. There is no need to
	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
	 * If you need to know *exactly* when a second has started, enable
	 * periodic update complete interrupts, (via ioctl) and then
	 * immediately read /dev/rtc which will block until you get the IRQ.
	 * Once the read clears, read the RTC time (again via ioctl). Easy.
	 */
	if (ds1286_rtc_read(priv, RTC_CMD) & RTC_TE)
		while (time_before(jiffies, uip_watchdog + 2*HZ/100))
			barrier();

	/*
	 * Only the values that we read from the RTC are set. We leave
	 * tm_wday, tm_yday and tm_isdst untouched. Even though the
	 * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
	 * by the RTC when initially set to a non-zero value.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	/* Freeze the registers (set RTC_TE) for a consistent snapshot. */
	save_control = ds1286_rtc_read(priv, RTC_CMD);
	ds1286_rtc_write(priv, (save_control|RTC_TE), RTC_CMD);

	tm->tm_sec = ds1286_rtc_read(priv, RTC_SECONDS);
	tm->tm_min = ds1286_rtc_read(priv, RTC_MINUTES);
	tm->tm_hour = ds1286_rtc_read(priv, RTC_HOURS) & 0x3f;
	tm->tm_mday = ds1286_rtc_read(priv, RTC_DATE);
	tm->tm_mon = ds1286_rtc_read(priv, RTC_MONTH) & 0x1f;
	tm->tm_year = ds1286_rtc_read(priv, RTC_YEAR);

	ds1286_rtc_write(priv, save_control, RTC_CMD);
	spin_unlock_irqrestore(&priv->lock, flags);

	tm->tm_sec = bcd2bin(tm->tm_sec);
	tm->tm_min = bcd2bin(tm->tm_min);
	tm->tm_hour = bcd2bin(tm->tm_hour);
	tm->tm_mday = bcd2bin(tm->tm_mday);
	tm->tm_mon = bcd2bin(tm->tm_mon);
	tm->tm_year = bcd2bin(tm->tm_year);

	/*
	 * Account for differences between how the RTC uses the values
	 * and how they are defined in a struct rtc_time;
	 * (chip epoch is 1940 — see the inverse mapping in set_time)
	 */
	if (tm->tm_year < 45)
		tm->tm_year += 30;
	tm->tm_year += 40;
	if (tm->tm_year < 70)
		tm->tm_year += 100;

	tm->tm_mon--;

	return rtc_valid_tm(tm);
}

/* Program the chip with @tm; rejects years before 1970 or past the
 * chip's 255-year unsigned range. */
static int ds1286_set_time(struct device *dev, struct rtc_time *tm)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned char mon, day, hrs, min, sec;
	unsigned char save_control;
	unsigned int yrs;
	unsigned long flags;

	yrs = tm->tm_year + 1900;
	mon = tm->tm_mon + 1;	/* tm_mon starts at zero */
	day = tm->tm_mday;
	hrs = tm->tm_hour;
	min = tm->tm_min;
	sec = tm->tm_sec;

	if (yrs < 1970)
		return -EINVAL;

	yrs -= 1940;
	if (yrs > 255)		/* They are unsigned */
		return -EINVAL;

	if (yrs >= 100)
		yrs -= 100;

	sec = bin2bcd(sec);
	min = bin2bcd(min);
	hrs = bin2bcd(hrs);
	day = bin2bcd(day);
	mon = bin2bcd(mon);
	yrs = bin2bcd(yrs);

	spin_lock_irqsave(&priv->lock, flags);
	/* Freeze updates (RTC_TE) while the registers are rewritten. */
	save_control = ds1286_rtc_read(priv, RTC_CMD);
	ds1286_rtc_write(priv, (save_control|RTC_TE), RTC_CMD);

	ds1286_rtc_write(priv, yrs, RTC_YEAR);
	ds1286_rtc_write(priv, mon, RTC_MONTH);
	ds1286_rtc_write(priv, day, RTC_DATE);
	ds1286_rtc_write(priv, hrs, RTC_HOURS);
	ds1286_rtc_write(priv, min, RTC_MINUTES);
	ds1286_rtc_write(priv, sec, RTC_SECONDS);
	ds1286_rtc_write(priv, 0, RTC_HUNDREDTH_SECOND);

	ds1286_rtc_write(priv, save_control, RTC_CMD);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

/* Report the programmed alarm time (hours/minutes/weekday only). */
static int ds1286_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned char cmd;
	unsigned long flags;

	/*
	 * Only the values that we read from the RTC are set. That
	 * means only tm_wday, tm_hour, tm_min.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	alm->time.tm_min = ds1286_rtc_read(priv, RTC_MINUTES_ALARM) & 0x7f;
	alm->time.tm_hour = ds1286_rtc_read(priv, RTC_HOURS_ALARM) & 0x1f;
	alm->time.tm_wday = ds1286_rtc_read(priv, RTC_DAY_ALARM) & 0x07;
	cmd = ds1286_rtc_read(priv, RTC_CMD);
	spin_unlock_irqrestore(&priv->lock, flags);

	alm->time.tm_min = bcd2bin(alm->time.tm_min);
	alm->time.tm_hour = bcd2bin(alm->time.tm_hour);
	alm->time.tm_sec = 0;
	return 0;
}

/*
 * Program a new alarm time.  Out-of-range hour/minute values are mapped
 * to 0xff ("don't care"); the chip has no seconds alarm so any nonzero
 * tm_sec is rejected.
 */
static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	struct ds1286_priv *priv = dev_get_drvdata(dev);
	unsigned char hrs, min, sec;

	hrs = alm->time.tm_hour;
	min = alm->time.tm_min;
	sec = alm->time.tm_sec;

	if (hrs >= 24)
		hrs = 0xff;

	if (min >= 60)
		min = 0xff;

	if (sec != 0)
		return -EINVAL;

	min = bin2bcd(min);
	hrs = bin2bcd(hrs);

	/* NOTE(review): plain spin_lock here while every other accessor
	 * uses the irqsave variant — presumably no irq-context caller;
	 * confirm before changing. */
	spin_lock(&priv->lock);
	ds1286_rtc_write(priv, hrs, RTC_HOURS_ALARM);
	ds1286_rtc_write(priv, min, RTC_MINUTES_ALARM);
	spin_unlock(&priv->lock);
	return 0;
}

static const struct rtc_class_ops ds1286_ops = {
	.ioctl		= ds1286_ioctl,
	.proc		= ds1286_proc,
	.read_time	= ds1286_read_time,
	.set_time	= ds1286_set_time,
	.read_alarm	= ds1286_read_alarm,
	.set_alarm	= ds1286_set_alarm,
	.alarm_irq_enable = ds1286_alarm_irq_enable,
};

/* Map the chip's MMIO window and register the RTC class device. */
static int __devinit ds1286_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct resource *res;
	struct ds1286_priv *priv;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	priv = kzalloc(sizeof(struct ds1286_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* equivalent to resource_size(res) */
	priv->size = res->end - res->start + 1;
	if (!request_mem_region(res->start, priv->size, pdev->name)) {
		ret = -EBUSY;
		goto out;
	}
	priv->baseaddr = res->start;
	priv->rtcregs = ioremap(priv->baseaddr, priv->size);
	if (!priv->rtcregs) {
		ret = -ENOMEM;
		goto out;
	}
	spin_lock_init(&priv->lock);
	platform_set_drvdata(pdev, priv);
	rtc = rtc_device_register("ds1286", &pdev->dev,
				  &ds1286_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		ret = PTR_ERR(rtc);
		goto out;
	}
	priv->rtc = rtc;
	return 0;

out:
	/* Unified error path: undo whatever was set up before the failure
	 * (NULL/0 fields mean "not yet acquired"). */
	if (priv->rtc)
		rtc_device_unregister(priv->rtc);
	if (priv->rtcregs)
		iounmap(priv->rtcregs);
	if (priv->baseaddr)
		release_mem_region(priv->baseaddr, priv->size);
	kfree(priv);
	return ret;
}

/* Tear down everything acquired in probe, in reverse order. */
static int __devexit ds1286_remove(struct platform_device *pdev)
{
	struct ds1286_priv *priv = platform_get_drvdata(pdev);

	rtc_device_unregister(priv->rtc);
	iounmap(priv->rtcregs);
	release_mem_region(priv->baseaddr, priv->size);
	kfree(priv);
	return 0;
}

static struct platform_driver ds1286_platform_driver = {
	.driver		= {
		.name	= "rtc-ds1286",
		.owner	= THIS_MODULE,
	},
	.probe		= ds1286_probe,
	.remove		= __devexit_p(ds1286_remove),
};

static int __init ds1286_init(void)
{
	return platform_driver_register(&ds1286_platform_driver);
}

static void __exit ds1286_exit(void)
{
	platform_driver_unregister(&ds1286_platform_driver);
}

MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1286");

module_init(ds1286_init);
module_exit(ds1286_exit);
gpl-2.0
argentinos/kernel-s5pc100
drivers/net/wireless/mwifiex/11n_rxreorder.c
2777
17857
/*
 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_rxreorder.h"

/*
 * This function dispatches all packets in the Rx reorder table until the
 * start window.
 *
 * There could be holes in the buffer, which are skipped by the function.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static void mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
				     struct mwifiex_rx_reorder_tbl *tbl,
				     int start_win)
{
	int pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;

	for (i = 0; i < pkt_to_send; ++i) {
		/* Detach the slot under the lock, process outside it. */
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		rx_tmp_ptr = NULL;
		if (tbl->rx_reorder_ptr[i]) {
			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
			tbl->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (rx_tmp_ptr)
			mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
		tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
		tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}

/*
 * This function dispatches all packets in the Rx reorder table until
 * a hole is found.
 *
 * The start window is adjusted automatically when a hole is located.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
					  struct mwifiex_rx_reorder_tbl *tbl)
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

	for (i = 0; i < tbl->win_size; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		if (!tbl->rx_reorder_ptr[i]) {
			/* First hole — stop dispatching here. */
			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
			break;
		}
		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
		tbl->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	if (i > 0) {
		xchg = tbl->win_size - i;
		for (j = 0; j < xchg; ++j) {
			tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
			tbl->rx_reorder_ptr[i + j] = NULL;
		}
	}
	/* Sequence numbers wrap at MAX_TID_VALUE. */
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}

/*
 * This function deletes the Rx reorder table and frees the memory.
 *
 * The function stops the associated timer and dispatches all the
 * pending packets in the Rx reorder table before deletion.
 */
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
{
	unsigned long flags;

	if (!tbl)
		return;

	/* Flush everything still buffered before tearing down. */
	mwifiex_11n_dispatch_pkt(priv, tbl,
				 (tbl->start_win + tbl->win_size) &
				 (MAX_TID_VALUE - 1));

	del_timer(&tbl->timer_context.timer);

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);
}

/*
 * This function returns the pointer to an entry in Rx reordering
 * table which matches the given TA/TID pair.
 */
static struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return tbl;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return NULL;
}

/*
 * This function finds the last sequence number used in the packets
 * buffered in Rx reordering table.
 *
 * Returns the highest occupied slot index, or -1 if the table is empty.
 */
static int
mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
{
	int i;

	for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i)
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
			return i;

	return -1;
}

/*
 * This function flushes all the packets in Rx reordering table.
 *
 * The function checks if any packets are currently buffered in the
 * table or not. In case there are packets available, it dispatches
 * them and then dumps the Rx reordering table.
 *
 * Runs as the reorder timer callback (timer context).
 */
static void mwifiex_flush_data(unsigned long context)
{
	struct reorder_tmr_cnxt *ctx =
		(struct reorder_tmr_cnxt *) context;
	int start_win;

	start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);

	if (start_win < 0)
		return;

	dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
	mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
				 (ctx->ptr->start_win + start_win + 1) &
				 (MAX_TID_VALUE - 1));
}

/*
 * This function creates an entry in Rx reordering table for the
 * given TA/TID.
 *
 * The function also initializes the entry with sequence number, window
 * size as well as initializes the timer.
 *
 * If the received TA/TID pair is already present, all the packets are
 * dispatched and the window size is moved until the SSN.
 */
static void
mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
				  int tid, int win_size, int seq_num)
{
	int i;
	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
	u16 last_seq = 0;
	unsigned long flags;

	/*
	 * If we get a TID, ta pair which is already present dispatch all the
	 * the packets and move the window size until the ssn
	 */
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (tbl) {
		mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
		return;
	}
	/* if !tbl then create one */
	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
	if (!new_node) {
		dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
			__func__);
		return;
	}

	INIT_LIST_HEAD(&new_node->list);
	new_node->tid = tid;
	memcpy(new_node->ta, ta, ETH_ALEN);
	new_node->start_win = seq_num;
	if (mwifiex_queuing_ra_based(priv))
		/* TODO for adhoc */
		dev_dbg(priv->adapter->dev,
			"info: ADHOC:last_seq=%d start_win=%d\n",
			last_seq, new_node->start_win);
	else
		last_seq = priv->rx_seq[tid];

	/* Skip past sequence numbers already delivered out of order. */
	if (last_seq >= new_node->start_win)
		new_node->start_win = last_seq + 1;

	new_node->win_size = win_size;

	new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
					   GFP_KERNEL);
	if (!new_node->rx_reorder_ptr) {
		/* NOTE(review): the (u8 *) cast is unnecessary for kfree. */
		kfree((u8 *) new_node);
		dev_err(priv->adapter->dev,
			"%s: failed to alloc reorder_ptr\n", __func__);
		return;
	}

	new_node->timer_context.ptr = new_node;
	new_node->timer_context.priv = priv;

	init_timer(&new_node->timer_context.timer);
	new_node->timer_context.timer.function = mwifiex_flush_data;
	new_node->timer_context.timer.data =
		(unsigned long) &new_node->timer_context;

	/* Redundant after kzalloc, but harmless. */
	for (i = 0; i < win_size; ++i)
		new_node->rx_reorder_ptr[i] = NULL;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}

/*
 * This function prepares command for adding a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_addba_req *add_ba_req =
		(struct host_cmd_ds_11n_addba_req *)
		&cmd->params.add_ba_req;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
	cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
	memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));

	return 0;
}

/*
 * This function prepares command for adding a BA response.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA response buffer
 *      - Ensuring correct endian-ness
 *
 * Also creates the Rx reorder table entry for the accepted stream.
 */
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
				  struct host_cmd_ds_command *cmd,
				  struct host_cmd_ds_11n_addba_req
				  *cmd_addba_req)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
		(struct host_cmd_ds_11n_addba_rsp *)
		&cmd->params.add_ba_rsp;
	u8 tid;
	int win_size;
	uint16_t block_ack_param_set;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
	cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);

	/* Echo the requester's identification back in the response. */
	memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
	       ETH_ALEN);
	add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
	add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
	add_ba_rsp->ssn = cmd_addba_req->ssn;

	block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
	/* Replace the requested buffer size with our own Rx window size. */
	block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
	/* We donot support AMSDU inside AMPDU, hence reset the bit */
	block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
	block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
				BLOCKACKPARAM_WINSIZE_POS);
	add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
	win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
		    & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
		   >> BLOCKACKPARAM_WINSIZE_POS;
	cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);

	mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
					  tid, win_size,
					  le16_to_cpu(cmd_addba_req->ssn));
	return 0;
}

/*
 * This function prepares command for deleting a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting del BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
		&cmd->params.del_ba;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
	cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
	memcpy(del_ba, data_buf, sizeof(*del_ba));

	return 0;
}

/*
 * This function identifies if Rx reordering is needed for a received packet.
 *
 * In case reordering is required, the function will do the reordering
 * before sending it to kernel.
 *
 * The Rx reorder table is checked first with the received TID/TA pair. If
 * not found, the received packet is dispatched immediately. But if found,
 * the packet is reordered and all the packets in the updated Rx reordering
 * table is dispatched until a hole is found.
 *
 * For sequence number less than the starting window, the packet is dropped.
 */
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
			       u16 seq_num, u16 tid,
			       u8 *ta, u8 pkt_type, void *payload)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	int start_win, end_win, win_size;
	u16 pkt_index;

	tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
					     tid, ta);
	if (!tbl) {
		/* No BA session: deliver immediately (BARs carry no data). */
		if (pkt_type != PKT_TYPE_BAR)
			mwifiex_process_rx_packet(priv->adapter, payload);
		return 0;
	}
	start_win = tbl->start_win;
	win_size = tbl->win_size;
	end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	/* Re-arm the flush timer for this stream. */
	del_timer(&tbl->timer_context.timer);
	mod_timer(&tbl->timer_context.timer,
		  jiffies + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000);

	/*
	 * If seq_num is less then starting win then ignore and drop the
	 * packet
	 */
	if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {	/* Wrap */
		if (seq_num >= ((start_win + TWOPOW11) &
				(MAX_TID_VALUE - 1)) && (seq_num < start_win))
			return -1;
	} else if ((seq_num < start_win) ||
		   (seq_num > (start_win + TWOPOW11))) {
		return -1;
	}

	/*
	 * If this packet is a BAR we adjust seq_num as
	 * WinStart = seq_num
	 */
	if (pkt_type == PKT_TYPE_BAR)
		seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);

	/* Sequence beyond the current window: slide the window forward,
	 * dispatching whatever falls behind the new start. */
	if (((end_win < start_win) &&
	     (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) &&
	     (seq_num > end_win)) ||
	    ((end_win > start_win) && ((seq_num > end_win) ||
				       (seq_num < start_win)))) {
		end_win = seq_num;
		if (((seq_num - win_size) + 1) >= 0)
			start_win = (end_win - win_size) + 1;
		else
			start_win = (MAX_TID_VALUE -
				     (win_size - seq_num)) + 1;
		mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
	}

	if (pkt_type != PKT_TYPE_BAR) {
		if (seq_num >= start_win)
			pkt_index = seq_num - start_win;
		else
			pkt_index = (seq_num + MAX_TID_VALUE) - start_win;

		/* Duplicate for an already-buffered slot: drop. */
		if (tbl->rx_reorder_ptr[pkt_index])
			return -1;

		tbl->rx_reorder_ptr[pkt_index] = payload;
	}

	/*
	 * Dispatch all packets sequentially from start_win until a
	 * hole is found and adjust the start_win appropriately
	 */
	mwifiex_11n_scan_and_dispatch(priv, tbl);

	return 0;
}

/*
 * This function deletes an entry for a given TID/TA pair.
 *
 * The TID/TA are taken from del BA event body.
 */
void mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
			u8 type, int initiator)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
	u8 cleanup_rx_reorder_tbl;
	unsigned long flags;

	/* Whether the Rx (reorder) or Tx (BA stream) side is torn down
	 * depends on who initiated the DELBA and in which direction. */
	if (type == TYPE_DELBA_RECEIVE)
		cleanup_rx_reorder_tbl = (initiator) ? true : false;
	else
		cleanup_rx_reorder_tbl = (initiator) ? false : true;

	dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
		peer_mac, tid, initiator);

	if (cleanup_rx_reorder_tbl) {
		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac);
		if (!tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, TA not found in table\n");
			return;
		}
		mwifiex_del_rx_reorder_entry(priv, tbl);
	} else {
		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
		if (!ptx_tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, RA not found in table\n");
			return;
		}
		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	}
}

/*
 * This function handles the command response of an add BA response.
 *
 * Handling includes changing the header fields into CPU format and
 * creating the stream, provided the add BA is accepted.
 */
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
		(struct host_cmd_ds_11n_addba_rsp *)
		&resp->params.add_ba_rsp;
	int tid, win_size;
	struct mwifiex_rx_reorder_tbl *tbl;
	uint16_t block_ack_param_set;

	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);

	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	/*
	 * Check if we had rejected the ADDBA, if yes then do not create
	 * the stream
	 */
	if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
		win_size = (block_ack_param_set &
			    IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
			   >> BLOCKACKPARAM_WINSIZE_POS;

		dev_dbg(priv->adapter->dev,
			"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
			add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn,
			win_size);
	} else {
		/* Rejected: drop any reorder state created optimistically. */
		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
			add_ba_rsp->peer_mac_addr, tid);

		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
						add_ba_rsp->peer_mac_addr);
		if (tbl)
			mwifiex_del_rx_reorder_entry(priv, tbl);
	}

	return 0;
}

/*
 * This function handles BA stream timeout event by preparing and sending
 * a command to the firmware.
 */
void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
				   struct host_cmd_ds_11n_batimeout *event)
{
	struct host_cmd_ds_11n_delba delba;

	memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
	memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);

	delba.del_ba_param_set |=
		cpu_to_le16((u16) event->tid << DELBA_TID_POS);
	/* NOTE(review): "origninator" is the field's actual (misspelled)
	 * name in the firmware event struct. */
	delba.del_ba_param_set |= cpu_to_le16(
		(u16) event->origninator << DELBA_INITIATOR_POS);
	delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
	mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
}

/*
 * This function cleans up the Rx reorder table by deleting all the entries
 * and re-initializing.
 */
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	/* The lock is dropped around each deletion because
	 * mwifiex_del_rx_reorder_entry() takes it itself. */
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
}
gpl-2.0
WarheadsSE/OX820-3.1-Linux
sound/soc/codecs/dmic.c
2777
2688
/*
 * dmic.c -- SoC audio for Generic Digital MICs
 *
 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Capture-only DAI: digital mics have no playback path. */
static struct snd_soc_dai_driver dmic_dai = {
	.name = "dmic-hifi",
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 8,
		.rates = SNDRV_PCM_RATE_CONTINUOUS,
		.formats = SNDRV_PCM_FMTBIT_S32_LE
			| SNDRV_PCM_FMTBIT_S24_LE
			| SNDRV_PCM_FMTBIT_S16_LE,
	},
};

/* DAPM: one input pin ("DMic") feeding the capture AIF stream. */
static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
	SND_SOC_DAPM_AIF_OUT("DMIC AIF", "Capture", 0,
			     SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_INPUT("DMic"),
};

static const struct snd_soc_dapm_route intercon[] = {
	{"DMIC AIF", NULL, "DMic"},
};

/* Codec probe: register the DAPM widgets and routes above. */
static int dmic_probe(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
				  ARRAY_SIZE(dmic_dapm_widgets));
	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
	snd_soc_dapm_new_widgets(dapm);

	return 0;
}

static struct snd_soc_codec_driver soc_dmic = {
	.probe = dmic_probe,
};

/* Platform glue: register/unregister the codec with ASoC. */
static int __devinit dmic_dev_probe(struct platform_device *pdev)
{
	return snd_soc_register_codec(&pdev->dev,
				      &soc_dmic, &dmic_dai, 1);
}

static int __devexit dmic_dev_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}

MODULE_ALIAS("platform:dmic-codec");

static struct platform_driver dmic_driver = {
	.driver = {
		.name = "dmic-codec",
		.owner = THIS_MODULE,
	},
	.probe = dmic_dev_probe,
	.remove = __devexit_p(dmic_dev_remove),
};

static int __init dmic_init(void)
{
	return platform_driver_register(&dmic_driver);
}
module_init(dmic_init);

static void __exit dmic_exit(void)
{
	platform_driver_unregister(&dmic_driver);
}
module_exit(dmic_exit);

MODULE_DESCRIPTION("Generic DMIC driver");
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_LICENSE("GPL");
gpl-2.0
Kra1o5/android_kernel_bq_rk3066
net/irda/ircomm/ircomm_tty.c
2777
39158
/********************************************************************* * * Filename: ircomm_tty.c * Version: 1.0 * Description: IrCOMM serial TTY driver * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Jun 6 21:00:56 1999 * Modified at: Wed Feb 23 00:09:02 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * Sources: serial.c and previous IrCOMM work by Takahide Higuchi * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/interrupt.h> #include <linux/device.h> /* for MODULE_ALIAS_CHARDEV_MAJOR */ #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/ircomm_core.h> #include <net/irda/ircomm_param.h> #include <net/irda/ircomm_tty_attach.h> #include <net/irda/ircomm_tty.h> static int ircomm_tty_open(struct tty_struct *tty, struct file *filp); static void ircomm_tty_close(struct tty_struct * tty, struct file *filp); static int ircomm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count); static int ircomm_tty_write_room(struct tty_struct *tty); static void ircomm_tty_throttle(struct tty_struct *tty); static void ircomm_tty_unthrottle(struct tty_struct *tty); static int ircomm_tty_chars_in_buffer(struct tty_struct *tty); static void ircomm_tty_flush_buffer(struct tty_struct *tty); static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); static void ircomm_tty_hangup(struct tty_struct *tty); static void ircomm_tty_do_softint(struct work_struct *work); static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); static void ircomm_tty_stop(struct tty_struct *tty); static int ircomm_tty_data_indication(void *instance, void *sap, struct sk_buff *skb); static int ircomm_tty_control_indication(void *instance, void *sap, struct sk_buff *skb); static void ircomm_tty_flow_indication(void *instance, void *sap, LOCAL_FLOW cmd); #ifdef 
CONFIG_PROC_FS static const struct file_operations ircomm_tty_proc_fops; #endif /* CONFIG_PROC_FS */ static struct tty_driver *driver; static hashbin_t *ircomm_tty = NULL; static const struct tty_operations ops = { .open = ircomm_tty_open, .close = ircomm_tty_close, .write = ircomm_tty_write, .write_room = ircomm_tty_write_room, .chars_in_buffer = ircomm_tty_chars_in_buffer, .flush_buffer = ircomm_tty_flush_buffer, .ioctl = ircomm_tty_ioctl, /* ircomm_tty_ioctl.c */ .tiocmget = ircomm_tty_tiocmget, /* ircomm_tty_ioctl.c */ .tiocmset = ircomm_tty_tiocmset, /* ircomm_tty_ioctl.c */ .throttle = ircomm_tty_throttle, .unthrottle = ircomm_tty_unthrottle, .send_xchar = ircomm_tty_send_xchar, .set_termios = ircomm_tty_set_termios, .stop = ircomm_tty_stop, .start = ircomm_tty_start, .hangup = ircomm_tty_hangup, .wait_until_sent = ircomm_tty_wait_until_sent, #ifdef CONFIG_PROC_FS .proc_fops = &ircomm_tty_proc_fops, #endif /* CONFIG_PROC_FS */ }; /* * Function ircomm_tty_init() * * Init IrCOMM TTY layer/driver * */ static int __init ircomm_tty_init(void) { driver = alloc_tty_driver(IRCOMM_TTY_PORTS); if (!driver) return -ENOMEM; ircomm_tty = hashbin_new(HB_LOCK); if (ircomm_tty == NULL) { IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); put_tty_driver(driver); return -ENOMEM; } driver->owner = THIS_MODULE; driver->driver_name = "ircomm"; driver->name = "ircomm"; driver->major = IRCOMM_TTY_MAJOR; driver->minor_start = IRCOMM_TTY_MINOR; driver->type = TTY_DRIVER_TYPE_SERIAL; driver->subtype = SERIAL_TYPE_NORMAL; driver->init_termios = tty_std_termios; driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &ops); if (tty_register_driver(driver)) { IRDA_ERROR("%s(): Couldn't register serial driver\n", __func__); put_tty_driver(driver); return -1; } return 0; } static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) { IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, 
return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); ircomm_tty_shutdown(self); self->magic = 0; kfree(self); } /* * Function ircomm_tty_cleanup () * * Remove IrCOMM TTY layer/driver * */ static void __exit ircomm_tty_cleanup(void) { int ret; IRDA_DEBUG(4, "%s()\n", __func__ ); ret = tty_unregister_driver(driver); if (ret) { IRDA_ERROR("%s(), failed to unregister driver\n", __func__); return; } hashbin_delete(ircomm_tty, (FREE_FUNC) __ircomm_tty_cleanup); put_tty_driver(driver); } /* * Function ircomm_startup (self) * * * */ static int ircomm_tty_startup(struct ircomm_tty_cb *self) { notify_t notify; int ret = -ENODEV; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if already open */ if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); return 0; } /* Register with IrCOMM */ irda_notify_init(&notify); /* These callbacks we must handle ourselves */ notify.data_indication = ircomm_tty_data_indication; notify.udata_indication = ircomm_tty_control_indication; notify.flow_indication = ircomm_tty_flow_indication; /* Use the ircomm_tty interface for these ones */ notify.disconnect_indication = ircomm_tty_disconnect_indication; notify.connect_confirm = ircomm_tty_connect_confirm; notify.connect_indication = ircomm_tty_connect_indication; strlcpy(notify.name, "ircomm_tty", sizeof(notify.name)); notify.instance = self; if (!self->ircomm) { self->ircomm = ircomm_open(&notify, self->service_type, self->line); } if (!self->ircomm) goto err; self->slsap_sel = self->ircomm->slsap_sel; /* Connect IrCOMM link with remote device */ ret = ircomm_tty_attach_cable(self); if (ret < 0) { IRDA_ERROR("%s(), error attaching cable!\n", __func__); goto err; } return 0; err: clear_bit(ASYNC_B_INITIALIZED, &self->flags); return ret; } /* * Function ircomm_block_til_ready (self, filp) * * * */ static int 
ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, struct file *filp) { DECLARE_WAITQUEUE(wait, current); int retval; int do_clocal = 0, extra_count = 0; unsigned long flags; struct tty_struct *tty; IRDA_DEBUG(2, "%s()\n", __func__ ); tty = self->tty; /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. */ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ /* nonblock mode is set or port is not enabled */ self->flags |= ASYNC_NORMAL_ACTIVE; IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); return 0; } if (tty->termios->c_cflag & CLOCAL) { IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); do_clocal = 1; } /* Wait for carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, self->open_count is dropped by one, so that * mgsl_close() knows when to free things. We restore it upon * exit, either normal or abnormal. */ retval = 0; add_wait_queue(&self->open_wait, &wait); IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count ); /* As far as I can see, we protect open_count - Jean II */ spin_lock_irqsave(&self->spinlock, flags); if (!tty_hung_up_p(filp)) { extra_count = 1; self->open_count--; } spin_unlock_irqrestore(&self->spinlock, flags); self->blocked_open++; while (1) { if (tty->termios->c_cflag & CBAUD) { /* Here, we use to lock those two guys, but * as ircomm_param_request() does it itself, * I don't see the point (and I see the deadlock). * Jean II */ self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR; ircomm_param_request(self, IRCOMM_DTE, TRUE); } current->state = TASK_INTERRUPTIBLE; if (tty_hung_up_p(filp) || !test_bit(ASYNC_B_INITIALIZED, &self->flags)) { retval = (self->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS; break; } /* * Check if link is ready now. 
Even if CLOCAL is * specified, we cannot return before the IrCOMM link is * ready */ if (!test_bit(ASYNC_B_CLOSING, &self->flags) && (do_clocal || (self->settings.dce & IRCOMM_CD)) && self->state == IRCOMM_TTY_READY) { break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count ); schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&self->open_wait, &wait); if (extra_count) { /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->open_count++; spin_unlock_irqrestore(&self->spinlock, flags); } self->blocked_open--; IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", __FILE__,__LINE__, tty->driver->name, self->open_count); if (!retval) self->flags |= ASYNC_NORMAL_ACTIVE; return retval; } /* * Function ircomm_tty_open (tty, filp) * * This routine is called when a particular tty device is opened. This * routine is mandatory; if this routine is not filled in, the attempted * open will fail with ENODEV. 
*/ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) { struct ircomm_tty_cb *self; unsigned int line; unsigned long flags; int ret; IRDA_DEBUG(2, "%s()\n", __func__ ); line = tty->index; if (line >= IRCOMM_TTY_PORTS) return -ENODEV; /* Check if instance already exists */ self = hashbin_lock_find(ircomm_tty, line, NULL); if (!self) { /* No, so make new instance */ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); if (self == NULL) { IRDA_ERROR("%s(), kmalloc failed!\n", __func__); return -ENOMEM; } self->magic = IRCOMM_TTY_MAGIC; self->flow = FLOW_STOP; self->line = line; INIT_WORK(&self->tqueue, ircomm_tty_do_softint); self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; self->close_delay = 5*HZ/10; self->closing_wait = 30*HZ; /* Init some important stuff */ init_timer(&self->watchdog_timer); init_waitqueue_head(&self->open_wait); init_waitqueue_head(&self->close_wait); spin_lock_init(&self->spinlock); /* * Force TTY into raw mode by default which is usually what * we want for IrCOMM and IrLPT. This way applications will * not have to twiddle with printcap etc. * * Note this is completely usafe and doesn't work properly */ tty->termios->c_iflag = 0; tty->termios->c_oflag = 0; /* Insert into hash */ hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); } /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->open_count++; tty->driver_data = self; self->tty = tty; spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, self->line, self->open_count); /* Not really used by us, but lets do it anyway */ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; /* * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || test_bit(ASYNC_B_CLOSING, &self->flags)) { /* Hm, why are we blocking on ASYNC_CLOSING if we * do return -EAGAIN/-ERESTARTSYS below anyway? * IMHO it's either not needed in the first place * or for some reason we need to make sure the async * closing has been finished - if so, wouldn't we * probably better sleep uninterruptible? */ if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", __func__); return -ERESTARTSYS; } #ifdef SERIAL_DO_RESTART return (self->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS; #else return -EAGAIN; #endif } /* Check if this is a "normal" ircomm device, or an irlpt device */ if (line < 0x10) { self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE; self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ ); } else { IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ ); self->service_type = IRCOMM_3_WIRE_RAW; self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ } ret = ircomm_tty_startup(self); if (ret) return ret; ret = ircomm_tty_block_til_ready(self, filp); if (ret) { IRDA_DEBUG(2, "%s(), returning after block_til_ready with %d\n", __func__ , ret); return ret; } return 0; } /* * Function ircomm_tty_close (tty, filp) * * This routine is called when a particular tty device is closed. 
* */ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); spin_lock_irqsave(&self->spinlock, flags); if (tty_hung_up_p(filp)) { spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(0, "%s(), returning 1\n", __func__ ); return; } if ((tty->count == 1) && (self->open_count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. */ IRDA_DEBUG(0, "%s(), bad serial port count; " "tty->count is 1, state->count is %d\n", __func__ , self->open_count); self->open_count = 1; } if (--self->open_count < 0) { IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", __func__, self->line, self->open_count); self->open_count = 0; } if (self->open_count) { spin_unlock_irqrestore(&self->spinlock, flags); IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); return; } /* Hum... Should be test_and_set_bit ??? - Jean II */ set_bit(ASYNC_B_CLOSING, &self->flags); /* We need to unlock here (we were unlocking at the end of this * function), because tty_wait_until_sent() may schedule. * I don't know if the rest should be protected somehow, * so someone should check. - Jean II */ spin_unlock_irqrestore(&self->spinlock, flags); /* * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. 
*/ tty->closing = 1; if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, self->closing_wait); ircomm_tty_shutdown(self); tty_driver_flush_buffer(tty); tty_ldisc_flush(tty); tty->closing = 0; self->tty = NULL; if (self->blocked_open) { if (self->close_delay) schedule_timeout_interruptible(self->close_delay); wake_up_interruptible(&self->open_wait); } self->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&self->close_wait); } /* * Function ircomm_tty_flush_buffer (tty) * * * */ static void ircomm_tty_flush_buffer(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* * Let do_softint() do this to avoid race condition with * do_softint() ;-) */ schedule_work(&self->tqueue); } /* * Function ircomm_tty_do_softint (work) * * We use this routine to give the write wakeup to the user at at a * safe time (as fast as possible after write have completed). This * can be compared to the Tx interrupt. */ static void ircomm_tty_do_softint(struct work_struct *work) { struct ircomm_tty_cb *self = container_of(work, struct ircomm_tty_cb, tqueue); struct tty_struct *tty; unsigned long flags; struct sk_buff *skb, *ctrl_skb; IRDA_DEBUG(2, "%s()\n", __func__ ); if (!self || self->magic != IRCOMM_TTY_MAGIC) return; tty = self->tty; if (!tty) return; /* Unlink control buffer */ spin_lock_irqsave(&self->spinlock, flags); ctrl_skb = self->ctrl_skb; self->ctrl_skb = NULL; spin_unlock_irqrestore(&self->spinlock, flags); /* Flush control buffer if any */ if(ctrl_skb) { if(self->flow == FLOW_START) ircomm_control_request(self->ircomm, ctrl_skb); /* Drop reference count - see ircomm_ttp_data_request(). 
*/ dev_kfree_skb(ctrl_skb); } if (tty->hw_stopped) return; /* Unlink transmit buffer */ spin_lock_irqsave(&self->spinlock, flags); skb = self->tx_skb; self->tx_skb = NULL; spin_unlock_irqrestore(&self->spinlock, flags); /* Flush transmit buffer if any */ if (skb) { ircomm_tty_do_event(self, IRCOMM_TTY_DATA_REQUEST, skb, NULL); /* Drop reference count - see ircomm_ttp_data_request(). */ dev_kfree_skb(skb); } /* Check if user (still) wants to be waken up */ tty_wakeup(tty); } /* * Function ircomm_tty_write (tty, buf, count) * * This routine is called by the kernel to write a series of characters * to the tty device. The characters may come from user space or kernel * space. This routine will return the number of characters actually * accepted for writing. This routine is mandatory. */ static int ircomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; struct sk_buff *skb; int tailroom = 0; int len = 0; int size; IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count, tty->hw_stopped); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* We may receive packets from the TTY even before we have finished * our setup. Not cool. * The problem is that we don't know the final header and data size * to create the proper skb, so any skb we would create would have * bogus header and data size, so need care. * We use a bogus header size to safely detect this condition. * Another problem is that hw_stopped was set to 0 way before it * should be, so we would drop this skb. It should now be fixed. * One option is to not accept data until we are properly setup. * But, I suspect that when it happens, the ppp line discipline * just "drops" the data, which might screw up connect scripts. * The second option is to create a "safe skb", with large header * and small size (see ircomm_tty_open() for values). 
* We just need to make sure that when the real values get filled, * we don't mess up the original "safe skb" (see tx_data_size). * Jean II */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { IRDA_DEBUG(1, "%s() : not initialised\n", __func__); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* We didn't consume anything, TTY will retry */ return 0; #endif } if (count < 1) return 0; /* Protect our manipulation of self->tx_skb and related */ spin_lock_irqsave(&self->spinlock, flags); /* Fetch current transmit buffer */ skb = self->tx_skb; /* * Send out all the data we get, possibly as multiple fragmented * frames, but this will only happen if the data is larger than the * max data size. The normal case however is just the opposite, and * this function may be called multiple times, and will then actually * defragment the data and send it out as one packet as soon as * possible, but at a safer point in time */ while (count) { size = count; /* Adjust data size to the max data size */ if (size > self->max_data_size) size = self->max_data_size; /* * Do we already have a buffer ready for transmit, or do * we need to allocate a new frame */ if (skb) { /* * Any room for more data at the end of the current * transmit buffer? Cannot use skb_tailroom, since * dev_alloc_skb gives us a larger skb than we * requested * Note : use tx_data_size, because max_data_size * may have changed and we don't want to overwrite * the skb. 
- Jean II */ if ((tailroom = (self->tx_data_size - skb->len)) > 0) { /* Adjust data to tailroom */ if (size > tailroom) size = tailroom; } else { /* * Current transmit frame is full, so break * out, so we can send it as soon as possible */ break; } } else { /* Prepare a full sized frame */ skb = alloc_skb(self->max_data_size+ self->max_header_size, GFP_ATOMIC); if (!skb) { spin_unlock_irqrestore(&self->spinlock, flags); return -ENOBUFS; } skb_reserve(skb, self->max_header_size); self->tx_skb = skb; /* Remember skb size because max_data_size may * change later on - Jean II */ self->tx_data_size = self->max_data_size; } /* Copy data */ memcpy(skb_put(skb,size), buf + len, size); count -= size; len += size; } spin_unlock_irqrestore(&self->spinlock, flags); /* * Schedule a new thread which will transmit the frame as soon * as possible, but at a safe point in time. We do this so the * "user" can give us data multiple times, as PPP does (because of * its 256 byte tx buffer). We will then defragment and send out * all this data as one single packet. */ schedule_work(&self->tqueue); return len; } /* * Function ircomm_tty_write_room (tty) * * This routine returns the numbers of characters the tty driver will * accept for queuing to be written. This number is subject to change as * output buffers get emptied, or if the output flow control is acted. */ static int ircomm_tty_write_room(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; int ret; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* max_header_size tells us if the channel is initialised or not. */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) /* Don't bother us yet */ return 0; #endif /* Check if we are allowed to transmit any data. * hw_stopped is the regular flow control. 
* Jean II */ if (tty->hw_stopped) ret = 0; else { spin_lock_irqsave(&self->spinlock, flags); if (self->tx_skb) ret = self->tx_data_size - self->tx_skb->len; else ret = self->max_data_size; spin_unlock_irqrestore(&self->spinlock, flags); } IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret); return ret; } /* * Function ircomm_tty_wait_until_sent (tty, timeout) * * This routine waits until the device has written out all of the * characters in its transmitter FIFO. */ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long orig_jiffies, poll_time; unsigned long flags; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); orig_jiffies = jiffies; /* Set poll time to 200 ms */ poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); spin_lock_irqsave(&self->spinlock, flags); while (self->tx_skb && self->tx_skb->len) { spin_unlock_irqrestore(&self->spinlock, flags); schedule_timeout_interruptible(poll_time); spin_lock_irqsave(&self->spinlock, flags); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } spin_unlock_irqrestore(&self->spinlock, flags); current->state = TASK_RUNNING; } /* * Function ircomm_tty_throttle (tty) * * This routine notifies the tty driver that input buffers for the line * discipline are close to full, and it should somehow signal that no * more characters should be sent to the tty. */ static void ircomm_tty_throttle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* Software flow control? */ if (I_IXOFF(tty)) ircomm_tty_send_xchar(tty, STOP_CHAR(tty)); /* Hardware flow control? 
*/ if (tty->termios->c_cflag & CRTSCTS) { self->settings.dte &= ~IRCOMM_RTS; self->settings.dte |= IRCOMM_DELTA_RTS; ircomm_param_request(self, IRCOMM_DTE, TRUE); } ircomm_flow_request(self->ircomm, FLOW_STOP); } /* * Function ircomm_tty_unthrottle (tty) * * This routine notifies the tty drivers that it should signals that * characters can now be sent to the tty without fear of overrunning the * input buffers of the line disciplines. */ static void ircomm_tty_unthrottle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* Using software flow control? */ if (I_IXOFF(tty)) { ircomm_tty_send_xchar(tty, START_CHAR(tty)); } /* Using hardware flow control? */ if (tty->termios->c_cflag & CRTSCTS) { self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); ircomm_param_request(self, IRCOMM_DTE, TRUE); IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ ); } ircomm_flow_request(self->ircomm, FLOW_START); } /* * Function ircomm_tty_chars_in_buffer (tty) * * Indicates if there are any data in the buffer * */ static int ircomm_tty_chars_in_buffer(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; int len = 0; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); spin_lock_irqsave(&self->spinlock, flags); if (self->tx_skb) len = self->tx_skb->len; spin_unlock_irqrestore(&self->spinlock, flags); return len; } static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) { unsigned long flags; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); IRDA_DEBUG(0, "%s()\n", __func__ ); if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) return; ircomm_tty_detach_cable(self); spin_lock_irqsave(&self->spinlock, flags); del_timer(&self->watchdog_timer); /* Free parameter 
buffer */ if (self->ctrl_skb) { dev_kfree_skb(self->ctrl_skb); self->ctrl_skb = NULL; } /* Free transmit buffer */ if (self->tx_skb) { dev_kfree_skb(self->tx_skb); self->tx_skb = NULL; } if (self->ircomm) { ircomm_close(self->ircomm); self->ircomm = NULL; } spin_unlock_irqrestore(&self->spinlock, flags); } /* * Function ircomm_tty_hangup (tty) * * This routine notifies the tty driver that it should hangup the tty * device. * */ static void ircomm_tty_hangup(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned long flags; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); /* ircomm_tty_flush_buffer(tty); */ ircomm_tty_shutdown(self); /* I guess we need to lock here - Jean II */ spin_lock_irqsave(&self->spinlock, flags); self->flags &= ~ASYNC_NORMAL_ACTIVE; self->tty = NULL; self->open_count = 0; spin_unlock_irqrestore(&self->spinlock, flags); wake_up_interruptible(&self->open_wait); } /* * Function ircomm_tty_send_xchar (tty, ch) * * This routine is used to send a high-priority XON/XOFF character to * the device. */ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) { IRDA_DEBUG(0, "%s(), not impl\n", __func__ ); } /* * Function ircomm_tty_start (tty) * * This routine notifies the tty driver that it resume sending * characters to the tty device. */ void ircomm_tty_start(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; ircomm_flow_request(self->ircomm, FLOW_START); } /* * Function ircomm_tty_stop (tty) * * This routine notifies the tty driver that it should stop outputting * characters to the tty device. 
*/ static void ircomm_tty_stop(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); ircomm_flow_request(self->ircomm, FLOW_STOP); } /* * Function ircomm_check_modem_status (self) * * Check for any changes in the DCE's line settings. This function should * be called whenever the dce parameter settings changes, to update the * flow control settings and other things */ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) { struct tty_struct *tty; int status; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = self->tty; status = self->settings.dce; if (status & IRCOMM_DCE_DELTA_ANY) { /*wake_up_interruptible(&self->delta_msr_wait);*/ } if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { IRDA_DEBUG(2, "%s(), ircomm%d CD now %s...\n", __func__ , self->line, (status & IRCOMM_CD) ? 
"on" : "off"); if (status & IRCOMM_CD) { wake_up_interruptible(&self->open_wait); } else { IRDA_DEBUG(2, "%s(), Doing serial hangup..\n", __func__ ); if (tty) tty_hangup(tty); /* Hangup will remote the tty, so better break out */ return; } } if (self->flags & ASYNC_CTS_FLOW) { if (tty->hw_stopped) { if (status & IRCOMM_CTS) { IRDA_DEBUG(2, "%s(), CTS tx start...\n", __func__ ); tty->hw_stopped = 0; /* Wake up processes blocked on open */ wake_up_interruptible(&self->open_wait); schedule_work(&self->tqueue); return; } } else { if (!(status & IRCOMM_CTS)) { IRDA_DEBUG(2, "%s(), CTS tx stop...\n", __func__ ); tty->hw_stopped = 1; } } } } /* * Function ircomm_tty_data_indication (instance, sap, skb) * * Handle incoming data, and deliver it to the line discipline * */ static int ircomm_tty_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; IRDA_DEBUG(2, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); if (!self->tty) { IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); return 0; } /* * If we receive data when hardware is stopped then something is wrong. * We try to poll the peers line settings to check if we are up todate. * Devices like WinCE can do this, and since they don't send any * params, we can just as well declare the hardware for running. 
*/ if (self->tty->hw_stopped && (self->flow == FLOW_START)) { IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); ircomm_param_request(self, IRCOMM_POLL, TRUE); /* We can just as well declare the hardware for running */ ircomm_tty_send_initial_parameters(self); ircomm_tty_link_established(self); } /* * Use flip buffer functions since the code may be called from interrupt * context */ tty_insert_flip_string(self->tty, skb->data, skb->len); tty_flip_buffer_push(self->tty); /* No need to kfree_skb - see ircomm_ttp_data_indication() */ return 0; } /* * Function ircomm_tty_control_indication (instance, sap, skb) * * Parse all incoming parameters (easy!) * */ static int ircomm_tty_control_indication(void *instance, void *sap, struct sk_buff *skb) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); clen = skb->data[0]; irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len-1, clen), &ircomm_param_info); /* No need to kfree_skb - see ircomm_control_indication() */ return 0; } /* * Function ircomm_tty_flow_indication (instance, sap, cmd) * * This function is called by IrTTP when it wants us to slow down the * transmission of data. We just mark the hardware as stopped, and wait * for IrTTP to notify us that things are OK again. 
*/ static void ircomm_tty_flow_indication(void *instance, void *sap, LOCAL_FLOW cmd) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); tty = self->tty; switch (cmd) { case FLOW_START: IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); tty->hw_stopped = 0; /* ircomm_tty_do_softint will take care of the rest */ schedule_work(&self->tqueue); break; default: /* If we get here, something is very wrong, better stop */ case FLOW_STOP: IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); tty->hw_stopped = 1; break; } self->flow = cmd; } #ifdef CONFIG_PROC_FS static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) { char sep; seq_printf(m, "State: %s\n", ircomm_tty_state[self->state]); seq_puts(m, "Service type: "); if (self->service_type & IRCOMM_9_WIRE) seq_puts(m, "9_WIRE"); else if (self->service_type & IRCOMM_3_WIRE) seq_puts(m, "3_WIRE"); else if (self->service_type & IRCOMM_3_WIRE_RAW) seq_puts(m, "3_WIRE_RAW"); else seq_puts(m, "No common service type!\n"); seq_putc(m, '\n'); seq_printf(m, "Port name: %s\n", self->settings.port_name); seq_printf(m, "DTE status:"); sep = ' '; if (self->settings.dte & IRCOMM_RTS) { seq_printf(m, "%cRTS", sep); sep = '|'; } if (self->settings.dte & IRCOMM_DTR) { seq_printf(m, "%cDTR", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "DCE status:"); sep = ' '; if (self->settings.dce & IRCOMM_CTS) { seq_printf(m, "%cCTS", sep); sep = '|'; } if (self->settings.dce & IRCOMM_DSR) { seq_printf(m, "%cDSR", sep); sep = '|'; } if (self->settings.dce & IRCOMM_CD) { seq_printf(m, "%cCD", sep); sep = '|'; } if (self->settings.dce & IRCOMM_RI) { seq_printf(m, "%cRI", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "Configuration: "); if (!self->settings.null_modem) seq_puts(m, "DTE <-> DCE\n"); else seq_puts(m, "DTE <-> DTE (null modem emulation)\n"); seq_printf(m, "Data rate: %d\n", 
self->settings.data_rate); seq_puts(m, "Flow control:"); sep = ' '; if (self->settings.flow_control & IRCOMM_XON_XOFF_IN) { seq_printf(m, "%cXON_XOFF_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_XON_XOFF_OUT) { seq_printf(m, "%cXON_XOFF_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_RTS_CTS_IN) { seq_printf(m, "%cRTS_CTS_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_RTS_CTS_OUT) { seq_printf(m, "%cRTS_CTS_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_DSR_DTR_IN) { seq_printf(m, "%cDSR_DTR_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_DSR_DTR_OUT) { seq_printf(m, "%cDSR_DTR_OUT", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_ENQ_ACK_IN) { seq_printf(m, "%cENQ_ACK_IN", sep); sep = '|'; } if (self->settings.flow_control & IRCOMM_ENQ_ACK_OUT) { seq_printf(m, "%cENQ_ACK_OUT", sep); sep = '|'; } seq_putc(m, '\n'); seq_puts(m, "Flags:"); sep = ' '; if (self->flags & ASYNC_CTS_FLOW) { seq_printf(m, "%cASYNC_CTS_FLOW", sep); sep = '|'; } if (self->flags & ASYNC_CHECK_CD) { seq_printf(m, "%cASYNC_CHECK_CD", sep); sep = '|'; } if (self->flags & ASYNC_INITIALIZED) { seq_printf(m, "%cASYNC_INITIALIZED", sep); sep = '|'; } if (self->flags & ASYNC_LOW_LATENCY) { seq_printf(m, "%cASYNC_LOW_LATENCY", sep); sep = '|'; } if (self->flags & ASYNC_CLOSING) { seq_printf(m, "%cASYNC_CLOSING", sep); sep = '|'; } if (self->flags & ASYNC_NORMAL_ACTIVE) { seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep); sep = '|'; } seq_putc(m, '\n'); seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); seq_printf(m, "Open count: %d\n", self->open_count); seq_printf(m, "Max data size: %d\n", self->max_data_size); seq_printf(m, "Max header size: %d\n", self->max_header_size); if (self->tty) seq_printf(m, "Hardware: %s\n", self->tty->hw_stopped ? 
"Stopped" : "Running"); } static int ircomm_tty_proc_show(struct seq_file *m, void *v) { struct ircomm_tty_cb *self; unsigned long flags; spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags); self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty); while (self != NULL) { if (self->magic != IRCOMM_TTY_MAGIC) break; ircomm_tty_line_info(self, m); self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty); } spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags); return 0; } static int ircomm_tty_proc_open(struct inode *inode, struct file *file) { return single_open(file, ircomm_tty_proc_show, NULL); } static const struct file_operations ircomm_tty_proc_fops = { .owner = THIS_MODULE, .open = ircomm_tty_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* CONFIG_PROC_FS */ MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("IrCOMM serial TTY driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(IRCOMM_TTY_MAJOR); module_init(ircomm_tty_init); module_exit(ircomm_tty_cleanup);
gpl-2.0
farchanrifai/kernel_cancro
arch/arm/mach-s5p64x0/mach-smdk6450.c
4825
7275
/* linux/arch/arm/mach-s5p64x0/mach-smdk6450.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/pwm_backlight.h> #include <linux/fb.h> #include <linux/mmc/host.h> #include <video/platform_lcd.h> #include <asm/hardware/vic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/i2c.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/pll.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/s5p-time.h> #include <plat/backlight.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include <plat/sdhci.h> #include "common.h" #define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define SMDK6450_ULCON_DEFAULT S3C2410_LCON_CS8 #define SMDK6450_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S3C2440_UFCON_TXTRIG16 | \ S3C2410_UFCON_RXTRIG8) static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon = 
SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 [4] = { .hwport = 4, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 [5] = { .hwport = 5, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif }; /* Frame Buffer */ static struct s3c_fb_pd_win smdk6450_fb_win0 = { .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 24, }; static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = { .win[0] = &smdk6450_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, }; /* LCD power controller */ static void smdk6450_lte480_reset_power(struct plat_lcd_data *pd, unsigned int power) { int err; if (power) { err = gpio_request(S5P6450_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for lcd reset\n"); return; } gpio_direction_output(S5P6450_GPN(5), 1); gpio_set_value(S5P6450_GPN(5), 0); gpio_set_value(S5P6450_GPN(5), 1); gpio_free(S5P6450_GPN(5)); } } static struct plat_lcd_data smdk6450_lcd_power_data = { .set_power = smdk6450_lte480_reset_power, }; static struct platform_device smdk6450_lcd_lte480wv = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6450_lcd_power_data, }; static struct platform_device *smdk6450_devices[] __initdata = { &s3c_device_adc, &s3c_device_rtc, &s3c_device_i2c0, 
&s3c_device_i2c1, &s3c_device_ts, &s3c_device_wdt, &samsung_asoc_dma, &s5p6450_device_iis0, &s3c_device_fb, &smdk6450_lcd_lte480wv, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, /* s5p6450_device_spi0 will be added */ }; static struct s3c_sdhci_platdata smdk6450_hsmmc0_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c_sdhci_platdata smdk6450_hsmmc1_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, #if defined(CONFIG_S5P64X0_SD_CH1_8BIT) .max_width = 8, .host_caps = MMC_CAP_8_BIT_DATA, #endif }; static struct s3c_sdhci_platdata smdk6450_hsmmc2_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c2410_platform_i2c s5p6450_i2c0_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c0_cfg_gpio, }; static struct s3c2410_platform_i2c s5p6450_i2c1_data __initdata = { .flags = 0, .bus_num = 1, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c1_cfg_gpio, }; static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = { { I2C_BOARD_INFO("wm8580", 0x1b), }, { I2C_BOARD_INFO("24c08", 0x50), }, /* Samsung KS24C080C EEPROM */ }; static struct i2c_board_info smdk6450_i2c_devs1[] __initdata = { { I2C_BOARD_INFO("24c128", 0x57), },/* Samsung S524AD0XD1 EEPROM */ }; /* LCD Backlight data */ static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = { .no = S5P6450_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6450_bl_data = { .pwm_id = 1, }; static void __init smdk6450_map_io(void) { s5p64x0_init_io(NULL, 0); s3c24xx_init_clocks(19200000); s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void s5p6450_set_lcd_interface(void) { unsigned int cfg; /* select TFT LCD type (RGB I/F) */ cfg = __raw_readl(S5P64X0_SPCON0); cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK; cfg |= S5P64X0_SPCON0_LCD_SEL_RGB; __raw_writel(cfg, S5P64X0_SPCON0); } 
/*
 * smdk6450_machine_init - board-level device setup for the SMDK6450.
 *
 * Runs once at boot (via the MACHINE_START .init_machine hook below),
 * after smdk6450_map_io has set up static mappings, clocks and UARTs.
 * Hands each on-SoC controller its board-specific platform data, then
 * registers the whole device list with the platform bus.
 */
static void __init smdk6450_machine_init(void)
{
	/* NULL selects the driver's default touchscreen configuration. */
	s3c24xx_ts_set_platdata(NULL);

	/* I2C buses 0 and 1: attach bus config, then the slave devices
	 * (WM8580 codec + EEPROMs) declared in smdk6450_i2c_devs0/1. */
	s3c_i2c0_set_platdata(&s5p6450_i2c0_data);
	s3c_i2c1_set_platdata(&s5p6450_i2c1_data);
	i2c_register_board_info(0, smdk6450_i2c_devs0,
			ARRAY_SIZE(smdk6450_i2c_devs0));
	i2c_register_board_info(1, smdk6450_i2c_devs1,
			ARRAY_SIZE(smdk6450_i2c_devs1));

	/* PWM-driven LCD backlight (GPF15, PWM channel 1). */
	samsung_bl_set(&smdk6450_bl_gpio_info, &smdk6450_bl_data);

	/* Route the LCD block to the RGB interface before the framebuffer
	 * driver probes, then hand it the window/timing platform data. */
	s5p6450_set_lcd_interface();
	s3c_fb_set_platdata(&smdk6450_lcd_pdata);

	/* SDHCI channels 0-2: all use "no card-detect" platform data. */
	s3c_sdhci0_set_platdata(&smdk6450_hsmmc0_pdata);
	s3c_sdhci1_set_platdata(&smdk6450_hsmmc1_pdata);
	s3c_sdhci2_set_platdata(&smdk6450_hsmmc2_pdata);

	/* Must come after all *_set_platdata() calls so every device in
	 * smdk6450_devices probes with its platform data in place. */
	platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices));
}

/*
 * ARM machine descriptor: ties the SMDK6450 machine type to its IRQ,
 * I/O-mapping, timer, restart and init entry points.
 */
MACHINE_START(SMDK6450, "SMDK6450")
	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
	.atag_offset	= 0x100,
	.init_irq	= s5p6450_init_irq,
	.handle_irq	= vic_handle_irq,
	.map_io		= smdk6450_map_io,
	.init_machine	= smdk6450_machine_init,
	.timer		= &s5p_timer,
	.restart	= s5p64x0_restart,
MACHINE_END
gpl-2.0
3EleVen/android_kernel_motorola_ghost
arch/arm/mach-s5p64x0/mach-smdk6450.c
4825
7275
/* linux/arch/arm/mach-s5p64x0/mach-smdk6450.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/pwm_backlight.h> #include <linux/fb.h> #include <linux/mmc/host.h> #include <video/platform_lcd.h> #include <asm/hardware/vic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/i2c.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/pll.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/s5p-time.h> #include <plat/backlight.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include <plat/sdhci.h> #include "common.h" #define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define SMDK6450_ULCON_DEFAULT S3C2410_LCON_CS8 #define SMDK6450_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S3C2440_UFCON_TXTRIG16 | \ S3C2410_UFCON_RXTRIG8) static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon = 
SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 [4] = { .hwport = 4, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 [5] = { .hwport = 5, .flags = 0, .ucon = SMDK6450_UCON_DEFAULT, .ulcon = SMDK6450_ULCON_DEFAULT, .ufcon = SMDK6450_UFCON_DEFAULT, }, #endif }; /* Frame Buffer */ static struct s3c_fb_pd_win smdk6450_fb_win0 = { .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 24, }; static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = { .win[0] = &smdk6450_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, }; /* LCD power controller */ static void smdk6450_lte480_reset_power(struct plat_lcd_data *pd, unsigned int power) { int err; if (power) { err = gpio_request(S5P6450_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for lcd reset\n"); return; } gpio_direction_output(S5P6450_GPN(5), 1); gpio_set_value(S5P6450_GPN(5), 0); gpio_set_value(S5P6450_GPN(5), 1); gpio_free(S5P6450_GPN(5)); } } static struct plat_lcd_data smdk6450_lcd_power_data = { .set_power = smdk6450_lte480_reset_power, }; static struct platform_device smdk6450_lcd_lte480wv = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6450_lcd_power_data, }; static struct platform_device *smdk6450_devices[] __initdata = { &s3c_device_adc, &s3c_device_rtc, &s3c_device_i2c0, 
&s3c_device_i2c1, &s3c_device_ts, &s3c_device_wdt, &samsung_asoc_dma, &s5p6450_device_iis0, &s3c_device_fb, &smdk6450_lcd_lte480wv, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, /* s5p6450_device_spi0 will be added */ }; static struct s3c_sdhci_platdata smdk6450_hsmmc0_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c_sdhci_platdata smdk6450_hsmmc1_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, #if defined(CONFIG_S5P64X0_SD_CH1_8BIT) .max_width = 8, .host_caps = MMC_CAP_8_BIT_DATA, #endif }; static struct s3c_sdhci_platdata smdk6450_hsmmc2_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c2410_platform_i2c s5p6450_i2c0_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c0_cfg_gpio, }; static struct s3c2410_platform_i2c s5p6450_i2c1_data __initdata = { .flags = 0, .bus_num = 1, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6450_i2c1_cfg_gpio, }; static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = { { I2C_BOARD_INFO("wm8580", 0x1b), }, { I2C_BOARD_INFO("24c08", 0x50), }, /* Samsung KS24C080C EEPROM */ }; static struct i2c_board_info smdk6450_i2c_devs1[] __initdata = { { I2C_BOARD_INFO("24c128", 0x57), },/* Samsung S524AD0XD1 EEPROM */ }; /* LCD Backlight data */ static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = { .no = S5P6450_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6450_bl_data = { .pwm_id = 1, }; static void __init smdk6450_map_io(void) { s5p64x0_init_io(NULL, 0); s3c24xx_init_clocks(19200000); s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void s5p6450_set_lcd_interface(void) { unsigned int cfg; /* select TFT LCD type (RGB I/F) */ cfg = __raw_readl(S5P64X0_SPCON0); cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK; cfg |= S5P64X0_SPCON0_LCD_SEL_RGB; __raw_writel(cfg, S5P64X0_SPCON0); } 
/*
 * smdk6450_machine_init - board-level device setup for the SMDK6450.
 *
 * Runs once at boot (via the MACHINE_START .init_machine hook below),
 * after smdk6450_map_io has set up static mappings, clocks and UARTs.
 * Hands each on-SoC controller its board-specific platform data, then
 * registers the whole device list with the platform bus.
 */
static void __init smdk6450_machine_init(void)
{
	/* NULL selects the driver's default touchscreen configuration. */
	s3c24xx_ts_set_platdata(NULL);

	/* I2C buses 0 and 1: attach bus config, then the slave devices
	 * (WM8580 codec + EEPROMs) declared in smdk6450_i2c_devs0/1. */
	s3c_i2c0_set_platdata(&s5p6450_i2c0_data);
	s3c_i2c1_set_platdata(&s5p6450_i2c1_data);
	i2c_register_board_info(0, smdk6450_i2c_devs0,
			ARRAY_SIZE(smdk6450_i2c_devs0));
	i2c_register_board_info(1, smdk6450_i2c_devs1,
			ARRAY_SIZE(smdk6450_i2c_devs1));

	/* PWM-driven LCD backlight (GPF15, PWM channel 1). */
	samsung_bl_set(&smdk6450_bl_gpio_info, &smdk6450_bl_data);

	/* Route the LCD block to the RGB interface before the framebuffer
	 * driver probes, then hand it the window/timing platform data. */
	s5p6450_set_lcd_interface();
	s3c_fb_set_platdata(&smdk6450_lcd_pdata);

	/* SDHCI channels 0-2: all use "no card-detect" platform data. */
	s3c_sdhci0_set_platdata(&smdk6450_hsmmc0_pdata);
	s3c_sdhci1_set_platdata(&smdk6450_hsmmc1_pdata);
	s3c_sdhci2_set_platdata(&smdk6450_hsmmc2_pdata);

	/* Must come after all *_set_platdata() calls so every device in
	 * smdk6450_devices probes with its platform data in place. */
	platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices));
}

/*
 * ARM machine descriptor: ties the SMDK6450 machine type to its IRQ,
 * I/O-mapping, timer, restart and init entry points.
 */
MACHINE_START(SMDK6450, "SMDK6450")
	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
	.atag_offset	= 0x100,
	.init_irq	= s5p6450_init_irq,
	.handle_irq	= vic_handle_irq,
	.map_io		= smdk6450_map_io,
	.init_machine	= smdk6450_machine_init,
	.timer		= &s5p_timer,
	.restart	= s5p64x0_restart,
MACHINE_END
gpl-2.0
slz/delidded-kernel-n900t-note3
arch/arm/mach-pxa/pcm990-baseboard.c
4825
13653
/* * arch/arm/mach-pxa/pcm990-baseboard.c * Support for the Phytec phyCORE-PXA270 Development Platform (PCM-990). * * Refer * http://www.phytec.com/products/rdk/ARM-XScale/phyCORE-XScale-PXA270.html * for additional hardware info * * Author: Juergen Kilb * Created: April 05, 2005 * Copyright: Phytec Messtechnik GmbH * e-Mail: armlinux@phytec.de * * based on Intel Mainstone Board * * Copyright 2007 Juergen Beisert @ Pengutronix (j.beisert@pengutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/pwm_backlight.h> #include <media/soc_camera.h> #include <mach/camera.h> #include <asm/mach/map.h> #include <mach/pxa27x.h> #include <mach/audio.h> #include <mach/mmc.h> #include <mach/ohci.h> #include <mach/pcm990_baseboard.h> #include <mach/pxafb.h> #include "devices.h" #include "generic.h" static unsigned long pcm990_pin_config[] __initdata = { /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* USB */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, /* PWM0 */ GPIO16_PWM0_OUT, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, }; /* * pcm990_lcd_power - control power supply to the LCD * @on: 0 = switch off, 1 = switch on * * Called by the pxafb driver */ #ifndef CONFIG_PCM990_DISPLAY_NONE static void pcm990_lcd_power(int on, struct fb_var_screeninfo *var) { if (on) { /* enable LCD-Latches * power on LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = PCM990_CTRL_LCDPWR + PCM990_CTRL_LCDON; } else { /* disable LCD-Latches * power off LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = 0x00; } } #endif #if 
defined(CONFIG_PCM990_DISPLAY_SHARP) static struct pxafb_mode_info fb_info_sharp_lq084v1dg21 = { .pixclock = 28000, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 20, .left_margin = 103, .right_margin = 47, .vsync_len = 6, .upper_margin = 28, .lower_margin = 5, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_sharp_lq084v1dg21, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #elif defined(CONFIG_PCM990_DISPLAY_NEC) struct pxafb_mode_info fb_info_nec_nl6448bc20_18d = { .pixclock = 39720, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 32, .left_margin = 16, .right_margin = 48, .vsync_len = 2, .upper_margin = 12, .lower_margin = 17, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_nec_nl6448bc20_18d, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #endif static struct platform_pwm_backlight_data pcm990_backlight_data = { .pwm_id = 0, .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, }; static struct platform_device pcm990_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &pcm990_backlight_data, }, }; /* * The PCM-990 development baseboard uses PCM-027's hardware in the * following way: * * - LCD support is in use * - GPIO16 is output for back light on/off with PWM * - GPIO58 ... 
GPIO73 are outputs for display data * - GPIO74 is output output for LCDFCLK * - GPIO75 is output for LCDLCLK * - GPIO76 is output for LCDPCLK * - GPIO77 is output for LCDBIAS * - MMC support is in use * - GPIO32 is output for MMCCLK * - GPIO92 is MMDAT0 * - GPIO109 is MMDAT1 * - GPIO110 is MMCS0 * - GPIO111 is MMCS1 * - GPIO112 is MMCMD * - IDE/CF card is in use * - GPIO48 is output /POE * - GPIO49 is output /PWE * - GPIO50 is output /PIOR * - GPIO51 is output /PIOW * - GPIO54 is output /PCE2 * - GPIO55 is output /PREG * - GPIO56 is input /PWAIT * - GPIO57 is output /PIOS16 * - GPIO79 is output PSKTSEL * - GPIO85 is output /PCE1 * - FFUART is in use * - GPIO34 is input FFRXD * - GPIO35 is input FFCTS * - GPIO36 is input FFDCD * - GPIO37 is input FFDSR * - GPIO38 is input FFRI * - GPIO39 is output FFTXD * - GPIO40 is output FFDTR * - GPIO41 is output FFRTS * - BTUART is in use * - GPIO42 is input BTRXD * - GPIO43 is output BTTXD * - GPIO44 is input BTCTS * - GPIO45 is output BTRTS * - IRUART is in use * - GPIO46 is input STDRXD * - GPIO47 is output STDTXD * - AC97 is in use*) * - GPIO28 is input AC97CLK * - GPIO29 is input AC97DatIn * - GPIO30 is output AC97DatO * - GPIO31 is output AC97SYNC * - GPIO113 is output AC97_RESET * - SSP is in use * - GPIO23 is output SSPSCLK * - GPIO24 is output chip select to Max7301 * - GPIO25 is output SSPTXD * - GPIO26 is input SSPRXD * - GPIO27 is input for Max7301 IRQ * - GPIO53 is input SSPSYSCLK * - SSP3 is in use * - GPIO81 is output SSPTXD3 * - GPIO82 is input SSPRXD3 * - GPIO83 is output SSPSFRM * - GPIO84 is output SSPCLK3 * * Otherwise claimed GPIOs: * GPIO1 -> IRQ from user switch * GPIO9 -> IRQ from power management * GPIO10 -> IRQ from WML9712 AC97 controller * GPIO11 -> IRQ from IDE controller * GPIO12 -> IRQ from CF controller * GPIO13 -> IRQ from CF controller * GPIO14 -> GPIO free * GPIO15 -> /CS1 selects baseboard's Control CPLD (U7, 16 bit wide data path) * GPIO19 -> GPIO free * GPIO20 -> /SDCS2 * GPIO21 -> /CS3 PC 
card socket select * GPIO33 -> /CS5 network controller select * GPIO78 -> /CS2 (16 bit wide data path) * GPIO80 -> /CS4 (16 bit wide data path) * GPIO86 -> GPIO free * GPIO87 -> GPIO free * GPIO90 -> LED0 on CPU module * GPIO91 -> LED1 on CPI module * GPIO117 -> SCL * GPIO118 -> SDA */ static unsigned long pcm990_irq_enabled; static void pcm990_mask_ack_irq(struct irq_data *d) { int pcm990_irq = (d->irq - PCM027_IRQ(0)); PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq)); } static void pcm990_unmask_irq(struct irq_data *d) { int pcm990_irq = (d->irq - PCM027_IRQ(0)); /* the irq can be acknowledged only if deasserted, so it's done here */ PCM990_INTSETCLR |= 1 << pcm990_irq; PCM990_INTMSKENA = (pcm990_irq_enabled |= (1 << pcm990_irq)); } static struct irq_chip pcm990_irq_chip = { .irq_mask_ack = pcm990_mask_ack_irq, .irq_unmask = pcm990_unmask_irq, }; static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; do { /* clear our parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = PCM027_IRQ(0) + __ffs(pending); generic_handle_irq(irq); } pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; } while (pending); } static void __init pcm990_init_irq(void) { int irq; /* setup extra PCM990 irqs */ for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { irq_set_chip_and_handler(irq, &pcm990_irq_chip, handle_level_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ PCM990_INTSETCLR = 0xFF; irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); } static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, void *data) { int err; err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) printk(KERN_ERR "pcm990_mci_init: MMC/SD: can't request MMC " "card detect 
IRQ\n"); return err; } static void pcm990_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data *p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = PCM990_CTRL_MMC2PWR; else __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = ~PCM990_CTRL_MMC2PWR; } static void pcm990_mci_exit(struct device *dev, void *data) { free_irq(PCM027_MMCDET_IRQ, data); } #define MSECS_PER_JIFFY (1000/HZ) static struct pxamci_platform_data pcm990_mci_platform_data = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .init = pcm990_mci_init, .setpower = pcm990_mci_setpower, .exit = pcm990_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static struct pxaohci_platform_data pcm990_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW, .power_on_delay = 10, }; /* * PXA27x Camera specific stuff */ #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) static unsigned long pcm990_camera_pin_config[] = { /* CIF */ GPIO98_CIF_DD_0, GPIO105_CIF_DD_1, GPIO104_CIF_DD_2, GPIO103_CIF_DD_3, GPIO95_CIF_DD_4, GPIO94_CIF_DD_5, GPIO93_CIF_DD_6, GPIO108_CIF_DD_7, GPIO107_CIF_DD_8, GPIO106_CIF_DD_9, GPIO42_CIF_MCLK, GPIO45_CIF_PCLK, GPIO43_CIF_FV, GPIO44_CIF_LV, }; /* * CICR4: PCLK_EN: Pixel clock is supplied by the sensor * MCLK_EN: Master clock is generated by PXA * PCP: Data sampled on the falling edge of pixel clock */ struct pxacamera_platform_data pcm990_pxacamera_platform_data = { .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_10 | PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN/* | PXA_CAMERA_PCP*/, .mclk_10khz = 1000, }; #include <linux/i2c/pca953x.h> static struct pca953x_platform_data pca9536_data = { .gpio_base = PXA_NR_BUILTIN_GPIO, }; static int gpio_bus_switch = -EINVAL; static int pcm990_camera_set_bus_param(struct soc_camera_link *link, unsigned long flags) { 
if (gpio_bus_switch < 0) { if (flags == SOCAM_DATAWIDTH_10) return 0; else return -EINVAL; } if (flags & SOCAM_DATAWIDTH_8) gpio_set_value_cansleep(gpio_bus_switch, 1); else gpio_set_value_cansleep(gpio_bus_switch, 0); return 0; } static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link) { int ret; if (gpio_bus_switch < 0) { ret = gpio_request(PXA_NR_BUILTIN_GPIO, "camera"); if (!ret) { gpio_bus_switch = PXA_NR_BUILTIN_GPIO; gpio_direction_output(gpio_bus_switch, 0); } } if (gpio_bus_switch >= 0) return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10; else return SOCAM_DATAWIDTH_10; } static void pcm990_camera_free_bus(struct soc_camera_link *link) { if (gpio_bus_switch < 0) return; gpio_free(gpio_bus_switch); gpio_bus_switch = -EINVAL; } /* Board I2C devices. */ static struct i2c_board_info __initdata pcm990_i2c_devices[] = { { /* Must initialize before the camera(s) */ I2C_BOARD_INFO("pca9536", 0x41), .platform_data = &pca9536_data, }, }; static struct i2c_board_info pcm990_camera_i2c[] = { { I2C_BOARD_INFO("mt9v022", 0x48), }, { I2C_BOARD_INFO("mt9m001", 0x5d), }, }; static struct soc_camera_link iclink[] = { { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[0], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, }, { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[1], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, }, }; static struct platform_device pcm990_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &iclink[0], }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { .platform_data = &iclink[1], }, }, }; #endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */ /* * enable generic access to the base board control CPLDs U6 and U7 */ static struct 
map_desc pcm990_io_desc[] __initdata = { { .virtual = PCM990_CTRL_BASE, .pfn = __phys_to_pfn(PCM990_CTRL_PHYS), .length = PCM990_CTRL_SIZE, .type = MT_DEVICE /* CPLD */ }, { .virtual = PCM990_CF_PLD_BASE, .pfn = __phys_to_pfn(PCM990_CF_PLD_PHYS), .length = PCM990_CF_PLD_SIZE, .type = MT_DEVICE /* CPLD */ } }; /* * system init for baseboard usage. Will be called by pcm027 init. * * Add platform devices present on this baseboard and init * them from CPU side as far as required to use them later on */ void __init pcm990_baseboard_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config)); /* register CPLD access */ iotable_init(ARRAY_AND_SIZE(pcm990_io_desc)); /* register CPLD's IRQ controller */ pcm990_init_irq(); #ifndef CONFIG_PCM990_DISPLAY_NONE pxa_set_fb_info(NULL, &pcm990_fbinfo); #endif platform_device_register(&pcm990_backlight_device); /* MMC */ pxa_set_mci_info(&pcm990_mci_platform_data); /* USB host */ pxa_set_ohci_info(&pcm990_ohci_platform_data); pxa_set_i2c_info(NULL); pxa_set_ac97_info(NULL); #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config)); pxa_set_camera_info(&pcm990_pxacamera_platform_data); i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices)); platform_device_register(&pcm990_camera[0]); platform_device_register(&pcm990_camera[1]); #endif printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n"); }
gpl-2.0
Jackeagle/kernel_samsung_exynos5260
drivers/pnp/resource.c
8153
16683
/* * resource.c - Contains functions for registering and analyzing resource information * * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2003 Adam Belay <ambx1@neo.rr.com> * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pnp.h> #include "base.h" static int pnp_reserve_irq[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some IRQ */ static int pnp_reserve_dma[8] = {[0 ... 7] = -1 }; /* reserve (don't use) some DMA */ static int pnp_reserve_io[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some I/O region */ static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some memory region */ /* * option registration */ struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type, unsigned int option_flags) { struct pnp_option *option; option = kzalloc(sizeof(struct pnp_option), GFP_KERNEL); if (!option) return NULL; option->flags = option_flags; option->type = type; list_add_tail(&option->list, &dev->options); return option; } int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags, pnp_irq_mask_t *map, unsigned char flags) { struct pnp_option *option; struct pnp_irq *irq; option = pnp_build_option(dev, IORESOURCE_IRQ, option_flags); if (!option) return -ENOMEM; irq = &option->u.irq; irq->map = *map; irq->flags = flags; #ifdef CONFIG_PCI { int i; for (i = 0; i < 16; i++) if (test_bit(i, irq->map.bits)) pcibios_penalize_isa_irq(i, 0); } #endif dbg_pnp_show_option(dev, option); return 0; } int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags, unsigned char map, unsigned char flags) { struct pnp_option *option; struct 
pnp_dma *dma; option = pnp_build_option(dev, IORESOURCE_DMA, option_flags); if (!option) return -ENOMEM; dma = &option->u.dma; dma->map = map; dma->flags = flags; dbg_pnp_show_option(dev, option); return 0; } int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t size, unsigned char flags) { struct pnp_option *option; struct pnp_port *port; option = pnp_build_option(dev, IORESOURCE_IO, option_flags); if (!option) return -ENOMEM; port = &option->u.port; port->min = min; port->max = max; port->align = align; port->size = size; port->flags = flags; dbg_pnp_show_option(dev, option); return 0; } int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t size, unsigned char flags) { struct pnp_option *option; struct pnp_mem *mem; option = pnp_build_option(dev, IORESOURCE_MEM, option_flags); if (!option) return -ENOMEM; mem = &option->u.mem; mem->min = min; mem->max = max; mem->align = align; mem->size = size; mem->flags = flags; dbg_pnp_show_option(dev, option); return 0; } void pnp_free_options(struct pnp_dev *dev) { struct pnp_option *option, *tmp; list_for_each_entry_safe(option, tmp, &dev->options, list) { list_del(&option->list); kfree(option); } } /* * resource validity checking */ #define length(start, end) (*(end) - *(start) + 1) /* Two ranges conflict if one doesn't end before the other starts */ #define ranged_conflict(starta, enda, startb, endb) \ !((*(enda) < *(startb)) || (*(endb) < *(starta))) #define cannot_compare(flags) \ ((flags) & IORESOURCE_DISABLED) int pnp_check_port(struct pnp_dev *dev, struct resource *res) { int i; struct pnp_dev *tdev; struct resource *tres; resource_size_t *port, *end, *tport, *tend; port = &res->start; end = &res->end; /* if the resource doesn't exist, don't complain about it */ if (cannot_compare(res->flags)) return 1; /* 
check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { if (__check_region(&ioport_resource, *port, length(port, end))) return 0; } /* check if the resource is reserved */ for (i = 0; i < 8; i++) { int rport = pnp_reserve_io[i << 1]; int rend = pnp_reserve_io[(i << 1) + 1] + rport - 1; if (ranged_conflict(port, end, &rport, &rend)) return 0; } /* check for internal conflicts */ for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { if (tres != res && tres->flags & IORESOURCE_IO) { tport = &tres->start; tend = &tres->end; if (ranged_conflict(port, end, tport, tend)) return 0; } } /* check for conflicts with other pnp devices */ pnp_for_each_dev(tdev) { if (tdev == dev) continue; for (i = 0; (tres = pnp_get_resource(tdev, IORESOURCE_IO, i)); i++) { if (tres->flags & IORESOURCE_IO) { if (cannot_compare(tres->flags)) continue; if (tres->flags & IORESOURCE_WINDOW) continue; tport = &tres->start; tend = &tres->end; if (ranged_conflict(port, end, tport, tend)) return 0; } } } return 1; } int pnp_check_mem(struct pnp_dev *dev, struct resource *res) { int i; struct pnp_dev *tdev; struct resource *tres; resource_size_t *addr, *end, *taddr, *tend; addr = &res->start; end = &res->end; /* if the resource doesn't exist, don't complain about it */ if (cannot_compare(res->flags)) return 1; /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { if (check_mem_region(*addr, length(addr, end))) return 0; } /* check if the resource is reserved */ for (i = 0; i < 8; i++) { int raddr = pnp_reserve_mem[i << 1]; int rend = pnp_reserve_mem[(i << 1) + 1] + raddr - 1; if (ranged_conflict(addr, end, &raddr, &rend)) return 0; } /* check for internal conflicts */ for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { if (tres != res && tres->flags & IORESOURCE_MEM) { taddr = &tres->start; tend = &tres->end; if 
(ranged_conflict(addr, end, taddr, tend)) return 0; } } /* check for conflicts with other pnp devices */ pnp_for_each_dev(tdev) { if (tdev == dev) continue; for (i = 0; (tres = pnp_get_resource(tdev, IORESOURCE_MEM, i)); i++) { if (tres->flags & IORESOURCE_MEM) { if (cannot_compare(tres->flags)) continue; if (tres->flags & IORESOURCE_WINDOW) continue; taddr = &tres->start; tend = &tres->end; if (ranged_conflict(addr, end, taddr, tend)) return 0; } } } return 1; } static irqreturn_t pnp_test_handler(int irq, void *dev_id) { return IRQ_HANDLED; } #ifdef CONFIG_PCI static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci, unsigned int irq) { u32 class; u8 progif; if (pci->irq == irq) { pnp_dbg(&pnp->dev, " device %s using irq %d\n", pci_name(pci), irq); return 1; } /* * See pci_setup_device() and ata_pci_sff_activate_host() for * similar IDE legacy detection. */ pci_read_config_dword(pci, PCI_CLASS_REVISION, &class); class >>= 8; /* discard revision ID */ progif = class & 0xff; class >>= 8; if (class == PCI_CLASS_STORAGE_IDE) { /* * Unless both channels are native-PCI mode only, * treat the compatibility IRQs as busy. 
*/ if ((progif & 0x5) != 0x5) if (pci_get_legacy_ide_irq(pci, 0) == irq || pci_get_legacy_ide_irq(pci, 1) == irq) { pnp_dbg(&pnp->dev, " legacy IDE device %s " "using irq %d\n", pci_name(pci), irq); return 1; } } return 0; } #endif static int pci_uses_irq(struct pnp_dev *pnp, unsigned int irq) { #ifdef CONFIG_PCI struct pci_dev *pci = NULL; for_each_pci_dev(pci) { if (pci_dev_uses_irq(pnp, pci, irq)) { pci_dev_put(pci); return 1; } } #endif return 0; } int pnp_check_irq(struct pnp_dev *dev, struct resource *res) { int i; struct pnp_dev *tdev; struct resource *tres; resource_size_t *irq; irq = &res->start; /* if the resource doesn't exist, don't complain about it */ if (cannot_compare(res->flags)) return 1; /* check if the resource is valid */ if (*irq < 0 || *irq > 15) return 0; /* check if the resource is reserved */ for (i = 0; i < 16; i++) { if (pnp_reserve_irq[i] == *irq) return 0; } /* check for internal conflicts */ for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) { if (tres != res && tres->flags & IORESOURCE_IRQ) { if (tres->start == *irq) return 0; } } /* check if the resource is being used by a pci device */ if (pci_uses_irq(dev, *irq)) return 0; /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { if (request_irq(*irq, pnp_test_handler, IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL)) return 0; free_irq(*irq, NULL); } /* check for conflicts with other pnp devices */ pnp_for_each_dev(tdev) { if (tdev == dev) continue; for (i = 0; (tres = pnp_get_resource(tdev, IORESOURCE_IRQ, i)); i++) { if (tres->flags & IORESOURCE_IRQ) { if (cannot_compare(tres->flags)) continue; if (tres->start == *irq) return 0; } } } return 1; } #ifdef CONFIG_ISA_DMA_API int pnp_check_dma(struct pnp_dev *dev, struct resource *res) { int i; struct pnp_dev *tdev; struct resource *tres; resource_size_t *dma; dma = &res->start; /* if the resource doesn't exist, don't complain about it */ if 
(cannot_compare(res->flags)) return 1; /* check if the resource is valid */ if (*dma < 0 || *dma == 4 || *dma > 7) return 0; /* check if the resource is reserved */ for (i = 0; i < 8; i++) { if (pnp_reserve_dma[i] == *dma) return 0; } /* check for internal conflicts */ for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) { if (tres != res && tres->flags & IORESOURCE_DMA) { if (tres->start == *dma) return 0; } } /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { if (request_dma(*dma, "pnp")) return 0; free_dma(*dma); } /* check for conflicts with other pnp devices */ pnp_for_each_dev(tdev) { if (tdev == dev) continue; for (i = 0; (tres = pnp_get_resource(tdev, IORESOURCE_DMA, i)); i++) { if (tres->flags & IORESOURCE_DMA) { if (cannot_compare(tres->flags)) continue; if (tres->start == *dma) return 0; } } } return 1; } #endif /* CONFIG_ISA_DMA_API */ unsigned long pnp_resource_type(struct resource *res) { return res->flags & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_IRQ | IORESOURCE_DMA | IORESOURCE_BUS); } struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type, unsigned int num) { struct pnp_resource *pnp_res; struct resource *res; list_for_each_entry(pnp_res, &dev->resources, list) { res = &pnp_res->res; if (pnp_resource_type(res) == type && num-- == 0) return res; } return NULL; } EXPORT_SYMBOL(pnp_get_resource); static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev) { struct pnp_resource *pnp_res; pnp_res = kzalloc(sizeof(struct pnp_resource), GFP_KERNEL); if (!pnp_res) return NULL; list_add_tail(&pnp_res->list, &dev->resources); return pnp_res; } struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, int flags) { struct pnp_resource *pnp_res; struct resource *res; pnp_res = pnp_new_resource(dev); if (!pnp_res) { dev_err(&dev->dev, "can't add resource for IRQ %d\n", irq); return NULL; } res = &pnp_res->res; res->flags = 
IORESOURCE_IRQ | flags; res->start = irq; res->end = irq; dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res); return pnp_res; } struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, int flags) { struct pnp_resource *pnp_res; struct resource *res; pnp_res = pnp_new_resource(dev); if (!pnp_res) { dev_err(&dev->dev, "can't add resource for DMA %d\n", dma); return NULL; } res = &pnp_res->res; res->flags = IORESOURCE_DMA | flags; res->start = dma; res->end = dma; dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res); return pnp_res; } struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev, resource_size_t start, resource_size_t end, int flags) { struct pnp_resource *pnp_res; struct resource *res; pnp_res = pnp_new_resource(dev); if (!pnp_res) { dev_err(&dev->dev, "can't add resource for IO %#llx-%#llx\n", (unsigned long long) start, (unsigned long long) end); return NULL; } res = &pnp_res->res; res->flags = IORESOURCE_IO | flags; res->start = start; res->end = end; dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res); return pnp_res; } struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, resource_size_t start, resource_size_t end, int flags) { struct pnp_resource *pnp_res; struct resource *res; pnp_res = pnp_new_resource(dev); if (!pnp_res) { dev_err(&dev->dev, "can't add resource for MEM %#llx-%#llx\n", (unsigned long long) start, (unsigned long long) end); return NULL; } res = &pnp_res->res; res->flags = IORESOURCE_MEM | flags; res->start = start; res->end = end; dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res); return pnp_res; } struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev, resource_size_t start, resource_size_t end) { struct pnp_resource *pnp_res; struct resource *res; pnp_res = pnp_new_resource(dev); if (!pnp_res) { dev_err(&dev->dev, "can't add resource for BUS %#llx-%#llx\n", (unsigned long long) start, (unsigned long long) end); return NULL; } res = &pnp_res->res; res->flags = IORESOURCE_BUS; res->start = start; res->end = 
end; dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res); return pnp_res; } /* * Determine whether the specified resource is a possible configuration * for this device. */ int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start, resource_size_t size) { struct pnp_option *option; struct pnp_port *port; struct pnp_mem *mem; struct pnp_irq *irq; struct pnp_dma *dma; list_for_each_entry(option, &dev->options, list) { if (option->type != type) continue; switch (option->type) { case IORESOURCE_IO: port = &option->u.port; if (port->min == start && port->size == size) return 1; break; case IORESOURCE_MEM: mem = &option->u.mem; if (mem->min == start && mem->size == size) return 1; break; case IORESOURCE_IRQ: irq = &option->u.irq; if (start < PNP_IRQ_NR && test_bit(start, irq->map.bits)) return 1; break; case IORESOURCE_DMA: dma = &option->u.dma; if (dma->map & (1 << start)) return 1; break; } } return 0; } EXPORT_SYMBOL(pnp_possible_config); int pnp_range_reserved(resource_size_t start, resource_size_t end) { struct pnp_dev *dev; struct pnp_resource *pnp_res; resource_size_t *dev_start, *dev_end; pnp_for_each_dev(dev) { list_for_each_entry(pnp_res, &dev->resources, list) { dev_start = &pnp_res->res.start; dev_end = &pnp_res->res.end; if (ranged_conflict(&start, &end, dev_start, dev_end)) return 1; } } return 0; } EXPORT_SYMBOL(pnp_range_reserved); /* format is: pnp_reserve_irq=irq1[,irq2] .... */ static int __init pnp_setup_reserve_irq(char *str) { int i; for (i = 0; i < 16; i++) if (get_option(&str, &pnp_reserve_irq[i]) != 2) break; return 1; } __setup("pnp_reserve_irq=", pnp_setup_reserve_irq); /* format is: pnp_reserve_dma=dma1[,dma2] .... */ static int __init pnp_setup_reserve_dma(char *str) { int i; for (i = 0; i < 8; i++) if (get_option(&str, &pnp_reserve_dma[i]) != 2) break; return 1; } __setup("pnp_reserve_dma=", pnp_setup_reserve_dma); /* format is: pnp_reserve_io=io1,size1[,io2,size2] .... 
*/ static int __init pnp_setup_reserve_io(char *str) { int i; for (i = 0; i < 16; i++) if (get_option(&str, &pnp_reserve_io[i]) != 2) break; return 1; } __setup("pnp_reserve_io=", pnp_setup_reserve_io); /* format is: pnp_reserve_mem=mem1,size1[,mem2,size2] .... */ static int __init pnp_setup_reserve_mem(char *str) { int i; for (i = 0; i < 16; i++) if (get_option(&str, &pnp_reserve_mem[i]) != 2) break; return 1; } __setup("pnp_reserve_mem=", pnp_setup_reserve_mem);
gpl-2.0
Altaf-Mahdi/flo
tools/perf/arch/x86/util/dwarf-regs.c
9433
1795
/* * dwarf-regs.c : Mapping of DWARF debug register numbers into register names. * Extracted from probe-finder.c * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <libio.h> #include <dwarf-regs.h> /* * Generic dwarf analysis helpers */ #define X86_32_MAX_REGS 8 const char *x86_32_regs_table[X86_32_MAX_REGS] = { "%ax", "%cx", "%dx", "%bx", "$stack", /* Stack address instead of %sp */ "%bp", "%si", "%di", }; #define X86_64_MAX_REGS 16 const char *x86_64_regs_table[X86_64_MAX_REGS] = { "%ax", "%dx", "%cx", "%bx", "%si", "%di", "%bp", "%sp", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", }; /* TODO: switching by dwarf address size */ #ifdef __x86_64__ #define ARCH_MAX_REGS X86_64_MAX_REGS #define arch_regs_table x86_64_regs_table #else #define ARCH_MAX_REGS X86_32_MAX_REGS #define arch_regs_table x86_32_regs_table #endif /* Return architecture dependent register string (for kprobe-tracer) */ const char *get_arch_regstr(unsigned int n) { return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL; }
gpl-2.0
nsingh94/caf-7x30
drivers/net/ethernet/chelsio/cxgb/my3126.c
12505
4663
/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */ #include "cphy.h" #include "elmer0.h" #include "suni1x10gexp_regs.h" /* Port Reset */ static int my3126_reset(struct cphy *cphy, int wait) { /* * This can be done through registers. It is not required since * a full chip reset is used. */ return 0; } static int my3126_interrupt_enable(struct cphy *cphy) { schedule_delayed_work(&cphy->phy_update, HZ/30); t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); return 0; } static int my3126_interrupt_disable(struct cphy *cphy) { cancel_delayed_work_sync(&cphy->phy_update); return 0; } static int my3126_interrupt_clear(struct cphy *cphy) { return 0; } #define OFFSET(REG_ADDR) (REG_ADDR << 2) static int my3126_interrupt_handler(struct cphy *cphy) { u32 val; u16 val16; u16 status; u32 act_count; adapter_t *adapter; adapter = cphy->adapter; if (cphy->count == 50) { cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); val16 = (u16) val; status = cphy->bmsr ^ val16; if (status & MDIO_STAT1_LSTATUS) t1_link_changed(adapter, 0); cphy->bmsr = val16; /* We have only enabled link change interrupts so it must be that */ cphy->count = 0; } t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL), SUNI1x10GEXP_BITMSK_MSTAT_SNAP); t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count); t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val); act_count += val; /* Populate elmer_gpo with the register value */ t1_tpi_read(adapter, A_ELMER0_GPO, &val); cphy->elmer_gpo = val; if ( (val & (1 << 8)) || (val & (1 << 19)) || (cphy->act_count == act_count) || cphy->act_on ) { if (is_T2(adapter)) val |= (1 << 9); else if (t1_is_T1B(adapter)) val |= (1 << 20); cphy->act_on = 0; } else { if (is_T2(adapter)) val &= ~(1 << 9); else if (t1_is_T1B(adapter)) val &= ~(1 << 20); cphy->act_on = 1; } t1_tpi_write(adapter, A_ELMER0_GPO, val); cphy->elmer_gpo = val; cphy->act_count = act_count; cphy->count++; return 
cphy_cause_link_change; } static void my3216_poll(struct work_struct *work) { struct cphy *cphy = container_of(work, struct cphy, phy_update.work); my3126_interrupt_handler(cphy); } static int my3126_set_loopback(struct cphy *cphy, int on) { return 0; } /* To check the activity LED */ static int my3126_get_link_status(struct cphy *cphy, int *link_ok, int *speed, int *duplex, int *fc) { u32 val; u16 val16; adapter_t *adapter; adapter = cphy->adapter; cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); val16 = (u16) val; /* Populate elmer_gpo with the register value */ t1_tpi_read(adapter, A_ELMER0_GPO, &val); cphy->elmer_gpo = val; *link_ok = (val16 & MDIO_STAT1_LSTATUS); if (*link_ok) { /* Turn on the LED. */ if (is_T2(adapter)) val &= ~(1 << 8); else if (t1_is_T1B(adapter)) val &= ~(1 << 19); } else { /* Turn off the LED. */ if (is_T2(adapter)) val |= (1 << 8); else if (t1_is_T1B(adapter)) val |= (1 << 19); } t1_tpi_write(adapter, A_ELMER0_GPO, val); cphy->elmer_gpo = val; *speed = SPEED_10000; *duplex = DUPLEX_FULL; /* need to add flow control */ if (fc) *fc = PAUSE_RX | PAUSE_TX; return 0; } static void my3126_destroy(struct cphy *cphy) { kfree(cphy); } static struct cphy_ops my3126_ops = { .destroy = my3126_destroy, .reset = my3126_reset, .interrupt_enable = my3126_interrupt_enable, .interrupt_disable = my3126_interrupt_disable, .interrupt_clear = my3126_interrupt_clear, .interrupt_handler = my3126_interrupt_handler, .get_link_status = my3126_get_link_status, .set_loopback = my3126_set_loopback, .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS), }; static struct cphy *my3126_phy_create(struct net_device *dev, int phy_addr, const struct mdio_ops *mdio_ops) { struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); if (!cphy) return NULL; cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); cphy->bmsr = 0; return cphy; } /* Chip Reset */ static int my3126_phy_reset(adapter_t * adapter) { u32 
val; t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~4; t1_tpi_write(adapter, A_ELMER0_GPO, val); msleep(100); t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); msleep(1000); /* Now lets enable the Laser. Delay 100us */ t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= 0x8000; t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(100); return 0; } const struct gphy t1_my3126_ops = { .create = my3126_phy_create, .reset = my3126_phy_reset };
gpl-2.0
charles1018/The-f2fs-filesystem
drivers/regulator/tps65023-regulator.c
218
11597
/* * tps65023-regulator.c * * Supports TPS65023 Regulator * * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> /* Register definitions */ #define TPS65023_REG_VERSION 0 #define TPS65023_REG_PGOODZ 1 #define TPS65023_REG_MASK 2 #define TPS65023_REG_REG_CTRL 3 #define TPS65023_REG_CON_CTRL 4 #define TPS65023_REG_CON_CTRL2 5 #define TPS65023_REG_DEF_CORE 6 #define TPS65023_REG_DEFSLEW 7 #define TPS65023_REG_LDO_CTRL 8 /* PGOODZ bitfields */ #define TPS65023_PGOODZ_PWRFAILZ BIT(7) #define TPS65023_PGOODZ_LOWBATTZ BIT(6) #define TPS65023_PGOODZ_VDCDC1 BIT(5) #define TPS65023_PGOODZ_VDCDC2 BIT(4) #define TPS65023_PGOODZ_VDCDC3 BIT(3) #define TPS65023_PGOODZ_LDO2 BIT(2) #define TPS65023_PGOODZ_LDO1 BIT(1) /* MASK bitfields */ #define TPS65023_MASK_PWRFAILZ BIT(7) #define TPS65023_MASK_LOWBATTZ BIT(6) #define TPS65023_MASK_VDCDC1 BIT(5) #define TPS65023_MASK_VDCDC2 BIT(4) #define TPS65023_MASK_VDCDC3 BIT(3) #define TPS65023_MASK_LDO2 BIT(2) #define TPS65023_MASK_LDO1 BIT(1) /* REG_CTRL bitfields */ #define TPS65023_REG_CTRL_VDCDC1_EN BIT(5) #define TPS65023_REG_CTRL_VDCDC2_EN BIT(4) #define TPS65023_REG_CTRL_VDCDC3_EN BIT(3) #define TPS65023_REG_CTRL_LDO2_EN BIT(2) #define TPS65023_REG_CTRL_LDO1_EN BIT(1) /* REG_CTRL2 bitfields */ #define 
TPS65023_REG_CTRL2_GO BIT(7) #define TPS65023_REG_CTRL2_CORE_ADJ BIT(6) #define TPS65023_REG_CTRL2_DCDC2 BIT(2) #define TPS65023_REG_CTRL2_DCDC1 BIT(1) #define TPS65023_REG_CTRL2_DCDC3 BIT(0) /* Number of step-down converters available */ #define TPS65023_NUM_DCDC 3 /* Number of LDO voltage regulators available */ #define TPS65023_NUM_LDO 2 /* Number of total regulators available */ #define TPS65023_NUM_REGULATOR (TPS65023_NUM_DCDC + TPS65023_NUM_LDO) /* DCDCs */ #define TPS65023_DCDC_1 0 #define TPS65023_DCDC_2 1 #define TPS65023_DCDC_3 2 /* LDOs */ #define TPS65023_LDO_1 3 #define TPS65023_LDO_2 4 #define TPS65023_MAX_REG_ID TPS65023_LDO_2 /* Supported voltage values for regulators */ static const unsigned int VCORE_VSEL_table[] = { 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1525000, 1550000, 1600000, }; static const unsigned int DCDC_FIXED_3300000_VSEL_table[] = { 3300000, }; static const unsigned int DCDC_FIXED_1800000_VSEL_table[] = { 1800000, }; /* Supported voltage values for LDO regulators for tps65020 */ static const unsigned int TPS65020_LDO_VSEL_table[] = { 1000000, 1050000, 1100000, 1300000, 1800000, 2500000, 3000000, 3300000, }; /* Supported voltage values for LDO regulators * for tps65021 and tps65023 */ static const unsigned int TPS65023_LDO1_VSEL_table[] = { 1000000, 1100000, 1300000, 1800000, 2200000, 2600000, 2800000, 3150000, }; static const unsigned int TPS65023_LDO2_VSEL_table[] = { 1050000, 1200000, 1300000, 1800000, 2500000, 2800000, 3000000, 3300000, }; /* Regulator specific details */ struct tps_info { const char *name; u8 table_len; const unsigned int *table; }; /* PMIC details */ struct tps_pmic { struct regulator_desc desc[TPS65023_NUM_REGULATOR]; struct regulator_dev *rdev[TPS65023_NUM_REGULATOR]; const struct tps_info 
*info[TPS65023_NUM_REGULATOR]; struct regmap *regmap; u8 core_regulator; }; /* Struct passed as driver data */ struct tps_driver_data { const struct tps_info *info; u8 core_regulator; }; static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev) { struct tps_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3) return -EINVAL; if (dcdc != tps->core_regulator) return 0; return regulator_get_voltage_sel_regmap(dev); } static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev, unsigned selector) { struct tps_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); if (dcdc != tps->core_regulator) return -EINVAL; return regulator_set_voltage_sel_regmap(dev, selector); } /* Operations permitted on VDCDCx */ static const struct regulator_ops tps65023_dcdc_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .get_voltage_sel = tps65023_dcdc_get_voltage_sel, .set_voltage_sel = tps65023_dcdc_set_voltage_sel, .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, }; /* Operations permitted on LDOx */ static const struct regulator_ops tps65023_ldo_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, }; static const struct regmap_config tps65023_regmap_config = { .reg_bits = 8, .val_bits = 8, }; static int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct tps_driver_data *drv_data = (void *)id->driver_data; const struct tps_info *info = drv_data->info; struct regulator_config config = { }; struct regulator_init_data *init_data; struct regulator_dev *rdev; struct tps_pmic *tps; int i; 
	int error;

	/*
	 * init_data points to an array of regulator init structures coming
	 * from the board-evm file, one entry per TPS65023 regulator.
	 */
	init_data = dev_get_platdata(&client->dev);
	if (!init_data)
		return -EIO;

	/* Device-managed allocation: freed automatically on driver detach. */
	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	tps->regmap = devm_regmap_init_i2c(client, &tps65023_regmap_config);
	if (IS_ERR(tps->regmap)) {
		error = PTR_ERR(tps->regmap);
		dev_err(&client->dev, "Failed to allocate register map: %d\n",
			error);
		return error;
	}

	/* common for all regulators */
	tps->core_regulator = drv_data->core_regulator;

	for (i = 0; i < TPS65023_NUM_REGULATOR; i++, info++, init_data++) {
		/* Store regulator specific information */
		tps->info[i] = info;

		tps->desc[i].name = info->name;
		tps->desc[i].id = i;
		tps->desc[i].n_voltages = info->table_len;
		tps->desc[i].volt_table = info->table;
		/* Indices above TPS65023_DCDC_3 are the two LDOs. */
		tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
					&tps65023_ldo_ops : &tps65023_dcdc_ops);
		tps->desc[i].type = REGULATOR_VOLTAGE;
		tps->desc[i].owner = THIS_MODULE;

		/* All enable bits live in the single REG_CTRL register. */
		tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL;
		switch (i) {
		case TPS65023_LDO_1:
			tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
			tps->desc[i].vsel_mask = 0x07;
			tps->desc[i].enable_mask = 1 << 1;
			break;
		case TPS65023_LDO_2:
			tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
			tps->desc[i].vsel_mask = 0x70;
			tps->desc[i].enable_mask = 1 << 2;
			break;
		default: /* DCDCx */
			/*
			 * Presumably maps the DCDC index to its enable bit in
			 * REG_CTRL (DCDC1 -> bit 4 ... DCDC3 -> bit 2) --
			 * TODO confirm against the TPS65023 datasheet.
			 */
			tps->desc[i].enable_mask =
					1 << (TPS65023_NUM_REGULATOR - i);
			tps->desc[i].vsel_reg = TPS65023_REG_DEF_CORE;
			/* table_len is a power of two, so len-1 is the mask. */
			tps->desc[i].vsel_mask = info->table_len - 1;
			/* Voltage changes only latch after the GO bit is set. */
			tps->desc[i].apply_reg = TPS65023_REG_CON_CTRL2;
			tps->desc[i].apply_bit = TPS65023_REG_CTRL2_GO;
		}

		config.dev = &client->dev;
		config.init_data = init_data;
		config.driver_data = tps;
		config.regmap = tps->regmap;

		/* Register the regulators */
		rdev = devm_regulator_register(&client->dev, &tps->desc[i],
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&client->dev, "failed to register %s\n",
				id->name);
			return PTR_ERR(rdev);
		}

		/* Save regulator for cleanup */
		tps->rdev[i] = rdev;
	}

	i2c_set_clientdata(client, tps);

	/* Enable setting output voltage by I2C */
	regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
			   TPS65023_REG_CTRL2_CORE_ADJ,
			   TPS65023_REG_CTRL2_CORE_ADJ);

	return 0;
}

/*
 * Per-chip-variant regulator voltage tables.  The *_VSEL_table arrays
 * referenced below are defined earlier in this file.
 */
static const struct tps_info tps65020_regs[] = {
	{
		.name = "VDCDC1",
		.table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
		.table = DCDC_FIXED_3300000_VSEL_table,
	},
	{
		.name = "VDCDC2",
		.table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
		.table = DCDC_FIXED_1800000_VSEL_table,
	},
	{
		.name = "VDCDC3",
		.table_len = ARRAY_SIZE(VCORE_VSEL_table),
		.table = VCORE_VSEL_table,
	},
	{
		.name = "LDO1",
		.table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
		.table = TPS65020_LDO_VSEL_table,
	},
	{
		.name = "LDO2",
		.table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
		.table = TPS65020_LDO_VSEL_table,
	},
};

static const struct tps_info tps65021_regs[] = {
	{
		.name = "VDCDC1",
		.table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
		.table = DCDC_FIXED_3300000_VSEL_table,
	},
	{
		.name = "VDCDC2",
		.table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
		.table = DCDC_FIXED_1800000_VSEL_table,
	},
	{
		.name = "VDCDC3",
		.table_len = ARRAY_SIZE(VCORE_VSEL_table),
		.table = VCORE_VSEL_table,
	},
	{
		.name = "LDO1",
		.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
		.table = TPS65023_LDO1_VSEL_table,
	},
	{
		.name = "LDO2",
		.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
		.table = TPS65023_LDO2_VSEL_table,
	},
};

/* Note: on the TPS65023 the adjustable core rail is DCDC1, not DCDC3. */
static const struct tps_info tps65023_regs[] = {
	{
		.name = "VDCDC1",
		.table_len = ARRAY_SIZE(VCORE_VSEL_table),
		.table = VCORE_VSEL_table,
	},
	{
		.name = "VDCDC2",
		.table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
		.table = DCDC_FIXED_3300000_VSEL_table,
	},
	{
		.name = "VDCDC3",
		.table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
		.table = DCDC_FIXED_1800000_VSEL_table,
	},
	{
		.name = "LDO1",
		.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
		.table = TPS65023_LDO1_VSEL_table,
	},
	{
		.name = "LDO2",
		.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
		.table = TPS65023_LDO2_VSEL_table,
	},
};

/* Per-variant driver data: voltage tables + which DCDC is the core rail. */
static struct tps_driver_data tps65020_drv_data = {
	.info = tps65020_regs,
	.core_regulator = TPS65023_DCDC_3,
};

static struct tps_driver_data tps65021_drv_data = {
	.info = tps65021_regs,
	.core_regulator = TPS65023_DCDC_3,
};

static struct tps_driver_data tps65023_drv_data = {
	.info = tps65023_regs,
	.core_regulator = TPS65023_DCDC_1,
};

/* I2C ids; .driver_data selects the matching tps_driver_data above. */
static const struct i2c_device_id tps_65023_id[] = {
	{.name = "tps65023",
	.driver_data = (unsigned long) &tps65023_drv_data},
	{.name = "tps65021",
	.driver_data = (unsigned long) &tps65021_drv_data,},
	{.name = "tps65020",
	.driver_data = (unsigned long) &tps65020_drv_data},
	{ },
};

MODULE_DEVICE_TABLE(i2c, tps_65023_id);

static struct i2c_driver tps_65023_i2c_driver = {
	.driver = {
		.name = "tps65023",
	},
	.probe = tps_65023_probe,
	.id_table = tps_65023_id,
};

/* Module init: register the I2C driver with the core. */
static int __init tps_65023_init(void)
{
	return i2c_add_driver(&tps_65023_i2c_driver);
}
/* subsys_initcall so the regulators exist before consumer drivers probe. */
subsys_initcall(tps_65023_init);

static void __exit tps_65023_cleanup(void)
{
	i2c_del_driver(&tps_65023_i2c_driver);
}
module_exit(tps_65023_cleanup);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TPS65023 voltage regulator driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
Eih3/CHIP-linux
drivers/gpu/drm/drm_bridge.c
218
2512
/* * Copyright (c) 2014 Samsung Electronics Co., Ltd * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
 */

#include <linux/err.h>
#include <linux/module.h>

#include <drm/drm_crtc.h>

#include "drm/drmP.h"

/* Protects bridge_list against concurrent add/remove/lookup. */
static DEFINE_MUTEX(bridge_lock);
/* Global registry of every bridge added via drm_bridge_add(). */
static LIST_HEAD(bridge_list);

/**
 * drm_bridge_add - add the given bridge to the global bridge registry
 * @bridge: bridge to register
 *
 * Returns: 0 unconditionally.
 */
int drm_bridge_add(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
	list_add_tail(&bridge->list, &bridge_list);
	mutex_unlock(&bridge_lock);

	return 0;
}
EXPORT_SYMBOL(drm_bridge_add);

/**
 * drm_bridge_remove - remove the given bridge from the global registry
 * @bridge: bridge to unregister
 *
 * Uses list_del_init() so a subsequent remove of the same bridge is safe.
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
	list_del_init(&bridge->list);
	mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_remove);

/**
 * drm_bridge_attach - attach a bridge to a DRM device
 * @dev: DRM device the bridge becomes part of
 * @bridge: bridge to attach
 *
 * Binds @bridge to @dev and invokes the bridge's optional ->attach()
 * callback.  A bridge can only be bound to one device at a time.
 *
 * Returns: 0 on success, -EINVAL on a NULL argument, -EBUSY if the
 * bridge is already attached, or the ->attach() callback's error code.
 */
int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
{
	if (!dev || !bridge)
		return -EINVAL;

	if (bridge->dev)
		return -EBUSY;

	bridge->dev = dev;

	if (bridge->funcs->attach)
		return bridge->funcs->attach(bridge);

	return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);

#ifdef CONFIG_OF
/**
 * of_drm_find_bridge - look up a registered bridge by device-tree node
 * @np: device node to match against bridge->of_node
 *
 * Returns: the matching bridge, or NULL if none has been registered.
 */
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
	struct drm_bridge *bridge;

	mutex_lock(&bridge_lock);

	list_for_each_entry(bridge, &bridge_list, list) {
		if (bridge->of_node == np) {
			mutex_unlock(&bridge_lock);
			return bridge;
		}
	}

	mutex_unlock(&bridge_lock);
	return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif

MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
gpl-2.0
e-yes/mini2440-kernel
drivers/media/video/mt9m111.c
474
27956
/* * Driver for MT9M111/MT9M112 CMOS Image Sensor from Micron * * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/videodev2.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/log2.h> #include <linux/gpio.h> #include <linux/delay.h> #include <media/v4l2-common.h> #include <media/v4l2-chip-ident.h> #include <media/soc_camera.h> /* * mt9m111 and mt9m112 i2c address is 0x5d or 0x48 (depending on SAddr pin) * The platform has to define i2c_board_info and call i2c_register_board_info() */ /* mt9m111: Sensor register addresses */ #define MT9M111_CHIP_VERSION 0x000 #define MT9M111_ROW_START 0x001 #define MT9M111_COLUMN_START 0x002 #define MT9M111_WINDOW_HEIGHT 0x003 #define MT9M111_WINDOW_WIDTH 0x004 #define MT9M111_HORIZONTAL_BLANKING_B 0x005 #define MT9M111_VERTICAL_BLANKING_B 0x006 #define MT9M111_HORIZONTAL_BLANKING_A 0x007 #define MT9M111_VERTICAL_BLANKING_A 0x008 #define MT9M111_SHUTTER_WIDTH 0x009 #define MT9M111_ROW_SPEED 0x00a #define MT9M111_EXTRA_DELAY 0x00b #define MT9M111_SHUTTER_DELAY 0x00c #define MT9M111_RESET 0x00d #define MT9M111_READ_MODE_B 0x020 #define MT9M111_READ_MODE_A 0x021 #define MT9M111_FLASH_CONTROL 0x023 #define MT9M111_GREEN1_GAIN 0x02b #define MT9M111_BLUE_GAIN 0x02c #define MT9M111_RED_GAIN 0x02d #define MT9M111_GREEN2_GAIN 0x02e #define MT9M111_GLOBAL_GAIN 0x02f #define MT9M111_CONTEXT_CONTROL 0x0c8 #define MT9M111_PAGE_MAP 0x0f0 #define MT9M111_BYTE_WISE_ADDR 0x0f1 #define MT9M111_RESET_SYNC_CHANGES (1 << 15) #define MT9M111_RESET_RESTART_BAD_FRAME (1 << 9) #define MT9M111_RESET_SHOW_BAD_FRAMES (1 << 8) #define MT9M111_RESET_RESET_SOC (1 << 5) #define MT9M111_RESET_OUTPUT_DISABLE (1 << 4) #define MT9M111_RESET_CHIP_ENABLE (1 << 3) #define MT9M111_RESET_ANALOG_STANDBY (1 << 2) #define MT9M111_RESET_RESTART_FRAME 
(1 << 1) #define MT9M111_RESET_RESET_MODE (1 << 0) #define MT9M111_RMB_MIRROR_COLS (1 << 1) #define MT9M111_RMB_MIRROR_ROWS (1 << 0) #define MT9M111_CTXT_CTRL_RESTART (1 << 15) #define MT9M111_CTXT_CTRL_DEFECTCOR_B (1 << 12) #define MT9M111_CTXT_CTRL_RESIZE_B (1 << 10) #define MT9M111_CTXT_CTRL_CTRL2_B (1 << 9) #define MT9M111_CTXT_CTRL_GAMMA_B (1 << 8) #define MT9M111_CTXT_CTRL_XENON_EN (1 << 7) #define MT9M111_CTXT_CTRL_READ_MODE_B (1 << 3) #define MT9M111_CTXT_CTRL_LED_FLASH_EN (1 << 2) #define MT9M111_CTXT_CTRL_VBLANK_SEL_B (1 << 1) #define MT9M111_CTXT_CTRL_HBLANK_SEL_B (1 << 0) /* * mt9m111: Colorpipe register addresses (0x100..0x1ff) */ #define MT9M111_OPER_MODE_CTRL 0x106 #define MT9M111_OUTPUT_FORMAT_CTRL 0x108 #define MT9M111_REDUCER_XZOOM_B 0x1a0 #define MT9M111_REDUCER_XSIZE_B 0x1a1 #define MT9M111_REDUCER_YZOOM_B 0x1a3 #define MT9M111_REDUCER_YSIZE_B 0x1a4 #define MT9M111_REDUCER_XZOOM_A 0x1a6 #define MT9M111_REDUCER_XSIZE_A 0x1a7 #define MT9M111_REDUCER_YZOOM_A 0x1a9 #define MT9M111_REDUCER_YSIZE_A 0x1aa #define MT9M111_OUTPUT_FORMAT_CTRL2_A 0x13a #define MT9M111_OUTPUT_FORMAT_CTRL2_B 0x19b #define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14) #define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1) #define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14) #define MT9M111_OUTFMT_BYPASS_IFP (1 << 10) #define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9) #define MT9M111_OUTFMT_RGB (1 << 8) #define MT9M111_OUTFMT_RGB565 (0x0 << 6) #define MT9M111_OUTFMT_RGB555 (0x1 << 6) #define MT9M111_OUTFMT_RGB444x (0x2 << 6) #define MT9M111_OUTFMT_RGBx444 (0x3 << 6) #define MT9M111_OUTFMT_TST_RAMP_OFF (0x0 << 4) #define MT9M111_OUTFMT_TST_RAMP_COL (0x1 << 4) #define MT9M111_OUTFMT_TST_RAMP_ROW (0x2 << 4) #define MT9M111_OUTFMT_TST_RAMP_FRAME (0x3 << 4) #define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3) #define MT9M111_OUTFMT_AVG_CHROMA (1 << 2) #define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1) #define MT9M111_OUTFMT_SWAP_RGB_EVEN (1 << 1) #define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr (1 << 0) /* * mt9m111: Camera 
control register addresses (0x200..0x2ff not implemented) */ #define reg_read(reg) mt9m111_reg_read(client, MT9M111_##reg) #define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val)) #define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val)) #define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val)) #define MT9M111_MIN_DARK_ROWS 8 #define MT9M111_MIN_DARK_COLS 24 #define MT9M111_MAX_HEIGHT 1024 #define MT9M111_MAX_WIDTH 1280 #define COL_FMT(_name, _depth, _fourcc, _colorspace) \ { .name = _name, .depth = _depth, .fourcc = _fourcc, \ .colorspace = _colorspace } #define RGB_FMT(_name, _depth, _fourcc) \ COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB) #define JPG_FMT(_name, _depth, _fourcc) \ COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_JPEG) static const struct soc_camera_data_format mt9m111_colour_formats[] = { JPG_FMT("CbYCrY 16 bit", 16, V4L2_PIX_FMT_UYVY), JPG_FMT("CrYCbY 16 bit", 16, V4L2_PIX_FMT_VYUY), JPG_FMT("YCbYCr 16 bit", 16, V4L2_PIX_FMT_YUYV), JPG_FMT("YCrYCb 16 bit", 16, V4L2_PIX_FMT_YVYU), RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565), RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555), RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16), RGB_FMT("Bayer (sRGB) 8 bit", 8, V4L2_PIX_FMT_SBGGR8), }; enum mt9m111_context { HIGHPOWER = 0, LOWPOWER, }; struct mt9m111 { struct v4l2_subdev subdev; int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */ enum mt9m111_context context; struct v4l2_rect rect; u32 pixfmt; unsigned int gain; unsigned char autoexposure; unsigned char datawidth; unsigned int powered:1; unsigned int hflip:1; unsigned int vflip:1; unsigned int swap_rgb_even_odd:1; unsigned int swap_rgb_red_blue:1; unsigned int swap_yuv_y_chromas:1; unsigned int swap_yuv_cb_cr:1; unsigned int autowhitebalance:1; }; static struct mt9m111 *to_mt9m111(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct mt9m111, subdev); } static int reg_page_map_set(struct 
i2c_client *client, const u16 reg) { int ret; u16 page; static int lastpage = -1; /* PageMap cache value */ page = (reg >> 8); if (page == lastpage) return 0; if (page > 2) return -EINVAL; ret = i2c_smbus_write_word_data(client, MT9M111_PAGE_MAP, swab16(page)); if (!ret) lastpage = page; return ret; } static int mt9m111_reg_read(struct i2c_client *client, const u16 reg) { int ret; ret = reg_page_map_set(client, reg); if (!ret) ret = swab16(i2c_smbus_read_word_data(client, reg & 0xff)); dev_dbg(&client->dev, "read reg.%03x -> %04x\n", reg, ret); return ret; } static int mt9m111_reg_write(struct i2c_client *client, const u16 reg, const u16 data) { int ret; ret = reg_page_map_set(client, reg); if (!ret) ret = i2c_smbus_write_word_data(client, reg & 0xff, swab16(data)); dev_dbg(&client->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret); return ret; } static int mt9m111_reg_set(struct i2c_client *client, const u16 reg, const u16 data) { int ret; ret = mt9m111_reg_read(client, reg); if (ret >= 0) ret = mt9m111_reg_write(client, reg, ret | data); return ret; } static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg, const u16 data) { int ret; ret = mt9m111_reg_read(client, reg); return mt9m111_reg_write(client, reg, ret & ~data); } static int mt9m111_set_context(struct i2c_client *client, enum mt9m111_context ctxt) { int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B | MT9M111_CTXT_CTRL_VBLANK_SEL_B | MT9M111_CTXT_CTRL_HBLANK_SEL_B; int valA = MT9M111_CTXT_CTRL_RESTART; if (ctxt == HIGHPOWER) return reg_write(CONTEXT_CONTROL, valB); else return reg_write(CONTEXT_CONTROL, valA); } static int mt9m111_setup_rect(struct i2c_client *client, struct v4l2_rect *rect) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret, is_raw_format; int width = rect->width; int height = rect->height; if (mt9m111->pixfmt == 
V4L2_PIX_FMT_SBGGR8 || mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) is_raw_format = 1; else is_raw_format = 0; ret = reg_write(COLUMN_START, rect->left); if (!ret) ret = reg_write(ROW_START, rect->top); if (is_raw_format) { if (!ret) ret = reg_write(WINDOW_WIDTH, width); if (!ret) ret = reg_write(WINDOW_HEIGHT, height); } else { if (!ret) ret = reg_write(REDUCER_XZOOM_B, MT9M111_MAX_WIDTH); if (!ret) ret = reg_write(REDUCER_YZOOM_B, MT9M111_MAX_HEIGHT); if (!ret) ret = reg_write(REDUCER_XSIZE_B, width); if (!ret) ret = reg_write(REDUCER_YSIZE_B, height); if (!ret) ret = reg_write(REDUCER_XZOOM_A, MT9M111_MAX_WIDTH); if (!ret) ret = reg_write(REDUCER_YZOOM_A, MT9M111_MAX_HEIGHT); if (!ret) ret = reg_write(REDUCER_XSIZE_A, width); if (!ret) ret = reg_write(REDUCER_YSIZE_A, height); } return ret; } static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt) { int ret; ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt); if (!ret) ret = reg_write(OUTPUT_FORMAT_CTRL2_B, outfmt); return ret; } static int mt9m111_setfmt_bayer8(struct i2c_client *client) { return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER); } static int mt9m111_setfmt_bayer10(struct i2c_client *client) { return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_BYPASS_IFP); } static int mt9m111_setfmt_rgb565(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_rgb_red_blue) val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr; if (mt9m111->swap_rgb_even_odd) val |= MT9M111_OUTFMT_SWAP_RGB_EVEN; val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565; return mt9m111_setup_pixfmt(client, val); } static int mt9m111_setfmt_rgb555(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_rgb_red_blue) val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr; if (mt9m111->swap_rgb_even_odd) val |= MT9M111_OUTFMT_SWAP_RGB_EVEN; val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555; return mt9m111_setup_pixfmt(client, val); } static int 
mt9m111_setfmt_yuv(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); int val = 0; if (mt9m111->swap_yuv_cb_cr) val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr; if (mt9m111->swap_yuv_y_chromas) val |= MT9M111_OUTFMT_SWAP_YCbCr_C_Y; return mt9m111_setup_pixfmt(client, val); } static int mt9m111_enable(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE); if (!ret) mt9m111->powered = 1; return ret; } static int mt9m111_reset(struct i2c_client *client) { int ret; ret = reg_set(RESET, MT9M111_RESET_RESET_MODE); if (!ret) ret = reg_set(RESET, MT9M111_RESET_RESET_SOC); if (!ret) ret = reg_clear(RESET, MT9M111_RESET_RESET_MODE | MT9M111_RESET_RESET_SOC); return ret; } static unsigned long mt9m111_query_bus_param(struct soc_camera_device *icd) { struct soc_camera_link *icl = to_soc_camera_link(icd); unsigned long flags = SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8; return soc_camera_apply_sensor_flags(icl, flags); } static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f) { return 0; } static int mt9m111_make_rect(struct i2c_client *client, struct v4l2_rect *rect) { struct mt9m111 *mt9m111 = to_mt9m111(client); if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) { /* Bayer format - even size lengths */ rect->width = ALIGN(rect->width, 2); rect->height = ALIGN(rect->height, 2); /* Let the user play with the starting pixel */ } /* FIXME: the datasheet doesn't specify minimum sizes */ soc_camera_limit_side(&rect->left, &rect->width, MT9M111_MIN_DARK_COLS, 2, MT9M111_MAX_WIDTH); soc_camera_limit_side(&rect->top, &rect->height, MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT); return mt9m111_setup_rect(client, rect); } static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct v4l2_rect rect = a->c; struct i2c_client 
*client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", __func__, rect.left, rect.top, rect.width, rect.height); ret = mt9m111_make_rect(client, &rect); if (!ret) mt9m111->rect = rect; return ret; } static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); a->c = mt9m111->rect; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { a->bounds.left = MT9M111_MIN_DARK_COLS; a->bounds.top = MT9M111_MIN_DARK_ROWS; a->bounds.width = MT9M111_MAX_WIDTH; a->bounds.height = MT9M111_MAX_HEIGHT; a->defrect = a->bounds; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int mt9m111_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); struct v4l2_pix_format *pix = &f->fmt.pix; pix->width = mt9m111->rect.width; pix->height = mt9m111->rect.height; pix->pixelformat = mt9m111->pixfmt; pix->field = V4L2_FIELD_NONE; pix->colorspace = V4L2_COLORSPACE_SRGB; return 0; } static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; switch (pixfmt) { case V4L2_PIX_FMT_SBGGR8: ret = mt9m111_setfmt_bayer8(client); break; case V4L2_PIX_FMT_SBGGR16: ret = mt9m111_setfmt_bayer10(client); break; case V4L2_PIX_FMT_RGB555: ret = mt9m111_setfmt_rgb555(client); break; case V4L2_PIX_FMT_RGB565: ret = mt9m111_setfmt_rgb565(client); break; case V4L2_PIX_FMT_UYVY: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_VYUY: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_YUYV: 
mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; case V4L2_PIX_FMT_YVYU: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); break; default: dev_err(&client->dev, "Pixel format not handled : %x\n", pixfmt); ret = -EINVAL; } if (!ret) mt9m111->pixfmt = pixfmt; return ret; } static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = mt9m111->rect.left, .top = mt9m111->rect.top, .width = pix->width, .height = pix->height, }; int ret; dev_dbg(&client->dev, "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__, pix->pixelformat, rect.left, rect.top, rect.width, rect.height); ret = mt9m111_make_rect(client, &rect); if (!ret) ret = mt9m111_set_pixfmt(client, pix->pixelformat); if (!ret) mt9m111->rect = rect; return ret; } static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; bool bayer = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || pix->pixelformat == V4L2_PIX_FMT_SBGGR16; /* * With Bayer format enforce even side lengths, but let the user play * with the starting pixel */ if (pix->height > MT9M111_MAX_HEIGHT) pix->height = MT9M111_MAX_HEIGHT; else if (pix->height < 2) pix->height = 2; else if (bayer) pix->height = ALIGN(pix->height, 2); if (pix->width > MT9M111_MAX_WIDTH) pix->width = MT9M111_MAX_WIDTH; else if (pix->width < 2) pix->width = 2; else if (bayer) pix->width = ALIGN(pix->width, 2); return 0; } static int mt9m111_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; if (id->match.addr != client->addr) return -ENODEV; id->ident = mt9m111->model; id->revision = 0; 
return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int mt9m111_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = sd->priv; int val; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; val = mt9m111_reg_read(client, reg->reg); reg->size = 2; reg->val = (u64)val; if (reg->val > 0xffff) return -EIO; return 0; } static int mt9m111_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = sd->priv; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; if (mt9m111_reg_write(client, reg->reg, reg->val) < 0) return -EIO; return 0; } #endif static const struct v4l2_queryctrl mt9m111_controls[] = { { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Flip Verticaly", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Flip Horizontaly", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { /* gain = 1/32*val (=>gain=1 if val==32) */ .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, .maximum = 63 * 2 * 2, .step = 1, .default_value = 32, .flags = V4L2_CTRL_FLAG_SLIDER, }, { .id = V4L2_CID_EXPOSURE_AUTO, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Exposure", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, } }; static int mt9m111_resume(struct soc_camera_device *icd); static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state); static struct soc_camera_ops mt9m111_ops = { .suspend = mt9m111_suspend, .resume = mt9m111_resume, .query_bus_param = mt9m111_query_bus_param, .set_bus_param = mt9m111_set_bus_param, .controls = mt9m111_controls, .num_controls = ARRAY_SIZE(mt9m111_controls), }; static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask) { struct 
mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (mt9m111->context == HIGHPOWER) { if (flip) ret = reg_set(READ_MODE_B, mask); else ret = reg_clear(READ_MODE_B, mask); } else { if (flip) ret = reg_set(READ_MODE_A, mask); else ret = reg_clear(READ_MODE_A, mask); } return ret; } static int mt9m111_get_global_gain(struct i2c_client *client) { int data; data = reg_read(GLOBAL_GAIN); if (data >= 0) return (data & 0x2f) * (1 << ((data >> 10) & 1)) * (1 << ((data >> 9) & 1)); return data; } static int mt9m111_set_global_gain(struct i2c_client *client, int gain) { struct mt9m111 *mt9m111 = to_mt9m111(client); u16 val; if (gain > 63 * 2 * 2) return -EINVAL; mt9m111->gain = gain; if ((gain >= 64 * 2) && (gain < 63 * 2 * 2)) val = (1 << 10) | (1 << 9) | (gain / 4); else if ((gain >= 64) && (gain < 64 * 2)) val = (1 << 9) | (gain / 2); else val = gain; return reg_write(GLOBAL_GAIN, val); } static int mt9m111_set_autoexposure(struct i2c_client *client, int on) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (on) ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN); else ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOEXPO_EN); if (!ret) mt9m111->autoexposure = on; return ret; } static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; if (on) ret = reg_set(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN); else ret = reg_clear(OPER_MODE_CTRL, MT9M111_OPMODE_AUTOWHITEBAL_EN); if (!ret) mt9m111->autowhitebalance = on; return ret; } static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); int data; switch (ctrl->id) { case V4L2_CID_VFLIP: if (mt9m111->context == HIGHPOWER) data = reg_read(READ_MODE_B); else data = reg_read(READ_MODE_A); if (data < 0) return -EIO; ctrl->value = !!(data & MT9M111_RMB_MIRROR_ROWS); break; case V4L2_CID_HFLIP: if (mt9m111->context == HIGHPOWER) data = 
reg_read(READ_MODE_B); else data = reg_read(READ_MODE_A); if (data < 0) return -EIO; ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS); break; case V4L2_CID_GAIN: data = mt9m111_get_global_gain(client); if (data < 0) return data; ctrl->value = data; break; case V4L2_CID_EXPOSURE_AUTO: ctrl->value = mt9m111->autoexposure; break; case V4L2_CID_AUTO_WHITE_BALANCE: ctrl->value = mt9m111->autowhitebalance; break; } return 0; } static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); const struct v4l2_queryctrl *qctrl; int ret; qctrl = soc_camera_find_qctrl(&mt9m111_ops, ctrl->id); if (!qctrl) return -EINVAL; switch (ctrl->id) { case V4L2_CID_VFLIP: mt9m111->vflip = ctrl->value; ret = mt9m111_set_flip(client, ctrl->value, MT9M111_RMB_MIRROR_ROWS); break; case V4L2_CID_HFLIP: mt9m111->hflip = ctrl->value; ret = mt9m111_set_flip(client, ctrl->value, MT9M111_RMB_MIRROR_COLS); break; case V4L2_CID_GAIN: ret = mt9m111_set_global_gain(client, ctrl->value); break; case V4L2_CID_EXPOSURE_AUTO: ret = mt9m111_set_autoexposure(client, ctrl->value); break; case V4L2_CID_AUTO_WHITE_BALANCE: ret = mt9m111_set_autowhitebalance(client, ctrl->value); break; default: ret = -EINVAL; } return ret; } static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct mt9m111 *mt9m111 = to_mt9m111(client); mt9m111->gain = mt9m111_get_global_gain(client); return 0; } static int mt9m111_restore_state(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); mt9m111_set_context(client, mt9m111->context); mt9m111_set_pixfmt(client, mt9m111->pixfmt); mt9m111_setup_rect(client, &mt9m111->rect); mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); mt9m111_set_global_gain(client, mt9m111->gain); 
mt9m111_set_autoexposure(client, mt9m111->autoexposure); mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance); return 0; } static int mt9m111_resume(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct mt9m111 *mt9m111 = to_mt9m111(client); int ret = 0; if (mt9m111->powered) { ret = mt9m111_enable(client); if (!ret) ret = mt9m111_reset(client); if (!ret) ret = mt9m111_restore_state(client); } return ret; } static int mt9m111_init(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; mt9m111->context = HIGHPOWER; ret = mt9m111_enable(client); if (!ret) ret = mt9m111_reset(client); if (!ret) ret = mt9m111_set_context(client, mt9m111->context); if (!ret) ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure); if (ret) dev_err(&client->dev, "mt9m11x init failed: %d\n", ret); return ret; } /* * Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ static int mt9m111_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); s32 data; int ret; /* * We must have a parent by now. And it cannot be a wrong one. * So this entire test is completely redundant. 
*/ if (!icd->dev.parent || to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; mt9m111->autoexposure = 1; mt9m111->autowhitebalance = 1; mt9m111->swap_rgb_even_odd = 1; mt9m111->swap_rgb_red_blue = 1; ret = mt9m111_init(client); if (ret) goto ei2c; data = reg_read(CHIP_VERSION); switch (data) { case 0x143a: /* MT9M111 */ mt9m111->model = V4L2_IDENT_MT9M111; break; case 0x148c: /* MT9M112 */ mt9m111->model = V4L2_IDENT_MT9M112; break; default: ret = -ENODEV; dev_err(&client->dev, "No MT9M11x chip detected, register read %x\n", data); goto ei2c; } icd->formats = mt9m111_colour_formats; icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats); dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data); ei2c: return ret; } static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { .g_ctrl = mt9m111_g_ctrl, .s_ctrl = mt9m111_s_ctrl, .g_chip_ident = mt9m111_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9m111_g_register, .s_register = mt9m111_s_register, #endif }; static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { .s_fmt = mt9m111_s_fmt, .g_fmt = mt9m111_g_fmt, .try_fmt = mt9m111_try_fmt, .s_crop = mt9m111_s_crop, .g_crop = mt9m111_g_crop, .cropcap = mt9m111_cropcap, }; static struct v4l2_subdev_ops mt9m111_subdev_ops = { .core = &mt9m111_subdev_core_ops, .video = &mt9m111_subdev_video_ops, }; static int mt9m111_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct mt9m111 *mt9m111; struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct soc_camera_link *icl; int ret; if (!icd) { dev_err(&client->dev, "MT9M11x: missing soc-camera data!\n"); return -EINVAL; } icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "MT9M11x driver needs platform data\n"); return -EINVAL; } if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); 
return -EIO; } mt9m111 = kzalloc(sizeof(struct mt9m111), GFP_KERNEL); if (!mt9m111) return -ENOMEM; v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops); /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m111_ops; icd->y_skip_top = 0; mt9m111->rect.left = MT9M111_MIN_DARK_COLS; mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; mt9m111->rect.width = MT9M111_MAX_WIDTH; mt9m111->rect.height = MT9M111_MAX_HEIGHT; ret = mt9m111_video_probe(icd, client); if (ret) { icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(mt9m111); } return ret; } static int mt9m111_remove(struct i2c_client *client) { struct mt9m111 *mt9m111 = to_mt9m111(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; i2c_set_clientdata(client, NULL); client->driver = NULL; kfree(mt9m111); return 0; } static const struct i2c_device_id mt9m111_id[] = { { "mt9m111", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mt9m111_id); static struct i2c_driver mt9m111_i2c_driver = { .driver = { .name = "mt9m111", }, .probe = mt9m111_probe, .remove = mt9m111_remove, .id_table = mt9m111_id, }; static int __init mt9m111_mod_init(void) { return i2c_add_driver(&mt9m111_i2c_driver); } static void __exit mt9m111_mod_exit(void) { i2c_del_driver(&mt9m111_i2c_driver); } module_init(mt9m111_mod_init); module_exit(mt9m111_mod_exit); MODULE_DESCRIPTION("Micron MT9M111/MT9M112 Camera driver"); MODULE_AUTHOR("Robert Jarzmik"); MODULE_LICENSE("GPL");
gpl-2.0
netico-solutions/linux-urtu-bb
arch/mn10300/unit-asb2303/flash.c
986
2583
/* Handle mapping of the flash on the ASB2303 board
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>

#define ASB2303_PROM_ADDR	0xA0000000	/* Boot PROM */
#define ASB2303_PROM_SIZE	(2 * 1024 * 1024)
#define ASB2303_FLASH_ADDR	0xA4000000	/* System Flash */
#define ASB2303_FLASH_SIZE	(32 * 1024 * 1024)
#define ASB2303_CONFIG_ADDR	0xA6000000	/* System Config EEPROM */
#define ASB2303_CONFIG_SIZE	(8 * 1024)

/*
 * default MTD partition table for both main flash devices, expected to be
 * overridden by RedBoot
 */
static struct mtd_partition asb2303_partitions[] = {
	{
		.name		= "Bootloader",
		.size		= 0x00040000,
		.offset		= 0,
		/*
		 * Mask out MTD_WRITEABLE to force the partition read-only.
		 * The previous mask, MTD_CAP_ROM, is defined as 0 and so
		 * masked nothing at all.
		 */
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "Kernel",
		.size		= 0x00400000,
		.offset		= 0x00040000,
	}, {
		.name		= "Filesystem",
		.size		= MTDPART_SIZ_FULL,
		.offset		= 0x00440000,
	}
};

/*
 * the ASB2303 Boot PROM definition
 *
 * NOTE(review): nr_parts is 1 although three partitions are defined above --
 * presumably RedBoot supplies the full table at runtime; confirm.
 */
static struct physmap_flash_data asb2303_bootprom_data = {
	.width		= 2,
	.nr_parts	= 1,
	.parts		= asb2303_partitions,
};

/* struct resource ranges are inclusive: .end is the last valid address. */
static struct resource asb2303_bootprom_resource = {
	.start		= ASB2303_PROM_ADDR,
	.end		= ASB2303_PROM_ADDR + ASB2303_PROM_SIZE - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device asb2303_bootprom = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev.platform_data = &asb2303_bootprom_data,
	.num_resources	= 1,
	.resource	= &asb2303_bootprom_resource,
};

/*
 * the ASB2303 System Flash definition
 */
static struct physmap_flash_data asb2303_sysflash_data = {
	.width		= 4,
	.nr_parts	= 1,
	.parts		= asb2303_partitions,
};

static struct resource asb2303_sysflash_resource = {
	.start		= ASB2303_FLASH_ADDR,
	.end		= ASB2303_FLASH_ADDR + ASB2303_FLASH_SIZE - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device asb2303_sysflash = {
	.name		= "physmap-flash",
	.id		= 1,
	.dev.platform_data = &asb2303_sysflash_data,
	.num_resources	= 1,
	.resource	= &asb2303_sysflash_resource,
};

/*
 * register the ASB2303 flashes
 *
 * Returns 0 on success or a negative errno if either platform device fails
 * to register; the boot PROM device is unwound if the system flash fails.
 */
static int __init asb2303_mtd_init(void)
{
	int ret;

	ret = platform_device_register(&asb2303_bootprom);
	if (ret)
		return ret;

	ret = platform_device_register(&asb2303_sysflash);
	if (ret)
		platform_device_unregister(&asb2303_bootprom);

	return ret;
}
device_initcall(asb2303_mtd_init);
gpl-2.0
roguesyko/kernel_asus_grouper
sound/pci/oxygen/xonar_wm87x6.c
1498
38968
/* * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim) * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ /* * Xonar DS * -------- * * CMI8788: * * SPI 0 -> WM8766 (surround, center/LFE, back) * SPI 1 -> WM8776 (front, input) * * GPIO 4 <- headphone detect, 0 = plugged * GPIO 6 -> route input jack to mic-in (0) or line-in (1) * GPIO 7 -> enable output to front L/R speaker channels * GPIO 8 -> enable output to other speaker channels and front panel headphone * * WM8776: * * input 1 <- line * input 2 <- mic * input 3 <- front mic * input 4 <- aux */ /* * Xonar HDAV1.3 Slim * ------------------ * * CMI8788: * * I²C <-> WM8776 (addr 0011010) * * GPIO 0 -> disable HDMI output * GPIO 1 -> enable HP output * GPIO 6 -> firmware EEPROM I²C clock * GPIO 7 <-> firmware EEPROM I²C data * * UART <-> HDMI controller * * WM8776: * * input 1 <- mic * input 2 <- aux */ #include <linux/pci.h> #include <linux/delay.h> #include <sound/control.h> #include <sound/core.h> #include <sound/info.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include "xonar.h" #include "wm8776.h" #include "wm8766.h" #define GPIO_DS_HP_DETECT 0x0010 #define GPIO_DS_INPUT_ROUTE 0x0040 #define GPIO_DS_OUTPUT_FRONTLR 0x0080 #define GPIO_DS_OUTPUT_ENABLE 0x0100 #define GPIO_SLIM_HDMI_DISABLE 0x0001 #define GPIO_SLIM_OUTPUT_ENABLE 0x0002 #define GPIO_SLIM_FIRMWARE_CLK 0x0040 #define GPIO_SLIM_FIRMWARE_DATA 0x0080 
#define I2C_DEVICE_WM8776 0x34 /* 001101, 0, /W=0 */ #define LC_CONTROL_LIMITER 0x40000000 #define LC_CONTROL_ALC 0x20000000 struct xonar_wm87x6 { struct xonar_generic generic; u16 wm8776_regs[0x17]; u16 wm8766_regs[0x10]; struct snd_kcontrol *line_adcmux_control; struct snd_kcontrol *mic_adcmux_control; struct snd_kcontrol *lc_controls[13]; struct snd_jack *hp_jack; struct xonar_hdmi hdmi; }; static void wm8776_write_spi(struct oxygen *chip, unsigned int reg, unsigned int value) { oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (1 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_LO, (reg << 9) | value); } static void wm8776_write_i2c(struct oxygen *chip, unsigned int reg, unsigned int value) { oxygen_write_i2c(chip, I2C_DEVICE_WM8776, (reg << 1) | (value >> 8), value); } static void wm8776_write(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) == OXYGEN_FUNCTION_SPI) wm8776_write_spi(chip, reg, value); else wm8776_write_i2c(chip, reg, value); if (reg < ARRAY_SIZE(data->wm8776_regs)) { if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER) value &= ~WM8776_UPDATE; data->wm8776_regs[reg] = value; } } static void wm8776_write_cached(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if (reg >= ARRAY_SIZE(data->wm8776_regs) || value != data->wm8776_regs[reg]) wm8776_write(chip, reg, value); } static void wm8766_write(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_2 | OXYGEN_SPI_CLOCK_160 | (0 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_LO, (reg << 9) | value); if (reg < ARRAY_SIZE(data->wm8766_regs)) { if ((reg >= WM8766_LDA1 && reg <= WM8766_RDA1) || (reg >= WM8766_LDA2 && reg <= WM8766_MASTDA)) value &= 
~WM8766_UPDATE; data->wm8766_regs[reg] = value; } } static void wm8766_write_cached(struct oxygen *chip, unsigned int reg, unsigned int value) { struct xonar_wm87x6 *data = chip->model_data; if (reg >= ARRAY_SIZE(data->wm8766_regs) || value != data->wm8766_regs[reg]) wm8766_write(chip, reg, value); } static void wm8776_registers_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; wm8776_write(chip, WM8776_RESET, 0); wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN | WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT); wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0); wm8776_write(chip, WM8776_DACIFCTRL, WM8776_DACFMT_LJUST | WM8776_DACWL_24); wm8776_write(chip, WM8776_ADCIFCTRL, data->wm8776_regs[WM8776_ADCIFCTRL]); wm8776_write(chip, WM8776_MSTRCTRL, data->wm8776_regs[WM8776_MSTRCTRL]); wm8776_write(chip, WM8776_PWRDOWN, data->wm8776_regs[WM8776_PWRDOWN]); wm8776_write(chip, WM8776_HPLVOL, data->wm8776_regs[WM8776_HPLVOL]); wm8776_write(chip, WM8776_HPRVOL, data->wm8776_regs[WM8776_HPRVOL] | WM8776_UPDATE); wm8776_write(chip, WM8776_ADCLVOL, data->wm8776_regs[WM8776_ADCLVOL]); wm8776_write(chip, WM8776_ADCRVOL, data->wm8776_regs[WM8776_ADCRVOL]); wm8776_write(chip, WM8776_ADCMUX, data->wm8776_regs[WM8776_ADCMUX]); wm8776_write(chip, WM8776_DACLVOL, chip->dac_volume[0]); wm8776_write(chip, WM8776_DACRVOL, chip->dac_volume[1] | WM8776_UPDATE); } static void wm8766_registers_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; wm8766_write(chip, WM8766_RESET, 0); wm8766_write(chip, WM8766_DAC_CTRL, data->wm8766_regs[WM8766_DAC_CTRL]); wm8766_write(chip, WM8766_INT_CTRL, WM8766_FMT_LJUST | WM8766_IWL_24); wm8766_write(chip, WM8766_DAC_CTRL2, WM8766_ZCD | (chip->dac_mute ? 
WM8766_DMUTE_MASK : 0)); wm8766_write(chip, WM8766_LDA1, chip->dac_volume[2]); wm8766_write(chip, WM8766_RDA1, chip->dac_volume[3]); wm8766_write(chip, WM8766_LDA2, chip->dac_volume[4]); wm8766_write(chip, WM8766_RDA2, chip->dac_volume[5]); wm8766_write(chip, WM8766_LDA3, chip->dac_volume[6]); wm8766_write(chip, WM8766_RDA3, chip->dac_volume[7] | WM8766_UPDATE); } static void wm8776_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->wm8776_regs[WM8776_HPLVOL] = (0x79 - 60) | WM8776_HPZCEN; data->wm8776_regs[WM8776_HPRVOL] = (0x79 - 60) | WM8776_HPZCEN; data->wm8776_regs[WM8776_ADCIFCTRL] = WM8776_ADCFMT_LJUST | WM8776_ADCWL_24 | WM8776_ADCMCLK; data->wm8776_regs[WM8776_MSTRCTRL] = WM8776_ADCRATE_256 | WM8776_DACRATE_256; data->wm8776_regs[WM8776_PWRDOWN] = WM8776_HPPD; data->wm8776_regs[WM8776_ADCLVOL] = 0xa5 | WM8776_ZCA; data->wm8776_regs[WM8776_ADCRVOL] = 0xa5 | WM8776_ZCA; data->wm8776_regs[WM8776_ADCMUX] = 0x001; wm8776_registers_init(chip); } static void wm8766_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->wm8766_regs[WM8766_DAC_CTRL] = WM8766_PL_LEFT_LEFT | WM8766_PL_RIGHT_RIGHT; wm8766_registers_init(chip); } static void xonar_ds_handle_hp_jack(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; bool hp_plugged; unsigned int reg; mutex_lock(&chip->mutex); hp_plugged = !(oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DS_HP_DETECT); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, hp_plugged ? 0 : GPIO_DS_OUTPUT_FRONTLR, GPIO_DS_OUTPUT_FRONTLR); reg = data->wm8766_regs[WM8766_DAC_CTRL] & ~WM8766_MUTEALL; if (hp_plugged) reg |= WM8766_MUTEALL; wm8766_write_cached(chip, WM8766_DAC_CTRL, reg); snd_jack_report(data->hp_jack, hp_plugged ? 
SND_JACK_HEADPHONE : 0); mutex_unlock(&chip->mutex); } static void xonar_ds_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->generic.anti_pop_delay = 300; data->generic.output_enable_bit = GPIO_DS_OUTPUT_ENABLE; wm8776_init(chip); wm8766_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DS_INPUT_ROUTE | GPIO_DS_OUTPUT_FRONTLR); oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DS_HP_DETECT); oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_DS_INPUT_ROUTE); oxygen_set_bits16(chip, OXYGEN_GPIO_INTERRUPT_MASK, GPIO_DS_HP_DETECT); chip->interrupt_mask |= OXYGEN_INT_GPIO; xonar_enable_output(chip); snd_jack_new(chip->card, "Headphone", SND_JACK_HEADPHONE, &data->hp_jack); xonar_ds_handle_hp_jack(chip); snd_component_add(chip->card, "WM8776"); snd_component_add(chip->card, "WM8766"); } static void xonar_hdav_slim_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; data->generic.anti_pop_delay = 300; data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE; wm8776_init(chip); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_SLIM_HDMI_DISABLE | GPIO_SLIM_FIRMWARE_CLK | GPIO_SLIM_FIRMWARE_DATA); xonar_hdmi_init(chip, &data->hdmi); xonar_enable_output(chip); snd_component_add(chip->card, "WM8776"); } static void xonar_ds_cleanup(struct oxygen *chip) { xonar_disable_output(chip); wm8776_write(chip, WM8776_RESET, 0); } static void xonar_hdav_slim_cleanup(struct oxygen *chip) { xonar_hdmi_cleanup(chip); xonar_disable_output(chip); wm8776_write(chip, WM8776_RESET, 0); msleep(2); } static void xonar_ds_suspend(struct oxygen *chip) { xonar_ds_cleanup(chip); } static void xonar_hdav_slim_suspend(struct oxygen *chip) { xonar_hdav_slim_cleanup(chip); } static void xonar_ds_resume(struct oxygen *chip) { wm8776_registers_init(chip); wm8766_registers_init(chip); xonar_enable_output(chip); xonar_ds_handle_hp_jack(chip); } static void xonar_hdav_slim_resume(struct oxygen *chip) { struct xonar_wm87x6 *data = 
chip->model_data; wm8776_registers_init(chip); xonar_hdmi_resume(chip, &data->hdmi); xonar_enable_output(chip); } static void wm8776_adc_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { if (channel == PCM_A) { hardware->rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000; hardware->rate_max = 96000; } } static void xonar_hdav_slim_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { wm8776_adc_hardware_filter(channel, hardware); xonar_hdmi_pcm_hardware_filter(channel, hardware); } static void set_wm87x6_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { } static void set_wm8776_adc_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { u16 reg; reg = WM8776_ADCRATE_256 | WM8776_DACRATE_256; if (params_rate(params) > 48000) reg |= WM8776_ADCOSR; wm8776_write_cached(chip, WM8776_MSTRCTRL, reg); } static void set_hdav_slim_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct xonar_wm87x6 *data = chip->model_data; xonar_set_hdmi_params(chip, &data->hdmi, params); } static void update_wm8776_volume(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; u8 to_change; if (chip->dac_volume[0] == chip->dac_volume[1]) { if (chip->dac_volume[0] != data->wm8776_regs[WM8776_DACLVOL] || chip->dac_volume[1] != data->wm8776_regs[WM8776_DACRVOL]) { wm8776_write(chip, WM8776_DACMASTER, chip->dac_volume[0] | WM8776_UPDATE); data->wm8776_regs[WM8776_DACLVOL] = chip->dac_volume[0]; data->wm8776_regs[WM8776_DACRVOL] = chip->dac_volume[0]; } } else { to_change = (chip->dac_volume[0] != data->wm8776_regs[WM8776_DACLVOL]) << 0; to_change |= (chip->dac_volume[1] != data->wm8776_regs[WM8776_DACLVOL]) << 1; if (to_change & 1) wm8776_write(chip, WM8776_DACLVOL, chip->dac_volume[0] | ((to_change & 2) ? 
0 : WM8776_UPDATE)); if (to_change & 2) wm8776_write(chip, WM8776_DACRVOL, chip->dac_volume[1] | WM8776_UPDATE); } } static void update_wm87x6_volume(struct oxygen *chip) { static const u8 wm8766_regs[6] = { WM8766_LDA1, WM8766_RDA1, WM8766_LDA2, WM8766_RDA2, WM8766_LDA3, WM8766_RDA3, }; struct xonar_wm87x6 *data = chip->model_data; unsigned int i; u8 to_change; update_wm8776_volume(chip); if (chip->dac_volume[2] == chip->dac_volume[3] && chip->dac_volume[2] == chip->dac_volume[4] && chip->dac_volume[2] == chip->dac_volume[5] && chip->dac_volume[2] == chip->dac_volume[6] && chip->dac_volume[2] == chip->dac_volume[7]) { to_change = 0; for (i = 0; i < 6; ++i) if (chip->dac_volume[2] != data->wm8766_regs[wm8766_regs[i]]) to_change = 1; if (to_change) { wm8766_write(chip, WM8766_MASTDA, chip->dac_volume[2] | WM8766_UPDATE); for (i = 0; i < 6; ++i) data->wm8766_regs[wm8766_regs[i]] = chip->dac_volume[2]; } } else { to_change = 0; for (i = 0; i < 6; ++i) to_change |= (chip->dac_volume[2 + i] != data->wm8766_regs[wm8766_regs[i]]) << i; for (i = 0; i < 6; ++i) if (to_change & (1 << i)) wm8766_write(chip, wm8766_regs[i], chip->dac_volume[2 + i] | ((to_change & (0x3e << i)) ? 0 : WM8766_UPDATE)); } } static void update_wm8776_mute(struct oxygen *chip) { wm8776_write_cached(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0); } static void update_wm87x6_mute(struct oxygen *chip) { update_wm8776_mute(chip); wm8766_write_cached(chip, WM8766_DAC_CTRL2, WM8766_ZCD | (chip->dac_mute ? WM8766_DMUTE_MASK : 0)); } static void update_wm8766_center_lfe_mix(struct oxygen *chip, bool mixed) { struct xonar_wm87x6 *data = chip->model_data; unsigned int reg; /* * The WM8766 can mix left and right channels, but this setting * applies to all three stereo pairs. 
*/ reg = data->wm8766_regs[WM8766_DAC_CTRL] & ~(WM8766_PL_LEFT_MASK | WM8766_PL_RIGHT_MASK); if (mixed) reg |= WM8766_PL_LEFT_LRMIX | WM8766_PL_RIGHT_LRMIX; else reg |= WM8766_PL_LEFT_LEFT | WM8766_PL_RIGHT_RIGHT; wm8766_write_cached(chip, WM8766_DAC_CTRL, reg); } static void xonar_ds_gpio_changed(struct oxygen *chip) { xonar_ds_handle_hp_jack(chip); } static int wm8776_bit_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u16 bit = ctl->private_value & 0xffff; unsigned int reg_index = (ctl->private_value >> 16) & 0xff; bool invert = (ctl->private_value >> 24) & 1; value->value.integer.value[0] = ((data->wm8776_regs[reg_index] & bit) != 0) ^ invert; return 0; } static int wm8776_bit_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u16 bit = ctl->private_value & 0xffff; u16 reg_value; unsigned int reg_index = (ctl->private_value >> 16) & 0xff; bool invert = (ctl->private_value >> 24) & 1; int changed; mutex_lock(&chip->mutex); reg_value = data->wm8776_regs[reg_index] & ~bit; if (value->value.integer.value[0] ^ invert) reg_value |= bit; changed = reg_value != data->wm8776_regs[reg_index]; if (changed) wm8776_write(chip, reg_index, reg_value); mutex_unlock(&chip->mutex); return changed; } static int wm8776_field_enum_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const hld[16] = { "0 ms", "2.67 ms", "5.33 ms", "10.6 ms", "21.3 ms", "42.7 ms", "85.3 ms", "171 ms", "341 ms", "683 ms", "1.37 s", "2.73 s", "5.46 s", "10.9 s", "21.8 s", "43.7 s", }; static const char *const atk_lim[11] = { "0.25 ms", "0.5 ms", "1 ms", "2 ms", "4 ms", "8 ms", "16 ms", "32 ms", "64 ms", "128 ms", "256 ms", }; static const char *const atk_alc[11] = { "8.40 ms", "16.8 ms", "33.6 ms", "67.2 ms", "134 ms", "269 ms", "538 ms", "1.08 s", "2.15 
s", "4.3 s", "8.6 s", }; static const char *const dcy_lim[11] = { "1.2 ms", "2.4 ms", "4.8 ms", "9.6 ms", "19.2 ms", "38.4 ms", "76.8 ms", "154 ms", "307 ms", "614 ms", "1.23 s", }; static const char *const dcy_alc[11] = { "33.5 ms", "67.0 ms", "134 ms", "268 ms", "536 ms", "1.07 s", "2.14 s", "4.29 s", "8.58 s", "17.2 s", "34.3 s", }; static const char *const tranwin[8] = { "0 us", "62.5 us", "125 us", "250 us", "500 us", "1 ms", "2 ms", "4 ms", }; u8 max; const char *const *names; max = (ctl->private_value >> 12) & 0xf; switch ((ctl->private_value >> 24) & 0x1f) { case WM8776_ALCCTRL2: names = hld; break; case WM8776_ALCCTRL3: if (((ctl->private_value >> 20) & 0xf) == 0) { if (ctl->private_value & LC_CONTROL_LIMITER) names = atk_lim; else names = atk_alc; } else { if (ctl->private_value & LC_CONTROL_LIMITER) names = dcy_lim; else names = dcy_alc; } break; case WM8776_LIMITER: names = tranwin; break; default: return -ENXIO; } return snd_ctl_enum_info(info, 1, max + 1, names); } static int wm8776_field_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = (ctl->private_value >> 8) & 0xf; info->value.integer.max = (ctl->private_value >> 12) & 0xf; return 0; } static void wm8776_field_set_from_ctl(struct snd_kcontrol *ctl) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int value, reg_index, mode; u8 min, max, shift; u16 mask, reg_value; bool invert; if ((data->wm8776_regs[WM8776_ALCCTRL1] & WM8776_LCSEL_MASK) == WM8776_LCSEL_LIMITER) mode = LC_CONTROL_LIMITER; else mode = LC_CONTROL_ALC; if (!(ctl->private_value & mode)) return; value = ctl->private_value & 0xf; min = (ctl->private_value >> 8) & 0xf; max = (ctl->private_value >> 12) & 0xf; mask = (ctl->private_value >> 16) & 0xf; shift = (ctl->private_value >> 20) & 0xf; reg_index = (ctl->private_value >> 24) & 0x1f; invert = (ctl->private_value >> 29) & 0x1; if 
(invert) value = max - (value - min); reg_value = data->wm8776_regs[reg_index]; reg_value &= ~(mask << shift); reg_value |= value << shift; wm8776_write_cached(chip, reg_index, reg_value); } static int wm8776_field_set(struct snd_kcontrol *ctl, unsigned int value) { struct oxygen *chip = ctl->private_data; u8 min, max; int changed; min = (ctl->private_value >> 8) & 0xf; max = (ctl->private_value >> 12) & 0xf; if (value < min || value > max) return -EINVAL; mutex_lock(&chip->mutex); changed = value != (ctl->private_value & 0xf); if (changed) { ctl->private_value = (ctl->private_value & ~0xf) | value; wm8776_field_set_from_ctl(ctl); } mutex_unlock(&chip->mutex); return changed; } static int wm8776_field_enum_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.enumerated.item[0] = ctl->private_value & 0xf; return 0; } static int wm8776_field_volume_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.integer.value[0] = ctl->private_value & 0xf; return 0; } static int wm8776_field_enum_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { return wm8776_field_set(ctl, value->value.enumerated.item[0]); } static int wm8776_field_volume_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { return wm8776_field_set(ctl, value->value.integer.value[0]); } static int wm8776_hp_vol_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0x79 - 60; info->value.integer.max = 0x7f; return 0; } static int wm8776_hp_vol_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = data->wm8776_regs[WM8776_HPLVOL] & WM8776_HPATT_MASK; value->value.integer.value[1] = data->wm8776_regs[WM8776_HPRVOL] & WM8776_HPATT_MASK; mutex_unlock(&chip->mutex); return 0; } static int 
wm8776_hp_vol_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; u8 to_update; mutex_lock(&chip->mutex); to_update = (value->value.integer.value[0] != (data->wm8776_regs[WM8776_HPLVOL] & WM8776_HPATT_MASK)) << 0; to_update |= (value->value.integer.value[1] != (data->wm8776_regs[WM8776_HPRVOL] & WM8776_HPATT_MASK)) << 1; if (value->value.integer.value[0] == value->value.integer.value[1]) { if (to_update) { wm8776_write(chip, WM8776_HPMASTER, value->value.integer.value[0] | WM8776_HPZCEN | WM8776_UPDATE); data->wm8776_regs[WM8776_HPLVOL] = value->value.integer.value[0] | WM8776_HPZCEN; data->wm8776_regs[WM8776_HPRVOL] = value->value.integer.value[0] | WM8776_HPZCEN; } } else { if (to_update & 1) wm8776_write(chip, WM8776_HPLVOL, value->value.integer.value[0] | WM8776_HPZCEN | ((to_update & 2) ? 0 : WM8776_UPDATE)); if (to_update & 2) wm8776_write(chip, WM8776_HPRVOL, value->value.integer.value[1] | WM8776_HPZCEN | WM8776_UPDATE); } mutex_unlock(&chip->mutex); return to_update != 0; } static int wm8776_input_mux_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int mux_bit = ctl->private_value; value->value.integer.value[0] = !!(data->wm8776_regs[WM8776_ADCMUX] & mux_bit); return 0; } static int wm8776_input_mux_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; struct snd_kcontrol *other_ctl; unsigned int mux_bit = ctl->private_value; u16 reg; int changed; mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCMUX]; if (value->value.integer.value[0]) { reg |= mux_bit; /* line-in and mic-in are exclusive */ mux_bit ^= 3; if (reg & mux_bit) { reg &= ~mux_bit; if (mux_bit == 1) other_ctl = data->line_adcmux_control; else other_ctl = 
data->mic_adcmux_control; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &other_ctl->id); } } else reg &= ~mux_bit; changed = reg != data->wm8776_regs[WM8776_ADCMUX]; if (changed) { oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, reg & 1 ? GPIO_DS_INPUT_ROUTE : 0, GPIO_DS_INPUT_ROUTE); wm8776_write(chip, WM8776_ADCMUX, reg); } mutex_unlock(&chip->mutex); return changed; } static int wm8776_input_vol_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0xa5; info->value.integer.max = 0xff; return 0; } static int wm8776_input_vol_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = data->wm8776_regs[WM8776_ADCLVOL] & WM8776_AGMASK; value->value.integer.value[1] = data->wm8776_regs[WM8776_ADCRVOL] & WM8776_AGMASK; mutex_unlock(&chip->mutex); return 0; } static int wm8776_input_vol_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; int changed = 0; mutex_lock(&chip->mutex); changed = (value->value.integer.value[0] != (data->wm8776_regs[WM8776_ADCLVOL] & WM8776_AGMASK)) || (value->value.integer.value[1] != (data->wm8776_regs[WM8776_ADCRVOL] & WM8776_AGMASK)); wm8776_write_cached(chip, WM8776_ADCLVOL, value->value.integer.value[0] | WM8776_ZCA); wm8776_write_cached(chip, WM8776_ADCRVOL, value->value.integer.value[1] | WM8776_ZCA); mutex_unlock(&chip->mutex); return changed; } static int wm8776_level_control_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "None", "Peak Limiter", "Automatic Level Control" }; return snd_ctl_enum_info(info, 1, 3, names); } static int wm8776_level_control_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct 
oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; if (!(data->wm8776_regs[WM8776_ALCCTRL2] & WM8776_LCEN)) value->value.enumerated.item[0] = 0; else if ((data->wm8776_regs[WM8776_ALCCTRL1] & WM8776_LCSEL_MASK) == WM8776_LCSEL_LIMITER) value->value.enumerated.item[0] = 1; else value->value.enumerated.item[0] = 2; return 0; } static void activate_control(struct oxygen *chip, struct snd_kcontrol *ctl, unsigned int mode) { unsigned int access; if (ctl->private_value & mode) access = 0; else access = SNDRV_CTL_ELEM_ACCESS_INACTIVE; if ((ctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_INACTIVE) != access) { ctl->vd[0].access ^= SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); } } static int wm8776_level_control_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int mode = 0, i; u16 ctrl1, ctrl2; int changed; if (value->value.enumerated.item[0] >= 3) return -EINVAL; mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != ctl->private_value; if (changed) { ctl->private_value = value->value.enumerated.item[0]; ctrl1 = data->wm8776_regs[WM8776_ALCCTRL1]; ctrl2 = data->wm8776_regs[WM8776_ALCCTRL2]; switch (value->value.enumerated.item[0]) { default: wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 & ~WM8776_LCEN); break; case 1: wm8776_write_cached(chip, WM8776_ALCCTRL1, (ctrl1 & ~WM8776_LCSEL_MASK) | WM8776_LCSEL_LIMITER); wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 | WM8776_LCEN); mode = LC_CONTROL_LIMITER; break; case 2: wm8776_write_cached(chip, WM8776_ALCCTRL1, (ctrl1 & ~WM8776_LCSEL_MASK) | WM8776_LCSEL_ALC_STEREO); wm8776_write_cached(chip, WM8776_ALCCTRL2, ctrl2 | WM8776_LCEN); mode = LC_CONTROL_ALC; break; } for (i = 0; i < ARRAY_SIZE(data->lc_controls); ++i) activate_control(chip, data->lc_controls[i], mode); } mutex_unlock(&chip->mutex); return changed; } static int 
hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[2] = { "None", "High-pass Filter" }; return snd_ctl_enum_info(info, 1, 2, names); } static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; value->value.enumerated.item[0] = !(data->wm8776_regs[WM8776_ADCIFCTRL] & WM8776_ADCHPD); return 0; } static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; unsigned int reg; int changed; mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCIFCTRL] & ~WM8776_ADCHPD; if (!value->value.enumerated.item[0]) reg |= WM8776_ADCHPD; changed = reg != data->wm8776_regs[WM8776_ADCIFCTRL]; if (changed) wm8776_write(chip, WM8776_ADCIFCTRL, reg); mutex_unlock(&chip->mutex); return changed; } #define WM8776_BIT_SWITCH(xname, reg, bit, invert, flags) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .info = snd_ctl_boolean_mono_info, \ .get = wm8776_bit_switch_get, \ .put = wm8776_bit_switch_put, \ .private_value = ((reg) << 16) | (bit) | ((invert) << 24) | (flags), \ } #define _WM8776_FIELD_CTL(xname, reg, shift, initval, min, max, mask, flags) \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .private_value = (initval) | ((min) << 8) | ((max) << 12) | \ ((mask) << 16) | ((shift) << 20) | ((reg) << 24) | (flags) #define WM8776_FIELD_CTL_ENUM(xname, reg, shift, init, min, max, mask, flags) {\ _WM8776_FIELD_CTL(xname " Capture Enum", \ reg, shift, init, min, max, mask, flags), \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_INACTIVE, \ .info = wm8776_field_enum_info, \ .get = wm8776_field_enum_get, \ .put = wm8776_field_enum_put, \ } #define WM8776_FIELD_CTL_VOLUME(a, b, c, d, e, f, g, h, tlv_p) { \ _WM8776_FIELD_CTL(a " Capture Volume", b, c, d, e, f, g, h), \ .access = 
SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_INACTIVE | \ SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .info = wm8776_field_volume_info, \ .get = wm8776_field_volume_get, \ .put = wm8776_field_volume_put, \ .tlv = { .p = tlv_p }, \ } static const DECLARE_TLV_DB_SCALE(wm87x6_dac_db_scale, -6000, 50, 0); static const DECLARE_TLV_DB_SCALE(wm8776_adc_db_scale, -2100, 50, 0); static const DECLARE_TLV_DB_SCALE(wm8776_hp_db_scale, -6000, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_lct_db_scale, -1600, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxgain_db_scale, 0, 400, 0); static const DECLARE_TLV_DB_SCALE(wm8776_ngth_db_scale, -7800, 600, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_lim_db_scale, -1200, 100, 0); static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_alc_db_scale, -2100, 400, 0); static const struct snd_kcontrol_new ds_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Volume", .info = wm8776_hp_vol_info, .get = wm8776_hp_vol_get, .put = wm8776_hp_vol_put, .tlv = { .p = wm8776_hp_db_scale }, }, WM8776_BIT_SWITCH("Headphone Playback Switch", WM8776_PWRDOWN, WM8776_HPPD, 1, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Capture Volume", .info = wm8776_input_vol_info, .get = wm8776_input_vol_get, .put = wm8776_input_vol_put, .tlv = { .p = wm8776_adc_db_scale }, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Capture Switch", .info = snd_ctl_boolean_mono_info, .get = wm8776_input_mux_get, .put = wm8776_input_mux_put, .private_value = 1 << 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Switch", .info = snd_ctl_boolean_mono_info, .get = wm8776_input_mux_get, .put = wm8776_input_mux_put, .private_value = 1 << 1, }, WM8776_BIT_SWITCH("Front Mic Capture Switch", WM8776_ADCMUX, 1 << 2, 0, 0), WM8776_BIT_SWITCH("Aux Capture Switch", WM8776_ADCMUX, 1 << 3, 0, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Filter Capture Enum", .info = hpf_info, .get = hpf_get, .put = 
hpf_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Level Control Capture Enum", .info = wm8776_level_control_info, .get = wm8776_level_control_get, .put = wm8776_level_control_put, .private_value = 0, }, }; static const struct snd_kcontrol_new hdav_slim_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "HDMI Playback Switch", .info = snd_ctl_boolean_mono_info, .get = xonar_gpio_bit_switch_get, .put = xonar_gpio_bit_switch_put, .private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Volume", .info = wm8776_hp_vol_info, .get = wm8776_hp_vol_get, .put = wm8776_hp_vol_put, .tlv = { .p = wm8776_hp_db_scale }, }, WM8776_BIT_SWITCH("Headphone Playback Switch", WM8776_PWRDOWN, WM8776_HPPD, 1, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Capture Volume", .info = wm8776_input_vol_info, .get = wm8776_input_vol_get, .put = wm8776_input_vol_put, .tlv = { .p = wm8776_adc_db_scale }, }, WM8776_BIT_SWITCH("Mic Capture Switch", WM8776_ADCMUX, 1 << 0, 0, 0), WM8776_BIT_SWITCH("Aux Capture Switch", WM8776_ADCMUX, 1 << 1, 0, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Filter Capture Enum", .info = hpf_info, .get = hpf_get, .put = hpf_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Level Control Capture Enum", .info = wm8776_level_control_info, .get = wm8776_level_control_get, .put = wm8776_level_control_put, .private_value = 0, }, }; static const struct snd_kcontrol_new lc_controls[] = { WM8776_FIELD_CTL_VOLUME("Limiter Threshold", WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf, LC_CONTROL_LIMITER, wm8776_lct_db_scale), WM8776_FIELD_CTL_ENUM("Limiter Attack Time", WM8776_ALCCTRL3, 0, 2, 0, 10, 0xf, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_ENUM("Limiter Decay Time", WM8776_ALCCTRL3, 4, 3, 0, 10, 0xf, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_ENUM("Limiter Transient Window", WM8776_LIMITER, 4, 2, 0, 7, 0x7, LC_CONTROL_LIMITER), WM8776_FIELD_CTL_VOLUME("Limiter Maximum 
Attenuation", WM8776_LIMITER, 0, 6, 3, 12, 0xf, LC_CONTROL_LIMITER, wm8776_maxatten_lim_db_scale), WM8776_FIELD_CTL_VOLUME("ALC Target Level", WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf, LC_CONTROL_ALC, wm8776_lct_db_scale), WM8776_FIELD_CTL_ENUM("ALC Attack Time", WM8776_ALCCTRL3, 0, 2, 0, 10, 0xf, LC_CONTROL_ALC), WM8776_FIELD_CTL_ENUM("ALC Decay Time", WM8776_ALCCTRL3, 4, 3, 0, 10, 0xf, LC_CONTROL_ALC), WM8776_FIELD_CTL_VOLUME("ALC Maximum Gain", WM8776_ALCCTRL1, 4, 7, 1, 7, 0x7, LC_CONTROL_ALC, wm8776_maxgain_db_scale), WM8776_FIELD_CTL_VOLUME("ALC Maximum Attenuation", WM8776_LIMITER, 0, 10, 10, 15, 0xf, LC_CONTROL_ALC, wm8776_maxatten_alc_db_scale), WM8776_FIELD_CTL_ENUM("ALC Hold Time", WM8776_ALCCTRL2, 0, 0, 0, 15, 0xf, LC_CONTROL_ALC), WM8776_BIT_SWITCH("Noise Gate Capture Switch", WM8776_NOISEGATE, WM8776_NGAT, 0, LC_CONTROL_ALC), WM8776_FIELD_CTL_VOLUME("Noise Gate Threshold", WM8776_NOISEGATE, 2, 0, 0, 7, 0x7, LC_CONTROL_ALC, wm8776_ngth_db_scale), }; static int add_lc_controls(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; struct snd_kcontrol *ctl; int err; BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { ctl = snd_ctl_new1(&lc_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; data->lc_controls[i] = ctl; } return 0; } static int xonar_ds_mixer_init(struct oxygen *chip) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; struct snd_kcontrol *ctl; int err; for (i = 0; i < ARRAY_SIZE(ds_controls); ++i) { ctl = snd_ctl_new1(&ds_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; if (!strcmp(ctl->id.name, "Line Capture Switch")) data->line_adcmux_control = ctl; else if (!strcmp(ctl->id.name, "Mic Capture Switch")) data->mic_adcmux_control = ctl; } if (!data->line_adcmux_control || !data->mic_adcmux_control) return -ENXIO; return 
add_lc_controls(chip); } static int xonar_hdav_slim_mixer_init(struct oxygen *chip) { unsigned int i; struct snd_kcontrol *ctl; int err; for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) { ctl = snd_ctl_new1(&hdav_slim_controls[i], chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; } return add_lc_controls(chip); } static void dump_wm8776_registers(struct oxygen *chip, struct snd_info_buffer *buffer) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; snd_iprintf(buffer, "\nWM8776:\n00:"); for (i = 0; i < 0x10; ++i) snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); snd_iprintf(buffer, "\n10:"); for (i = 0x10; i < 0x17; ++i) snd_iprintf(buffer, " %03x", data->wm8776_regs[i]); snd_iprintf(buffer, "\n"); } static void dump_wm87x6_registers(struct oxygen *chip, struct snd_info_buffer *buffer) { struct xonar_wm87x6 *data = chip->model_data; unsigned int i; dump_wm8776_registers(chip, buffer); snd_iprintf(buffer, "\nWM8766:\n00:"); for (i = 0; i < 0x10; ++i) snd_iprintf(buffer, " %03x", data->wm8766_regs[i]); snd_iprintf(buffer, "\n"); } static const struct oxygen_model model_xonar_ds = { .shortname = "Xonar DS", .longname = "Asus Virtuoso 66", .chip = "AV200", .init = xonar_ds_init, .mixer_init = xonar_ds_mixer_init, .cleanup = xonar_ds_cleanup, .suspend = xonar_ds_suspend, .resume = xonar_ds_resume, .pcm_hardware_filter = wm8776_adc_hardware_filter, .set_dac_params = set_wm87x6_dac_params, .set_adc_params = set_wm8776_adc_params, .update_dac_volume = update_wm87x6_volume, .update_dac_mute = update_wm87x6_mute, .update_center_lfe_mix = update_wm8766_center_lfe_mix, .gpio_changed = xonar_ds_gpio_changed, .dump_registers = dump_wm87x6_registers, .dac_tlv = wm87x6_dac_db_scale, .model_data_size = sizeof(struct xonar_wm87x6), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_1, .dac_channels_pcm = 8, .dac_channels_mixer = 8, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, 
.function_flags = OXYGEN_FUNCTION_SPI, .dac_mclks = OXYGEN_MCLKS(256, 256, 128), .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; static const struct oxygen_model model_xonar_hdav_slim = { .shortname = "Xonar HDAV1.3 Slim", .longname = "Asus Virtuoso 200", .chip = "AV200", .init = xonar_hdav_slim_init, .mixer_init = xonar_hdav_slim_mixer_init, .cleanup = xonar_hdav_slim_cleanup, .suspend = xonar_hdav_slim_suspend, .resume = xonar_hdav_slim_resume, .pcm_hardware_filter = xonar_hdav_slim_hardware_filter, .set_dac_params = set_hdav_slim_dac_params, .set_adc_params = set_wm8776_adc_params, .update_dac_volume = update_wm8776_volume, .update_dac_mute = update_wm8776_mute, .uart_input = xonar_hdmi_uart_input, .dump_registers = dump_wm8776_registers, .dac_tlv = wm87x6_dac_db_scale, .model_data_size = sizeof(struct xonar_wm87x6), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_1, .dac_channels_pcm = 8, .dac_channels_mixer = 2, .dac_volume_min = 255 - 2*60, .dac_volume_max = 255, .function_flags = OXYGEN_FUNCTION_2WIRE, .dac_mclks = OXYGEN_MCLKS(256, 256, 128), .adc_mclks = OXYGEN_MCLKS(256, 256, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, }; int __devinit get_xonar_wm87x6_model(struct oxygen *chip, const struct pci_device_id *id) { switch (id->subdevice) { case 0x838e: chip->model = model_xonar_ds; break; case 0x835e: chip->model = model_xonar_hdav_slim; break; default: return -EINVAL; } return 0; }
gpl-2.0
krachlatte/Sony-Xperia-Go-ST27i
arch/arm/mach-at91/at91sam9260_devices.c
1754
34094
/* * arch/arm/mach-at91/at91sam9260_devices.c * * Copyright (C) 2006 Atmel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/i2c-gpio.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/cpu.h> #include <mach/at91sam9260.h> #include <mach/at91sam9260_matrix.h> #include <mach/at91sam9_smc.h> #include "generic.h" /* -------------------------------------------------------------------- * USB Host * -------------------------------------------------------------------- */ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static u64 ohci_dmamask = DMA_BIT_MASK(32); static struct at91_usbh_data usbh_data; static struct resource usbh_resources[] = { [0] = { .start = AT91SAM9260_UHP_BASE, .end = AT91SAM9260_UHP_BASE + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_UHP, .end = AT91SAM9260_ID_UHP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_usbh_device = { .name = "at91_ohci", .id = -1, .dev = { .dma_mask = &ohci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &usbh_data, }, .resource = usbh_resources, .num_resources = ARRAY_SIZE(usbh_resources), }; void __init at91_add_device_usbh(struct at91_usbh_data *data) { if (!data) return; usbh_data = *data; platform_device_register(&at91_usbh_device); } #else void __init at91_add_device_usbh(struct at91_usbh_data *data) {} #endif /* -------------------------------------------------------------------- * USB Device (Gadget) * -------------------------------------------------------------------- */ #ifdef CONFIG_USB_GADGET_AT91 static struct at91_udc_data udc_data; static struct resource udc_resources[] = { [0] 
= { .start = AT91SAM9260_BASE_UDP, .end = AT91SAM9260_BASE_UDP + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_UDP, .end = AT91SAM9260_ID_UDP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91_udc_device = { .name = "at91_udc", .id = -1, .dev = { .platform_data = &udc_data, }, .resource = udc_resources, .num_resources = ARRAY_SIZE(udc_resources), }; void __init at91_add_device_udc(struct at91_udc_data *data) { if (!data) return; if (data->vbus_pin) { at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); } /* Pullup pin is handled internally by USB device peripheral */ udc_data = *data; platform_device_register(&at91_udc_device); } #else void __init at91_add_device_udc(struct at91_udc_data *data) {} #endif /* -------------------------------------------------------------------- * Ethernet * -------------------------------------------------------------------- */ #if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) static u64 eth_dmamask = DMA_BIT_MASK(32); static struct at91_eth_data eth_data; static struct resource eth_resources[] = { [0] = { .start = AT91SAM9260_BASE_EMAC, .end = AT91SAM9260_BASE_EMAC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_EMAC, .end = AT91SAM9260_ID_EMAC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_eth_device = { .name = "macb", .id = -1, .dev = { .dma_mask = &eth_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &eth_data, }, .resource = eth_resources, .num_resources = ARRAY_SIZE(eth_resources), }; void __init at91_add_device_eth(struct at91_eth_data *data) { if (!data) return; if (data->phy_irq_pin) { at91_set_gpio_input(data->phy_irq_pin, 0); at91_set_deglitch(data->phy_irq_pin, 1); } /* Pins used for MII and RMII */ at91_set_A_periph(AT91_PIN_PA19, 0); /* ETXCK_EREFCK */ at91_set_A_periph(AT91_PIN_PA17, 0); /* ERXDV */ at91_set_A_periph(AT91_PIN_PA14, 0); /* ERX0 */ at91_set_A_periph(AT91_PIN_PA15, 
0); /* ERX1 */ at91_set_A_periph(AT91_PIN_PA18, 0); /* ERXER */ at91_set_A_periph(AT91_PIN_PA16, 0); /* ETXEN */ at91_set_A_periph(AT91_PIN_PA12, 0); /* ETX0 */ at91_set_A_periph(AT91_PIN_PA13, 0); /* ETX1 */ at91_set_A_periph(AT91_PIN_PA21, 0); /* EMDIO */ at91_set_A_periph(AT91_PIN_PA20, 0); /* EMDC */ if (!data->is_rmii) { at91_set_B_periph(AT91_PIN_PA28, 0); /* ECRS */ at91_set_B_periph(AT91_PIN_PA29, 0); /* ECOL */ at91_set_B_periph(AT91_PIN_PA25, 0); /* ERX2 */ at91_set_B_periph(AT91_PIN_PA26, 0); /* ERX3 */ at91_set_B_periph(AT91_PIN_PA27, 0); /* ERXCK */ at91_set_B_periph(AT91_PIN_PA23, 0); /* ETX2 */ at91_set_B_periph(AT91_PIN_PA24, 0); /* ETX3 */ at91_set_B_periph(AT91_PIN_PA22, 0); /* ETXER */ } eth_data = *data; platform_device_register(&at91sam9260_eth_device); } #else void __init at91_add_device_eth(struct at91_eth_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct at91_mmc_data mmc_data; static struct resource mmc_resources[] = { [0] = { .start = AT91SAM9260_BASE_MCI, .end = AT91SAM9260_BASE_MCI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_MCI, .end = AT91SAM9260_ID_MCI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_mmc_device = { .name = "at91_mci", .id = -1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc_data, }, .resource = mmc_resources, .num_resources = ARRAY_SIZE(mmc_resources), }; void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) { if (!data) return; /* input/irq */ if (data->det_pin) { at91_set_gpio_input(data->det_pin, 1); at91_set_deglitch(data->det_pin, 1); } if (data->wp_pin) at91_set_gpio_input(data->wp_pin, 1); if (data->vcc_pin) at91_set_gpio_output(data->vcc_pin, 
0); /* CLK */ at91_set_A_periph(AT91_PIN_PA8, 0); if (data->slot_b) { /* CMD */ at91_set_B_periph(AT91_PIN_PA1, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_B_periph(AT91_PIN_PA0, 1); if (data->wire4) { at91_set_B_periph(AT91_PIN_PA5, 1); at91_set_B_periph(AT91_PIN_PA4, 1); at91_set_B_periph(AT91_PIN_PA3, 1); } } else { /* CMD */ at91_set_A_periph(AT91_PIN_PA7, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_A_periph(AT91_PIN_PA6, 1); if (data->wire4) { at91_set_A_periph(AT91_PIN_PA9, 1); at91_set_A_periph(AT91_PIN_PA10, 1); at91_set_A_periph(AT91_PIN_PA11, 1); } } mmc_data = *data; platform_device_register(&at91sam9260_mmc_device); } #else void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {} #endif /* -------------------------------------------------------------------- * MMC / SD Slot for Atmel MCI Driver * -------------------------------------------------------------------- */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static u64 mmc_dmamask = DMA_BIT_MASK(32); static struct mci_platform_data mmc_data; static struct resource mmc_resources[] = { [0] = { .start = AT91SAM9260_BASE_MCI, .end = AT91SAM9260_BASE_MCI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_MCI, .end = AT91SAM9260_ID_MCI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_mmc_device = { .name = "atmel_mci", .id = -1, .dev = { .dma_mask = &mmc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &mmc_data, }, .resource = mmc_resources, .num_resources = ARRAY_SIZE(mmc_resources), }; void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) { unsigned int i; unsigned int slot_count = 0; if (!data) return; for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { if (data->slot[i].bus_width) { /* input/irq */ if (data->slot[i].detect_pin) { at91_set_gpio_input(data->slot[i].detect_pin, 1); at91_set_deglitch(data->slot[i].detect_pin, 1); } if (data->slot[i].wp_pin) 
at91_set_gpio_input(data->slot[i].wp_pin, 1); switch (i) { case 0: /* CMD */ at91_set_A_periph(AT91_PIN_PA7, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_A_periph(AT91_PIN_PA6, 1); if (data->slot[i].bus_width == 4) { at91_set_A_periph(AT91_PIN_PA9, 1); at91_set_A_periph(AT91_PIN_PA10, 1); at91_set_A_periph(AT91_PIN_PA11, 1); } slot_count++; break; case 1: /* CMD */ at91_set_B_periph(AT91_PIN_PA1, 1); /* DAT0, maybe DAT1..DAT3 */ at91_set_B_periph(AT91_PIN_PA0, 1); if (data->slot[i].bus_width == 4) { at91_set_B_periph(AT91_PIN_PA5, 1); at91_set_B_periph(AT91_PIN_PA4, 1); at91_set_B_periph(AT91_PIN_PA3, 1); } slot_count++; break; default: printk(KERN_ERR "AT91: SD/MMC slot %d not available\n", i); break; } } } if (slot_count) { /* CLK */ at91_set_A_periph(AT91_PIN_PA8, 0); mmc_data = *data; platform_device_register(&at91sam9260_mmc_device); } } #else void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {} #endif /* -------------------------------------------------------------------- * NAND / SmartMedia * -------------------------------------------------------------------- */ #if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE) static struct atmel_nand_data nand_data; #define NAND_BASE AT91_CHIPSELECT_3 static struct resource nand_resources[] = { [0] = { .start = NAND_BASE, .end = NAND_BASE + SZ_256M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91_BASE_SYS + AT91_ECC, .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9260_nand_device = { .name = "atmel_nand", .id = -1, .dev = { .platform_data = &nand_data, }, .resource = nand_resources, .num_resources = ARRAY_SIZE(nand_resources), }; void __init at91_add_device_nand(struct atmel_nand_data *data) { unsigned long csa; if (!data) return; csa = at91_sys_read(AT91_MATRIX_EBICSA); at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA); /* enable pin */ if (data->enable_pin) 
at91_set_gpio_output(data->enable_pin, 1); /* ready/busy pin */ if (data->rdy_pin) at91_set_gpio_input(data->rdy_pin, 1); /* card detect pin */ if (data->det_pin) at91_set_gpio_input(data->det_pin, 1); nand_data = *data; platform_device_register(&at91sam9260_nand_device); } #else void __init at91_add_device_nand(struct atmel_nand_data *data) {} #endif /* -------------------------------------------------------------------- * TWI (i2c) * -------------------------------------------------------------------- */ /* * Prefer the GPIO code since the TWI controller isn't robust * (gets overruns and underruns under load) and can only issue * repeated STARTs in one scenario (the driver doesn't yet handle them). */ #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) static struct i2c_gpio_platform_data pdata = { .sda_pin = AT91_PIN_PA23, .sda_is_open_drain = 1, .scl_pin = AT91_PIN_PA24, .scl_is_open_drain = 1, .udelay = 2, /* ~100 kHz */ }; static struct platform_device at91sam9260_twi_device = { .name = "i2c-gpio", .id = -1, .dev.platform_data = &pdata, }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) { at91_set_GPIO_periph(AT91_PIN_PA23, 1); /* TWD (SDA) */ at91_set_multi_drive(AT91_PIN_PA23, 1); at91_set_GPIO_periph(AT91_PIN_PA24, 1); /* TWCK (SCL) */ at91_set_multi_drive(AT91_PIN_PA24, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91sam9260_twi_device); } #elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE) static struct resource twi_resources[] = { [0] = { .start = AT91SAM9260_BASE_TWI, .end = AT91SAM9260_BASE_TWI + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_TWI, .end = AT91SAM9260_ID_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_twi_device = { .name = "at91_i2c", .id = -1, .resource = twi_resources, .num_resources = ARRAY_SIZE(twi_resources), }; void __init at91_add_device_i2c(struct i2c_board_info *devices, int 
nr_devices) { /* pins used for TWI interface */ at91_set_A_periph(AT91_PIN_PA23, 0); /* TWD */ at91_set_multi_drive(AT91_PIN_PA23, 1); at91_set_A_periph(AT91_PIN_PA24, 0); /* TWCK */ at91_set_multi_drive(AT91_PIN_PA24, 1); i2c_register_board_info(0, devices, nr_devices); platform_device_register(&at91sam9260_twi_device); } #else void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * SPI * -------------------------------------------------------------------- */ #if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) static u64 spi_dmamask = DMA_BIT_MASK(32); static struct resource spi0_resources[] = { [0] = { .start = AT91SAM9260_BASE_SPI0, .end = AT91SAM9260_BASE_SPI0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SPI0, .end = AT91SAM9260_ID_SPI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_spi0_device = { .name = "atmel_spi", .id = 0, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi0_resources, .num_resources = ARRAY_SIZE(spi0_resources), }; static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PC11, AT91_PIN_PC16, AT91_PIN_PC17 }; static struct resource spi1_resources[] = { [0] = { .start = AT91SAM9260_BASE_SPI1, .end = AT91SAM9260_BASE_SPI1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SPI1, .end = AT91SAM9260_ID_SPI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_spi1_device = { .name = "atmel_spi", .id = 1, .dev = { .dma_mask = &spi_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = spi1_resources, .num_resources = ARRAY_SIZE(spi1_resources), }; static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB3, AT91_PIN_PC5, AT91_PIN_PC4, AT91_PIN_PC3 }; void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) { int i; unsigned long cs_pin; short 
enable_spi0 = 0; short enable_spi1 = 0; /* Choose SPI chip-selects */ for (i = 0; i < nr_devices; i++) { if (devices[i].controller_data) cs_pin = (unsigned long) devices[i].controller_data; else if (devices[i].bus_num == 0) cs_pin = spi0_standard_cs[devices[i].chip_select]; else cs_pin = spi1_standard_cs[devices[i].chip_select]; if (devices[i].bus_num == 0) enable_spi0 = 1; else enable_spi1 = 1; /* enable chip-select pin */ at91_set_gpio_output(cs_pin, 1); /* pass chip-select pin to driver */ devices[i].controller_data = (void *) cs_pin; } spi_register_board_info(devices, nr_devices); /* Configure SPI bus(es) */ if (enable_spi0) { at91_set_A_periph(AT91_PIN_PA0, 0); /* SPI0_MISO */ at91_set_A_periph(AT91_PIN_PA1, 0); /* SPI0_MOSI */ at91_set_A_periph(AT91_PIN_PA2, 0); /* SPI1_SPCK */ at91_clock_associate("spi0_clk", &at91sam9260_spi0_device.dev, "spi_clk"); platform_device_register(&at91sam9260_spi0_device); } if (enable_spi1) { at91_set_A_periph(AT91_PIN_PB0, 0); /* SPI1_MISO */ at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI1_MOSI */ at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI1_SPCK */ at91_clock_associate("spi1_clk", &at91sam9260_spi1_device.dev, "spi_clk"); platform_device_register(&at91sam9260_spi1_device); } } #else void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {} #endif /* -------------------------------------------------------------------- * Timer/Counter blocks * -------------------------------------------------------------------- */ #ifdef CONFIG_ATMEL_TCLIB static struct resource tcb0_resources[] = { [0] = { .start = AT91SAM9260_BASE_TCB0, .end = AT91SAM9260_BASE_TCB0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_TC0, .end = AT91SAM9260_ID_TC0, .flags = IORESOURCE_IRQ, }, [2] = { .start = AT91SAM9260_ID_TC1, .end = AT91SAM9260_ID_TC1, .flags = IORESOURCE_IRQ, }, [3] = { .start = AT91SAM9260_ID_TC2, .end = AT91SAM9260_ID_TC2, .flags = IORESOURCE_IRQ, }, }; static struct platform_device 
at91sam9260_tcb0_device = { .name = "atmel_tcb", .id = 0, .resource = tcb0_resources, .num_resources = ARRAY_SIZE(tcb0_resources), }; static struct resource tcb1_resources[] = { [0] = { .start = AT91SAM9260_BASE_TCB1, .end = AT91SAM9260_BASE_TCB1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_TC3, .end = AT91SAM9260_ID_TC3, .flags = IORESOURCE_IRQ, }, [2] = { .start = AT91SAM9260_ID_TC4, .end = AT91SAM9260_ID_TC4, .flags = IORESOURCE_IRQ, }, [3] = { .start = AT91SAM9260_ID_TC5, .end = AT91SAM9260_ID_TC5, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_tcb1_device = { .name = "atmel_tcb", .id = 1, .resource = tcb1_resources, .num_resources = ARRAY_SIZE(tcb1_resources), }; static void __init at91_add_device_tc(void) { /* this chip has a separate clock and irq for each TC channel */ at91_clock_associate("tc0_clk", &at91sam9260_tcb0_device.dev, "t0_clk"); at91_clock_associate("tc1_clk", &at91sam9260_tcb0_device.dev, "t1_clk"); at91_clock_associate("tc2_clk", &at91sam9260_tcb0_device.dev, "t2_clk"); platform_device_register(&at91sam9260_tcb0_device); at91_clock_associate("tc3_clk", &at91sam9260_tcb1_device.dev, "t0_clk"); at91_clock_associate("tc4_clk", &at91sam9260_tcb1_device.dev, "t1_clk"); at91_clock_associate("tc5_clk", &at91sam9260_tcb1_device.dev, "t2_clk"); platform_device_register(&at91sam9260_tcb1_device); } #else static void __init at91_add_device_tc(void) { } #endif /* -------------------------------------------------------------------- * RTT * -------------------------------------------------------------------- */ static struct resource rtt_resources[] = { { .start = AT91_BASE_SYS + AT91_RTT, .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device at91sam9260_rtt_device = { .name = "at91_rtt", .id = 0, .resource = rtt_resources, .num_resources = ARRAY_SIZE(rtt_resources), }; static void __init at91_add_device_rtt(void) { 
platform_device_register(&at91sam9260_rtt_device); } /* -------------------------------------------------------------------- * Watchdog * -------------------------------------------------------------------- */ #if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE) static struct platform_device at91sam9260_wdt_device = { .name = "at91_wdt", .id = -1, .num_resources = 0, }; static void __init at91_add_device_watchdog(void) { platform_device_register(&at91sam9260_wdt_device); } #else static void __init at91_add_device_watchdog(void) {} #endif /* -------------------------------------------------------------------- * SSC -- Synchronous Serial Controller * -------------------------------------------------------------------- */ #if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE) static u64 ssc_dmamask = DMA_BIT_MASK(32); static struct resource ssc_resources[] = { [0] = { .start = AT91SAM9260_BASE_SSC, .end = AT91SAM9260_BASE_SSC + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_SSC, .end = AT91SAM9260_ID_SSC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device at91sam9260_ssc_device = { .name = "ssc", .id = 0, .dev = { .dma_mask = &ssc_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = ssc_resources, .num_resources = ARRAY_SIZE(ssc_resources), }; static inline void configure_ssc_pins(unsigned pins) { if (pins & ATMEL_SSC_TF) at91_set_A_periph(AT91_PIN_PB17, 1); if (pins & ATMEL_SSC_TK) at91_set_A_periph(AT91_PIN_PB16, 1); if (pins & ATMEL_SSC_TD) at91_set_A_periph(AT91_PIN_PB18, 1); if (pins & ATMEL_SSC_RD) at91_set_A_periph(AT91_PIN_PB19, 1); if (pins & ATMEL_SSC_RK) at91_set_A_periph(AT91_PIN_PB20, 1); if (pins & ATMEL_SSC_RF) at91_set_A_periph(AT91_PIN_PB21, 1); } /* * SSC controllers are accessed through library code, instead of any * kind of all-singing/all-dancing driver. 
For example one could be * used by a particular I2S audio codec's driver, while another one * on the same system might be used by a custom data capture driver. */ void __init at91_add_device_ssc(unsigned id, unsigned pins) { struct platform_device *pdev; /* * NOTE: caller is responsible for passing information matching * "pins" to whatever will be using each particular controller. */ switch (id) { case AT91SAM9260_ID_SSC: pdev = &at91sam9260_ssc_device; configure_ssc_pins(pins); at91_clock_associate("ssc_clk", &pdev->dev, "pclk"); break; default: return; } platform_device_register(pdev); } #else void __init at91_add_device_ssc(unsigned id, unsigned pins) {} #endif /* -------------------------------------------------------------------- * UART * -------------------------------------------------------------------- */ #if defined(CONFIG_SERIAL_ATMEL) static struct resource dbgu_resources[] = { [0] = { .start = AT91_VA_BASE_SYS + AT91_DBGU, .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91_ID_SYS, .end = AT91_ID_SYS, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data dbgu_data = { .use_dma_tx = 0, .use_dma_rx = 0, /* DBGU not capable of receive DMA */ .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), }; static u64 dbgu_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_dbgu_device = { .name = "atmel_usart", .id = 0, .dev = { .dma_mask = &dbgu_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &dbgu_data, }, .resource = dbgu_resources, .num_resources = ARRAY_SIZE(dbgu_resources), }; static inline void configure_dbgu_pins(void) { at91_set_A_periph(AT91_PIN_PB14, 0); /* DRXD */ at91_set_A_periph(AT91_PIN_PB15, 1); /* DTXD */ } static struct resource uart0_resources[] = { [0] = { .start = AT91SAM9260_BASE_US0, .end = AT91SAM9260_BASE_US0 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US0, .end = AT91SAM9260_ID_US0, .flags = IORESOURCE_IRQ, }, }; 
static struct atmel_uart_data uart0_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart0_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart0_device = { .name = "atmel_usart", .id = 1, .dev = { .dma_mask = &uart0_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart0_data, }, .resource = uart0_resources, .num_resources = ARRAY_SIZE(uart0_resources), }; static inline void configure_usart0_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB4, 1); /* TXD0 */ at91_set_A_periph(AT91_PIN_PB5, 0); /* RXD0 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PB26, 0); /* RTS0 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PB27, 0); /* CTS0 */ if (pins & ATMEL_UART_DTR) at91_set_A_periph(AT91_PIN_PB24, 0); /* DTR0 */ if (pins & ATMEL_UART_DSR) at91_set_A_periph(AT91_PIN_PB22, 0); /* DSR0 */ if (pins & ATMEL_UART_DCD) at91_set_A_periph(AT91_PIN_PB23, 0); /* DCD0 */ if (pins & ATMEL_UART_RI) at91_set_A_periph(AT91_PIN_PB25, 0); /* RI0 */ } static struct resource uart1_resources[] = { [0] = { .start = AT91SAM9260_BASE_US1, .end = AT91SAM9260_BASE_US1 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US1, .end = AT91SAM9260_ID_US1, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart1_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart1_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart1_device = { .name = "atmel_usart", .id = 2, .dev = { .dma_mask = &uart1_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart1_data, }, .resource = uart1_resources, .num_resources = ARRAY_SIZE(uart1_resources), }; static inline void configure_usart1_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB6, 1); /* TXD1 */ at91_set_A_periph(AT91_PIN_PB7, 0); /* RXD1 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PB28, 0); /* RTS1 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PB29, 0); /* CTS1 */ } static struct resource 
uart2_resources[] = { [0] = { .start = AT91SAM9260_BASE_US2, .end = AT91SAM9260_BASE_US2 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US2, .end = AT91SAM9260_ID_US2, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart2_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart2_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart2_device = { .name = "atmel_usart", .id = 3, .dev = { .dma_mask = &uart2_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart2_data, }, .resource = uart2_resources, .num_resources = ARRAY_SIZE(uart2_resources), }; static inline void configure_usart2_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB8, 1); /* TXD2 */ at91_set_A_periph(AT91_PIN_PB9, 0); /* RXD2 */ if (pins & ATMEL_UART_RTS) at91_set_A_periph(AT91_PIN_PA4, 0); /* RTS2 */ if (pins & ATMEL_UART_CTS) at91_set_A_periph(AT91_PIN_PA5, 0); /* CTS2 */ } static struct resource uart3_resources[] = { [0] = { .start = AT91SAM9260_BASE_US3, .end = AT91SAM9260_BASE_US3 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US3, .end = AT91SAM9260_ID_US3, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart3_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart3_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart3_device = { .name = "atmel_usart", .id = 4, .dev = { .dma_mask = &uart3_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart3_data, }, .resource = uart3_resources, .num_resources = ARRAY_SIZE(uart3_resources), }; static inline void configure_usart3_pins(unsigned pins) { at91_set_A_periph(AT91_PIN_PB10, 1); /* TXD3 */ at91_set_A_periph(AT91_PIN_PB11, 0); /* RXD3 */ if (pins & ATMEL_UART_RTS) at91_set_B_periph(AT91_PIN_PC8, 0); /* RTS3 */ if (pins & ATMEL_UART_CTS) at91_set_B_periph(AT91_PIN_PC10, 0); /* CTS3 */ } static struct resource uart4_resources[] = { [0] = { .start = AT91SAM9260_BASE_US4, .end = 
AT91SAM9260_BASE_US4 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US4, .end = AT91SAM9260_ID_US4, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart4_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart4_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart4_device = { .name = "atmel_usart", .id = 5, .dev = { .dma_mask = &uart4_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart4_data, }, .resource = uart4_resources, .num_resources = ARRAY_SIZE(uart4_resources), }; static inline void configure_usart4_pins(void) { at91_set_B_periph(AT91_PIN_PA31, 1); /* TXD4 */ at91_set_B_periph(AT91_PIN_PA30, 0); /* RXD4 */ } static struct resource uart5_resources[] = { [0] = { .start = AT91SAM9260_BASE_US5, .end = AT91SAM9260_BASE_US5 + SZ_16K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AT91SAM9260_ID_US5, .end = AT91SAM9260_ID_US5, .flags = IORESOURCE_IRQ, }, }; static struct atmel_uart_data uart5_data = { .use_dma_tx = 1, .use_dma_rx = 1, }; static u64 uart5_dmamask = DMA_BIT_MASK(32); static struct platform_device at91sam9260_uart5_device = { .name = "atmel_usart", .id = 6, .dev = { .dma_mask = &uart5_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &uart5_data, }, .resource = uart5_resources, .num_resources = ARRAY_SIZE(uart5_resources), }; static inline void configure_usart5_pins(void) { at91_set_A_periph(AT91_PIN_PB12, 1); /* TXD5 */ at91_set_A_periph(AT91_PIN_PB13, 0); /* RXD5 */ } static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */ struct platform_device *atmel_default_console_device; /* the serial console device */ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) { struct platform_device *pdev; switch (id) { case 0: /* DBGU */ pdev = &at91sam9260_dbgu_device; configure_dbgu_pins(); at91_clock_associate("mck", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US0: pdev = 
&at91sam9260_uart0_device; configure_usart0_pins(pins); at91_clock_associate("usart0_clk", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US1: pdev = &at91sam9260_uart1_device; configure_usart1_pins(pins); at91_clock_associate("usart1_clk", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US2: pdev = &at91sam9260_uart2_device; configure_usart2_pins(pins); at91_clock_associate("usart2_clk", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US3: pdev = &at91sam9260_uart3_device; configure_usart3_pins(pins); at91_clock_associate("usart3_clk", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US4: pdev = &at91sam9260_uart4_device; configure_usart4_pins(); at91_clock_associate("usart4_clk", &pdev->dev, "usart"); break; case AT91SAM9260_ID_US5: pdev = &at91sam9260_uart5_device; configure_usart5_pins(); at91_clock_associate("usart5_clk", &pdev->dev, "usart"); break; default: return; } pdev->id = portnr; /* update to mapped ID */ if (portnr < ATMEL_MAX_UART) at91_uarts[portnr] = pdev; } void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) atmel_default_console_device = at91_uarts[portnr]; } void __init at91_add_device_serial(void) { int i; for (i = 0; i < ATMEL_MAX_UART; i++) { if (at91_uarts[i]) platform_device_register(at91_uarts[i]); } if (!atmel_default_console_device) printk(KERN_INFO "AT91: No default serial console defined.\n"); } #else void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} void __init at91_set_serial_console(unsigned portnr) {} void __init at91_add_device_serial(void) {} #endif /* -------------------------------------------------------------------- * CF/IDE * -------------------------------------------------------------------- */ #if defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE) || \ defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \ defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) static struct at91_cf_data cf0_data; static struct resource 
cf0_resources[] = { [0] = { .start = AT91_CHIPSELECT_4, .end = AT91_CHIPSELECT_4 + SZ_256M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device cf0_device = { .id = 0, .dev = { .platform_data = &cf0_data, }, .resource = cf0_resources, .num_resources = ARRAY_SIZE(cf0_resources), }; static struct at91_cf_data cf1_data; static struct resource cf1_resources[] = { [0] = { .start = AT91_CHIPSELECT_5, .end = AT91_CHIPSELECT_5 + SZ_256M - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device cf1_device = { .id = 1, .dev = { .platform_data = &cf1_data, }, .resource = cf1_resources, .num_resources = ARRAY_SIZE(cf1_resources), }; void __init at91_add_device_cf(struct at91_cf_data *data) { struct platform_device *pdev; unsigned long csa; if (!data) return; csa = at91_sys_read(AT91_MATRIX_EBICSA); switch (data->chipselect) { case 4: at91_set_multi_drive(AT91_PIN_PC8, 0); at91_set_A_periph(AT91_PIN_PC8, 0); csa |= AT91_MATRIX_CS4A_SMC_CF1; cf0_data = *data; pdev = &cf0_device; break; case 5: at91_set_multi_drive(AT91_PIN_PC9, 0); at91_set_A_periph(AT91_PIN_PC9, 0); csa |= AT91_MATRIX_CS5A_SMC_CF2; cf1_data = *data; pdev = &cf1_device; break; default: printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n", data->chipselect); return; } at91_sys_write(AT91_MATRIX_EBICSA, csa); if (data->rst_pin) { at91_set_multi_drive(data->rst_pin, 0); at91_set_gpio_output(data->rst_pin, 1); } if (data->irq_pin) { at91_set_gpio_input(data->irq_pin, 0); at91_set_deglitch(data->irq_pin, 1); } if (data->det_pin) { at91_set_gpio_input(data->det_pin, 0); at91_set_deglitch(data->det_pin, 1); } at91_set_B_periph(AT91_PIN_PC6, 0); /* CFCE1 */ at91_set_B_periph(AT91_PIN_PC7, 0); /* CFCE2 */ at91_set_A_periph(AT91_PIN_PC10, 0); /* CFRNW */ at91_set_A_periph(AT91_PIN_PC15, 1); /* NWAIT */ if (data->flags & AT91_CF_TRUE_IDE) #if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) pdev->name = "pata_at91"; #elif defined(CONFIG_BLK_DEV_IDE_AT91) || 
defined(CONFIG_BLK_DEV_IDE_AT91_MODULE) pdev->name = "at91_ide"; #else #warning "board requires AT91_CF_TRUE_IDE: enable either at91_ide or pata_at91" #endif else pdev->name = "at91_cf"; platform_device_register(pdev); } #else void __init at91_add_device_cf(struct at91_cf_data * data) {} #endif /* -------------------------------------------------------------------- */ /* * These devices are always present and don't need any board-specific * setup. */ static int __init at91_add_standard_devices(void) { at91_add_device_rtt(); at91_add_device_watchdog(); at91_add_device_tc(); return 0; } arch_initcall(at91_add_standard_devices);
gpl-2.0
Snuzzo/Kitten_Kernel
drivers/video/backlight/ltv350qv.c
3034
8402
/* * Power control for Samsung LTV350QV Quarter VGA LCD Panel * * Copyright (C) 2006, 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/lcd.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include "ltv350qv.h" #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) struct ltv350qv { struct spi_device *spi; u8 *buffer; int power; struct lcd_device *ld; }; /* * The power-on and power-off sequences are taken from the * LTV350QV-F04 data sheet from Samsung. The register definitions are * taken from the S6F2002 command list also from Samsung. Both * documents are distributed with the AVR32 Linux BSP CD from Atmel. * * There's still some voodoo going on here, but it's a lot better than * in the first incarnation of the driver where all we had was the raw * numbers from the initialization sequence. 
*/ static int ltv350qv_write_reg(struct ltv350qv *lcd, u8 reg, u16 val) { struct spi_message msg; struct spi_transfer index_xfer = { .len = 3, .cs_change = 1, }; struct spi_transfer value_xfer = { .len = 3, }; spi_message_init(&msg); /* register index */ lcd->buffer[0] = LTV_OPC_INDEX; lcd->buffer[1] = 0x00; lcd->buffer[2] = reg & 0x7f; index_xfer.tx_buf = lcd->buffer; spi_message_add_tail(&index_xfer, &msg); /* register value */ lcd->buffer[4] = LTV_OPC_DATA; lcd->buffer[5] = val >> 8; lcd->buffer[6] = val; value_xfer.tx_buf = lcd->buffer + 4; spi_message_add_tail(&value_xfer, &msg); return spi_sync(lcd->spi, &msg); } /* The comments are taken straight from the data sheet */ static int ltv350qv_power_on(struct ltv350qv *lcd) { int ret; /* Power On Reset Display off State */ if (ltv350qv_write_reg(lcd, LTV_PWRCTL1, 0x0000)) goto err; msleep(15); /* Power Setting Function 1 */ if (ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE)) goto err; if (ltv350qv_write_reg(lcd, LTV_PWRCTL2, LTV_VCOML_ENABLE)) goto err_power1; /* Power Setting Function 2 */ if (ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE | LTV_DRIVE_CURRENT(5) | LTV_SUPPLY_CURRENT(5))) goto err_power2; msleep(55); /* Instruction Setting */ ret = ltv350qv_write_reg(lcd, LTV_IFCTL, LTV_NMD | LTV_REV | LTV_NL(0x1d)); ret |= ltv350qv_write_reg(lcd, LTV_DATACTL, LTV_DS_SAME | LTV_CHS_480 | LTV_DF_RGB | LTV_RGB_BGR); ret |= ltv350qv_write_reg(lcd, LTV_ENTRY_MODE, LTV_VSPL_ACTIVE_LOW | LTV_HSPL_ACTIVE_LOW | LTV_DPL_SAMPLE_RISING | LTV_EPL_ACTIVE_LOW | LTV_SS_RIGHT_TO_LEFT); ret |= ltv350qv_write_reg(lcd, LTV_GATECTL1, LTV_CLW(3)); ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2, LTV_NW_INV_1LINE | LTV_FWI(3)); ret |= ltv350qv_write_reg(lcd, LTV_VBP, 0x000a); ret |= ltv350qv_write_reg(lcd, LTV_HBP, 0x0021); ret |= ltv350qv_write_reg(lcd, LTV_SOTCTL, LTV_SDT(3) | LTV_EQ(0)); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(0), 0x0103); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(1), 0x0301); ret |= 
ltv350qv_write_reg(lcd, LTV_GAMMA(2), 0x1f0f); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(3), 0x1f0f); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(4), 0x0707); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(5), 0x0307); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(6), 0x0707); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(7), 0x0000); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(8), 0x0004); ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(9), 0x0000); if (ret) goto err_settings; /* Wait more than 2 frames */ msleep(20); /* Display On Sequence */ ret = ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE | LTV_VCOMOUT_ENABLE | LTV_POWER_ON | LTV_DRIVE_CURRENT(5) | LTV_SUPPLY_CURRENT(5)); ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2, LTV_NW_INV_1LINE | LTV_DSC | LTV_FWI(3)); if (ret) goto err_disp_on; /* Display should now be ON. Phew. */ return 0; err_disp_on: /* * Try to recover. Error handling probably isn't very useful * at this point, just make a best effort to switch the panel * off. */ ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE | LTV_DRIVE_CURRENT(5) | LTV_SUPPLY_CURRENT(5)); ltv350qv_write_reg(lcd, LTV_GATECTL2, LTV_NW_INV_1LINE | LTV_FWI(3)); err_settings: err_power2: err_power1: ltv350qv_write_reg(lcd, LTV_PWRCTL2, 0x0000); msleep(1); err: ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE); return -EIO; } static int ltv350qv_power_off(struct ltv350qv *lcd) { int ret; /* Display Off Sequence */ ret = ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE | LTV_DRIVE_CURRENT(5) | LTV_SUPPLY_CURRENT(5)); ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2, LTV_NW_INV_1LINE | LTV_FWI(3)); /* Power down setting 1 */ ret |= ltv350qv_write_reg(lcd, LTV_PWRCTL2, 0x0000); /* Wait at least 1 ms */ msleep(1); /* Power down setting 2 */ ret |= ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE); /* * No point in trying to recover here. If we can't switch the * panel off, what are we supposed to do other than inform the * user about the failure? 
*/ if (ret) return -EIO; /* Display power should now be OFF */ return 0; } static int ltv350qv_power(struct ltv350qv *lcd, int power) { int ret = 0; if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) ret = ltv350qv_power_on(lcd); else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) ret = ltv350qv_power_off(lcd); if (!ret) lcd->power = power; return ret; } static int ltv350qv_set_power(struct lcd_device *ld, int power) { struct ltv350qv *lcd = lcd_get_data(ld); return ltv350qv_power(lcd, power); } static int ltv350qv_get_power(struct lcd_device *ld) { struct ltv350qv *lcd = lcd_get_data(ld); return lcd->power; } static struct lcd_ops ltv_ops = { .get_power = ltv350qv_get_power, .set_power = ltv350qv_set_power, }; static int __devinit ltv350qv_probe(struct spi_device *spi) { struct ltv350qv *lcd; struct lcd_device *ld; int ret; lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL); if (!lcd) return -ENOMEM; lcd->spi = spi; lcd->power = FB_BLANK_POWERDOWN; lcd->buffer = kzalloc(8, GFP_KERNEL); if (!lcd->buffer) { ret = -ENOMEM; goto out_free_lcd; } ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); if (IS_ERR(ld)) { ret = PTR_ERR(ld); goto out_free_buffer; } lcd->ld = ld; ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK); if (ret) goto out_unregister; dev_set_drvdata(&spi->dev, lcd); return 0; out_unregister: lcd_device_unregister(ld); out_free_buffer: kfree(lcd->buffer); out_free_lcd: kfree(lcd); return ret; } static int __devexit ltv350qv_remove(struct spi_device *spi) { struct ltv350qv *lcd = dev_get_drvdata(&spi->dev); ltv350qv_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->ld); kfree(lcd->buffer); kfree(lcd); return 0; } #ifdef CONFIG_PM static int ltv350qv_suspend(struct spi_device *spi, pm_message_t state) { struct ltv350qv *lcd = dev_get_drvdata(&spi->dev); return ltv350qv_power(lcd, FB_BLANK_POWERDOWN); } static int ltv350qv_resume(struct spi_device *spi) { struct ltv350qv *lcd = dev_get_drvdata(&spi->dev); return ltv350qv_power(lcd, 
FB_BLANK_UNBLANK); } #else #define ltv350qv_suspend NULL #define ltv350qv_resume NULL #endif /* Power down all displays on reboot, poweroff or halt */ static void ltv350qv_shutdown(struct spi_device *spi) { struct ltv350qv *lcd = dev_get_drvdata(&spi->dev); ltv350qv_power(lcd, FB_BLANK_POWERDOWN); } static struct spi_driver ltv350qv_driver = { .driver = { .name = "ltv350qv", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ltv350qv_probe, .remove = __devexit_p(ltv350qv_remove), .shutdown = ltv350qv_shutdown, .suspend = ltv350qv_suspend, .resume = ltv350qv_resume, }; static int __init ltv350qv_init(void) { return spi_register_driver(&ltv350qv_driver); } static void __exit ltv350qv_exit(void) { spi_unregister_driver(&ltv350qv_driver); } module_init(ltv350qv_init); module_exit(ltv350qv_exit); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_DESCRIPTION("Samsung LTV350QV LCD Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:ltv350qv");
gpl-2.0
Soorma07/linux-davinci
drivers/mtd/ubi/gluebi.c
3034
15070
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel */ /* * This is a small driver which implements fake MTD devices on top of UBI * volumes. This sounds strange, but it is in fact quite useful to make * MTD-oriented software (including all the legacy software) work on top of * UBI. * * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The * eraseblock size is equivalent to the logical eraseblock size of the volume. */ #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mtd/ubi.h> #include <linux/mtd/mtd.h> #include "ubi-media.h" #define err_msg(fmt, ...) \ printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ current->pid, __func__, ##__VA_ARGS__) /** * struct gluebi_device - a gluebi device description data structure. 
* @mtd: emulated MTD device description object * @refcnt: gluebi device reference count * @desc: UBI volume descriptor * @ubi_num: UBI device number this gluebi device works on * @vol_id: ID of UBI volume this gluebi device works on * @list: link in a list of gluebi devices */ struct gluebi_device { struct mtd_info mtd; int refcnt; struct ubi_volume_desc *desc; int ubi_num; int vol_id; struct list_head list; }; /* List of all gluebi devices */ static LIST_HEAD(gluebi_devices); static DEFINE_MUTEX(devices_mutex); /** * find_gluebi_nolock - find a gluebi device. * @ubi_num: UBI device number * @vol_id: volume ID * * This function seraches for gluebi device corresponding to UBI device * @ubi_num and UBI volume @vol_id. Returns the gluebi device description * object in case of success and %NULL in case of failure. The caller has to * have the &devices_mutex locked. */ static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) { struct gluebi_device *gluebi; list_for_each_entry(gluebi, &gluebi_devices, list) if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) return gluebi; return NULL; } /** * gluebi_get_device - get MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being opened and * implements the MTD get_device() operation. Returns zero in case of success * and a negative error code in case of failure. */ static int gluebi_get_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; int ubi_mode = UBI_READONLY; if (!try_module_get(THIS_MODULE)) return -ENODEV; if (mtd->flags & MTD_WRITEABLE) ubi_mode = UBI_READWRITE; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); if (gluebi->refcnt > 0) { /* * The MTD device is already referenced and this is just one * more reference. MTD allows many users to open the same * volume simultaneously and do not distinguish between * readers/writers/exclusive openers as UBI does. 
So we do not * open the UBI volume again - just increase the reference * counter and return. */ gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /* * This is the first reference to this UBI volume via the MTD device * interface. Open the corresponding volume in read-write mode. */ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, ubi_mode); if (IS_ERR(gluebi->desc)) { mutex_unlock(&devices_mutex); module_put(THIS_MODULE); return PTR_ERR(gluebi->desc); } gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_put_device - put MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being put. Returns * zero in case of success and a negative error code in case of failure. */ static void gluebi_put_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); gluebi->refcnt -= 1; if (gluebi->refcnt == 0) ubi_close_volume(gluebi->desc); module_put(THIS_MODULE); mutex_unlock(&devices_mutex); } /** * gluebi_read - read operation of emulated MTD devices. * @mtd: MTD device description object * @from: absolute offset from where to read * @len: how many bytes to read * @retlen: count of read bytes is returned here * @buf: buffer to store the read data * * This function returns zero in case of success and a negative error code in * case of failure. 
*/ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { int err = 0, lnum, offs, total_read; struct gluebi_device *gluebi; if (len < 0 || from < 0 || from + len > mtd->size) return -EINVAL; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(from, mtd->erasesize, &offs); total_read = len; while (total_read) { size_t to_read = mtd->erasesize - offs; if (to_read > total_read) to_read = total_read; err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); if (err) break; lnum += 1; offs = 0; total_read -= to_read; buf += to_read; } *retlen = len - total_read; return err; } /** * gluebi_write - write operation of emulated MTD devices. * @mtd: MTD device description object * @to: absolute offset where to write * @len: how many bytes to write * @retlen: count of written bytes is returned here * @buf: buffer with data to write * * This function returns zero in case of success and a negative error code in * case of failure. */ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { int err = 0, lnum, offs, total_written; struct gluebi_device *gluebi; if (len < 0 || to < 0 || len + to > mtd->size) return -EINVAL; gluebi = container_of(mtd, struct gluebi_device, mtd); if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; lnum = div_u64_rem(to, mtd->erasesize, &offs); if (len % mtd->writesize || offs % mtd->writesize) return -EINVAL; total_written = len; while (total_written) { size_t to_write = mtd->erasesize - offs; if (to_write > total_written) to_write = total_written; err = ubi_write(gluebi->desc, lnum, buf, offs, to_write); if (err) break; lnum += 1; offs = 0; total_written -= to_write; buf += to_write; } *retlen = len - total_written; return err; } /** * gluebi_erase - erase operation of emulated MTD devices. * @mtd: the MTD device description object * @instr: the erase operation description * * This function calls the erase callback when finishes. 
Returns zero in case * of success and a negative error code in case of failure. */ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) { int err, i, lnum, count; struct gluebi_device *gluebi; if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) return -EINVAL; if (instr->len < 0 || instr->addr + instr->len > mtd->size) return -EINVAL; if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) return -EINVAL; lnum = mtd_div_by_eb(instr->addr, mtd); count = mtd_div_by_eb(instr->len, mtd); gluebi = container_of(mtd, struct gluebi_device, mtd); if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; for (i = 0; i < count - 1; i++) { err = ubi_leb_unmap(gluebi->desc, lnum + i); if (err) goto out_err; } /* * MTD erase operations are synchronous, so we have to make sure the * physical eraseblock is wiped out. * * Thus, perform leb_erase instead of leb_unmap operation - leb_erase * will wait for the end of operations */ err = ubi_leb_erase(gluebi->desc, lnum + i); if (err) goto out_err; instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; out_err: instr->state = MTD_ERASE_FAILED; instr->fail_addr = (long long)lnum * mtd->erasesize; return err; } /** * gluebi_create - create a gluebi device for an UBI volume. * @di: UBI device description object * @vi: UBI volume description object * * This function is called when a new UBI volume is created in order to create * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure. 
*/ static int gluebi_create(struct ubi_device_info *di, struct ubi_volume_info *vi) { struct gluebi_device *gluebi, *g; struct mtd_info *mtd; gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL); if (!gluebi) return -ENOMEM; mtd = &gluebi->mtd; mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL); if (!mtd->name) { kfree(gluebi); return -ENOMEM; } gluebi->vol_id = vi->vol_id; gluebi->ubi_num = vi->ubi_num; mtd->type = MTD_UBIVOLUME; if (!di->ro_mode) mtd->flags = MTD_WRITEABLE; mtd->owner = THIS_MODULE; mtd->writesize = di->min_io_size; mtd->erasesize = vi->usable_leb_size; mtd->read = gluebi_read; mtd->write = gluebi_write; mtd->erase = gluebi_erase; mtd->get_device = gluebi_get_device; mtd->put_device = gluebi_put_device; /* * In case of dynamic a volume, MTD device size is just volume size. In * case of a static volume the size is equivalent to the amount of data * bytes. */ if (vi->vol_type == UBI_DYNAMIC_VOLUME) mtd->size = (unsigned long long)vi->usable_leb_size * vi->size; else mtd->size = vi->used_bytes; /* Just a sanity check - make sure this gluebi device does not exist */ mutex_lock(&devices_mutex); g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (g) err_msg("gluebi MTD device %d form UBI device %d volume %d " "already exists", g->mtd.index, vi->ubi_num, vi->vol_id); mutex_unlock(&devices_mutex); if (mtd_device_register(mtd, NULL, 0)) { err_msg("cannot add MTD device"); kfree(mtd->name); kfree(gluebi); return -ENFILE; } mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return 0; } /** * gluebi_remove - remove a gluebi device. * @vi: UBI volume description object * * This function is called when an UBI volume is removed and it removes * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure. 
*/ static int gluebi_remove(struct ubi_volume_info *vi) { int err = 0; struct mtd_info *mtd; struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { err_msg("got remove notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); err = -ENOENT; } else if (gluebi->refcnt) err = -EBUSY; else list_del(&gluebi->list); mutex_unlock(&devices_mutex); if (err) return err; mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) { err_msg("cannot remove fake MTD device %d, UBI device %d, " "volume %d, error %d", mtd->index, gluebi->ubi_num, gluebi->vol_id, err); mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return err; } kfree(mtd->name); kfree(gluebi); return 0; } /** * gluebi_updated - UBI volume was updated notifier. * @vi: volume info structure * * This function is called every time an UBI volume is updated. It does nothing * if te volume @vol is dynamic, and changes MTD device size if the * volume is static. This is needed because static volumes cannot be read past * data they contain. This function returns zero in case of success and a * negative error code in case of error. */ static int gluebi_updated(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } if (vi->vol_type == UBI_STATIC_VOLUME) gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_resized - UBI volume was re-sized notifier. * @vi: volume info structure * * This function is called every time an UBI volume is re-size. It changes the * corresponding fake MTD device size. This function returns zero in case of * success and a negative error code in case of error. 
*/ static int gluebi_resized(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_notify - UBI notification handler. * @nb: registered notifier block * @l: notification type * @ptr: pointer to the &struct ubi_notification object */ static int gluebi_notify(struct notifier_block *nb, unsigned long l, void *ns_ptr) { struct ubi_notification *nt = ns_ptr; switch (l) { case UBI_VOLUME_ADDED: gluebi_create(&nt->di, &nt->vi); break; case UBI_VOLUME_REMOVED: gluebi_remove(&nt->vi); break; case UBI_VOLUME_RESIZED: gluebi_resized(&nt->vi); break; case UBI_VOLUME_UPDATED: gluebi_updated(&nt->vi); break; default: break; } return NOTIFY_OK; } static struct notifier_block gluebi_notifier = { .notifier_call = gluebi_notify, }; static int __init ubi_gluebi_init(void) { return ubi_register_volume_notifier(&gluebi_notifier, 0); } static void __exit ubi_gluebi_exit(void) { struct gluebi_device *gluebi, *g; list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { int err; struct mtd_info *mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) err_msg("error %d while removing gluebi MTD device %d, " "UBI device %d, volume %d - ignoring", err, mtd->index, gluebi->ubi_num, gluebi->vol_id); kfree(mtd->name); kfree(gluebi); } ubi_unregister_volume_notifier(&gluebi_notifier); } module_init(ubi_gluebi_init); module_exit(ubi_gluebi_exit); MODULE_DESCRIPTION("MTD emulation layer over UBI volumes"); MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); MODULE_LICENSE("GPL");
gpl-2.0
DmitryADP/diff_qc750
kernel/drivers/media/dvb/dvb-usb/az6027.c
3034
28108
/* DVB USB compliant Linux driver for the AZUREWAVE DVB-S/S2 USB2.0 (AZ6027) * receiver. * * Copyright (C) 2009 Adams.Xu <adams.xu@azwave.com.cn> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "az6027.h" #include "stb0899_drv.h" #include "stb0899_reg.h" #include "stb0899_cfg.h" #include "stb6100.h" #include "stb6100_cfg.h" #include "dvb_ca_en50221.h" int dvb_usb_az6027_debug; module_param_named(debug, dvb_usb_az6027_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct az6027_device_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; u8 power_state; }; static const struct stb0899_s1_reg az6027_stb0899_s1_init_1[] = { /* 0x0000000b, SYSREG */ { STB0899_DEV_ID , 0x30 }, { STB0899_DISCNTRL1 , 0x32 }, { STB0899_DISCNTRL2 , 0x80 }, { STB0899_DISRX_ST0 , 0x04 }, { STB0899_DISRX_ST1 , 0x00 }, { STB0899_DISPARITY , 0x00 }, { STB0899_DISFIFO , 0x00 }, { STB0899_DISSTATUS , 0x20 }, { STB0899_DISF22 , 0x99 }, { STB0899_DISF22RX , 0xa8 }, /* SYSREG ? 
*/ { STB0899_ACRPRESC , 0x11 }, { STB0899_ACRDIV1 , 0x0a }, { STB0899_ACRDIV2 , 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG , 0x00 }, { STB0899_MODECFG , 0x00 }, { STB0899_IRQSTATUS_3 , 0xfe }, { STB0899_IRQSTATUS_2 , 0x03 }, { STB0899_IRQSTATUS_1 , 0x7c }, { STB0899_IRQSTATUS_0 , 0xf4 }, { STB0899_IRQMSK_3 , 0xf3 }, { STB0899_IRQMSK_2 , 0xfc }, { STB0899_IRQMSK_1 , 0xff }, { STB0899_IRQMSK_0 , 0xff }, { STB0899_IRQCFG , 0x00 }, { STB0899_I2CCFG , 0x88 }, { STB0899_I2CRPT , 0x58 }, { STB0899_IOPVALUE5 , 0x00 }, { STB0899_IOPVALUE4 , 0x33 }, { STB0899_IOPVALUE3 , 0x6d }, { STB0899_IOPVALUE2 , 0x90 }, { STB0899_IOPVALUE1 , 0x60 }, { STB0899_IOPVALUE0 , 0x00 }, { STB0899_GPIO00CFG , 0x82 }, { STB0899_GPIO01CFG , 0x82 }, { STB0899_GPIO02CFG , 0x82 }, { STB0899_GPIO03CFG , 0x82 }, { STB0899_GPIO04CFG , 0x82 }, { STB0899_GPIO05CFG , 0x82 }, { STB0899_GPIO06CFG , 0x82 }, { STB0899_GPIO07CFG , 0x82 }, { STB0899_GPIO08CFG , 0x82 }, { STB0899_GPIO09CFG , 0x82 }, { STB0899_GPIO10CFG , 0x82 }, { STB0899_GPIO11CFG , 0x82 }, { STB0899_GPIO12CFG , 0x82 }, { STB0899_GPIO13CFG , 0x82 }, { STB0899_GPIO14CFG , 0x82 }, { STB0899_GPIO15CFG , 0x82 }, { STB0899_GPIO16CFG , 0x82 }, { STB0899_GPIO17CFG , 0x82 }, { STB0899_GPIO18CFG , 0x82 }, { STB0899_GPIO19CFG , 0x82 }, { STB0899_GPIO20CFG , 0x82 }, { STB0899_SDATCFG , 0xb8 }, { STB0899_SCLTCFG , 0xba }, { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */ { STB0899_DIRCLKCFG , 0x82 }, { STB0899_CLKOUT27CFG , 0x7e }, { STB0899_STDBYCFG , 0x82 }, { STB0899_CS0CFG , 0x82 }, { STB0899_CS1CFG , 0x82 }, { STB0899_DISEQCOCFG , 0x20 }, { STB0899_GPIO32CFG , 0x82 }, { STB0899_GPIO33CFG , 0x82 }, { STB0899_GPIO34CFG , 0x82 }, { STB0899_GPIO35CFG , 0x82 }, { STB0899_GPIO36CFG , 0x82 }, { STB0899_GPIO37CFG , 0x82 }, { STB0899_GPIO38CFG , 0x82 }, { STB0899_GPIO39CFG , 0x82 }, { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 
99MHz */ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */ { STB0899_FILTCTRL , 0x00 }, { STB0899_SYSCTRL , 0x01 }, { STB0899_STOPCLK1 , 0x20 }, { STB0899_STOPCLK2 , 0x00 }, { STB0899_INTBUFSTATUS , 0x00 }, { STB0899_INTBUFCTRL , 0x0a }, { 0xffff , 0xff }, }; static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = { { STB0899_DEMOD , 0x00 }, { STB0899_RCOMPC , 0xc9 }, { STB0899_AGC1CN , 0x01 }, { STB0899_AGC1REF , 0x10 }, { STB0899_RTC , 0x23 }, { STB0899_TMGCFG , 0x4e }, { STB0899_AGC2REF , 0x34 }, { STB0899_TLSR , 0x84 }, { STB0899_CFD , 0xf7 }, { STB0899_ACLC , 0x87 }, { STB0899_BCLC , 0x94 }, { STB0899_EQON , 0x41 }, { STB0899_LDT , 0xf1 }, { STB0899_LDT2 , 0xe3 }, { STB0899_EQUALREF , 0xb4 }, { STB0899_TMGRAMP , 0x10 }, { STB0899_TMGTHD , 0x30 }, { STB0899_IDCCOMP , 0xfd }, { STB0899_QDCCOMP , 0xff }, { STB0899_POWERI , 0x0c }, { STB0899_POWERQ , 0x0f }, { STB0899_RCOMP , 0x6c }, { STB0899_AGCIQIN , 0x80 }, { STB0899_AGC2I1 , 0x06 }, { STB0899_AGC2I2 , 0x00 }, { STB0899_TLIR , 0x30 }, { STB0899_RTF , 0x7f }, { STB0899_DSTATUS , 0x00 }, { STB0899_LDI , 0xbc }, { STB0899_CFRM , 0xea }, { STB0899_CFRL , 0x31 }, { STB0899_NIRM , 0x2b }, { STB0899_NIRL , 0x80 }, { STB0899_ISYMB , 0x1d }, { STB0899_QSYMB , 0xa6 }, { STB0899_SFRH , 0x2f }, { STB0899_SFRM , 0x68 }, { STB0899_SFRL , 0x40 }, { STB0899_SFRUPH , 0x2f }, { STB0899_SFRUPM , 0x68 }, { STB0899_SFRUPL , 0x40 }, { STB0899_EQUAI1 , 0x02 }, { STB0899_EQUAQ1 , 0xff }, { STB0899_EQUAI2 , 0x04 }, { STB0899_EQUAQ2 , 0x05 }, { STB0899_EQUAI3 , 0x02 }, { STB0899_EQUAQ3 , 0xfd }, { STB0899_EQUAI4 , 0x03 }, { STB0899_EQUAQ4 , 0x07 }, { STB0899_EQUAI5 , 0x08 }, { STB0899_EQUAQ5 , 0xf5 }, { STB0899_DSTATUS2 , 0x00 }, { STB0899_VSTATUS , 0x00 }, { STB0899_VERROR , 0x86 }, { STB0899_IQSWAP , 0x2a }, { STB0899_ECNT1M , 0x00 }, { STB0899_ECNT1L , 0x00 }, { STB0899_ECNT2M , 0x00 }, { STB0899_ECNT2L , 0x00 }, { STB0899_ECNT3M , 0x0a }, { STB0899_ECNT3L , 0xad }, { STB0899_FECAUTO1 , 0x06 }, { 
STB0899_FECM , 0x01 }, { STB0899_VTH12 , 0xb0 }, { STB0899_VTH23 , 0x7a }, { STB0899_VTH34 , 0x58 }, { STB0899_VTH56 , 0x38 }, { STB0899_VTH67 , 0x34 }, { STB0899_VTH78 , 0x24 }, { STB0899_PRVIT , 0xff }, { STB0899_VITSYNC , 0x19 }, { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC , 0x42 }, { STB0899_RSLLC , 0x41 }, { STB0899_TSLPL , 0x12 }, { STB0899_TSCFGH , 0x0c }, { STB0899_TSCFGM , 0x00 }, { STB0899_TSCFGL , 0x00 }, { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL , 0x00 }, { STB0899_TSINHDELH , 0x02 }, { STB0899_TSINHDELM , 0x00 }, { STB0899_TSINHDELL , 0x00 }, { STB0899_TSLLSTKM , 0x1b }, { STB0899_TSLLSTKL , 0xb3 }, { STB0899_TSULSTKM , 0x00 }, { STB0899_TSULSTKL , 0x00 }, { STB0899_PCKLENUL , 0xbc }, { STB0899_PCKLENLL , 0xcc }, { STB0899_RSPCKLEN , 0xbd }, { STB0899_TSSTATUS , 0x90 }, { STB0899_ERRCTRL1 , 0xb6 }, { STB0899_ERRCTRL2 , 0x95 }, { STB0899_ERRCTRL3 , 0x8d }, { STB0899_DMONMSK1 , 0x27 }, { STB0899_DMONMSK0 , 0x03 }, { STB0899_DEMAPVIT , 0x5c }, { STB0899_PLPARM , 0x19 }, { STB0899_PDELCTRL , 0x48 }, { STB0899_PDELCTRL2 , 0x00 }, { STB0899_BBHCTRL1 , 0x00 }, { STB0899_BBHCTRL2 , 0x00 }, { STB0899_HYSTTHRESH , 0x77 }, { STB0899_MATCSTM , 0x00 }, { STB0899_MATCSTL , 0x00 }, { STB0899_UPLCSTM , 0x00 }, { STB0899_UPLCSTL , 0x00 }, { STB0899_DFLCSTM , 0x00 }, { STB0899_DFLCSTL , 0x00 }, { STB0899_SYNCCST , 0x00 }, { STB0899_SYNCDCSTM , 0x00 }, { STB0899_SYNCDCSTL , 0x00 }, { STB0899_ISI_ENTRY , 0x00 }, { STB0899_ISI_BIT_EN , 0x00 }, { STB0899_MATSTRM , 0xf0 }, { STB0899_MATSTRL , 0x02 }, { STB0899_UPLSTRM , 0x45 }, { STB0899_UPLSTRL , 0x60 }, { STB0899_DFLSTRM , 0xe3 }, { STB0899_DFLSTRL , 0x00 }, { STB0899_SYNCSTR , 0x47 }, { STB0899_SYNCDSTRM , 0x05 }, { STB0899_SYNCDSTRL , 0x18 }, { STB0899_CFGPDELSTATUS1 , 0x19 }, { STB0899_CFGPDELSTATUS2 , 0x2b }, { STB0899_BBFERRORM , 0x00 }, { STB0899_BBFERRORL , 0x01 }, { STB0899_UPKTERRORM , 0x00 }, { STB0899_UPKTERRORL , 0x00 }, { 0xffff , 0xff }, }; struct 
stb0899_config az6027_stb0899_config = { .init_dev = az6027_stb0899_s1_init_1, .init_s2_demod = stb0899_s2_init_2, .init_s1_demod = az6027_stb0899_s1_init_3, .init_s2_fec = stb0899_s2_init_4, .init_tst = stb0899_s1_init_5, .demod_address = 0xd0, /* 0x68, 0xd0 >> 1 */ .xtal_freq = 27000000, .inversion = IQ_SWAP_ON, /* 1 */ .lo_clk = 76500000, .hi_clk = 99000000, .esno_ave = STB0899_DVBS2_ESNO_AVE, .esno_quant = STB0899_DVBS2_ESNO_QUANT, .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE, .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE, .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD, .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ, .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK, .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF, .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT, .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS, .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET, .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS, .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER, .tuner_get_frequency = stb6100_get_frequency, .tuner_set_frequency = stb6100_set_frequency, .tuner_set_bandwidth = stb6100_set_bandwidth, .tuner_get_bandwidth = stb6100_get_bandwidth, .tuner_set_rfsiggain = NULL, }; struct stb6100_config az6027_stb6100_config = { .tuner_address = 0xc0, .refclock = 27000000, }; /* check for mutex FIXME */ int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret = -1; if (mutex_lock_interruptible(&d->usb_mutex)) return -EAGAIN; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, USB_TYPE_VENDOR | USB_DIR_IN, value, index, b, blen, 2000); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EIO; } else ret = 0; deb_xfer("in: req. 
%02x, val: %04x, ind: %04x, buffer: ", req, value, index); debug_dump(b, blen, deb_xfer); mutex_unlock(&d->usb_mutex); return ret; } static int az6027_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); debug_dump(b, blen, deb_xfer); if (mutex_lock_interruptible(&d->usb_mutex)) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, b, blen, 2000); if (ret != blen) { warn("usb out operation failed. (%d)", ret); mutex_unlock(&d->usb_mutex); return -EIO; } else{ mutex_unlock(&d->usb_mutex); return 0; } } static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 req; u16 value; u16 index; int blen; deb_info("%s %d", __func__, onoff); req = 0xBC; value = onoff; index = 0; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) warn("usb out operation failed. 
(%d)", ret); return ret; } /* keys for the enclosed remote control */ static struct rc_map_table rc_map_az6027_table[] = { { 0x01, KEY_1 }, { 0x02, KEY_2 }, }; /* remote control stuff (does not work with my box) */ static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { return 0; } /* int az6027_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 v = onoff; return az6027_usb_out_op(d,0xBC,v,3,NULL,1); } */ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC1; value = address; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EINVAL; } else { ret = b[0]; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6027_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value1; u16 index; int blen; deb_info("%s %d", __func__, slot); if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC2; value1 = address; index = value; blen = 0; ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); if (ret != 0) warn("usb out operation failed. 
(%d)", ret); mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC3; value = address; index = 0; blen = 2; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EINVAL; } else { if (b[0] == 0) warn("Read CI IO error"); ret = b[1]; deb_info("read cam data = %x from 0x%x", b[1], value); } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6027_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value1; u16 index; int blen; if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC4; value1 = address; index = value; blen = 0; ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; req = 0xC8; value = 0; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. 
(%d)", ret); ret = -EIO; } else{ ret = b[0]; } kfree(b); return ret; } static int az6027_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret, i; u8 req; u16 value; u16 index; int blen; mutex_lock(&state->ca_mutex); req = 0xC6; value = 1; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } msleep(500); req = 0xC6; value = 0; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } for (i = 0; i < 15; i++) { msleep(100); if (CI_CamReady(ca, slot)) { deb_info("CAM Ready"); break; } } msleep(5000); failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return 0; } static int az6027_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; deb_info("%s", __func__); mutex_lock(&state->ca_mutex); req = 0xC7; value = 1; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. 
(%d)", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC5; value = 0; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EIO; } else ret = 0; if (!ret && b[0] == 1) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static void az6027_ci_uninit(struct dvb_usb_device *d) { struct az6027_device_state *state; deb_info("%s", __func__); if (NULL == d) return; state = (struct az6027_device_state *)d->priv; if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int az6027_ci_init(struct dvb_usb_adapter *a) { struct dvb_usb_device *d = a->dev; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; deb_info("%s", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = az6027_ci_read_attribute_mem; state->ca.write_attribute_mem = az6027_ci_write_attribute_mem; state->ca.read_cam_control = az6027_ci_read_cam_control; state->ca.write_cam_control = az6027_ci_write_cam_control; state->ca.slot_reset = az6027_ci_slot_reset; state->ca.slot_shutdown = az6027_ci_slot_shutdown; state->ca.slot_ts_enable = az6027_ci_slot_ts_enable; state->ca.poll_slot_status = az6027_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&a->dvb_adap, &state->ca, 0, /* flags */ 1);/* n_slots */ if (ret != 0) { err("Cannot initialize CI: Error %d.", ret); 
memset(&state->ca, 0, sizeof(state->ca)); return ret; } deb_info("CI initialized."); return 0; } /* static int az6027_read_mac_addr(struct dvb_usb_device *d, u8 mac[6]) { az6027_usb_in_op(d, 0xb7, 6, 0, &mac[0], 6); return 0; } */ static int az6027_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { u8 buf; int ret; struct dvb_usb_adapter *adap = fe->dvb->priv; struct i2c_msg i2c_msg = { .addr = 0x99, .flags = 0, .buf = &buf, .len = 1 }; /* * 2 --18v * 1 --13v * 0 --off */ switch (voltage) { case SEC_VOLTAGE_13: buf = 1; ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; case SEC_VOLTAGE_18: buf = 2; ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; case SEC_VOLTAGE_OFF: buf = 0; ret = i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; default: return -EINVAL; } return 0; } static int az6027_frontend_poweron(struct dvb_usb_adapter *adap) { int ret; u8 req; u16 value; u16 index; int blen; req = 0xBC; value = 1; /* power on */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; return 0; } static int az6027_frontend_reset(struct dvb_usb_adapter *adap) { int ret; u8 req; u16 value; u16 index; int blen; /* reset demodulator */ req = 0xC0; value = 1; /* high */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; req = 0xC0; value = 0; /* low */ index = 3; blen = 0; msleep_interruptible(200); ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; msleep_interruptible(200); req = 0xC0; value = 1; /*high */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; msleep_interruptible(200); return 0; } static int az6027_frontend_tsbypass(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 req; u16 value; u16 index; int blen; /* TS passthrough */ req = 0xC7; value = onoff; index = 0; blen = 0; ret = 
az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; return 0; } static int az6027_frontend_attach(struct dvb_usb_adapter *adap) { az6027_frontend_poweron(adap); az6027_frontend_reset(adap); deb_info("adap = %p, dev = %p\n", adap, adap->dev); adap->fe = stb0899_attach(&az6027_stb0899_config, &adap->dev->i2c_adap); if (adap->fe) { deb_info("found STB0899 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb0899_config.demod_address); if (stb6100_attach(adap->fe, &az6027_stb6100_config, &adap->dev->i2c_adap)) { deb_info("found STB6100 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb6100_config.tuner_address); adap->fe->ops.set_voltage = az6027_set_voltage; az6027_ci_init(adap); } else { adap->fe = NULL; } } else warn("no front-end attached\n"); az6027_frontend_tsbypass(adap, 0); return 0; } static struct dvb_usb_device_properties az6027_properties; static void az6027_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6027_ci_uninit(d); dvb_usb_device_exit(intf); } static int az6027_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &az6027_properties, THIS_MODULE, NULL, adapter_nr); } /* I2C */ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0, j = 0, len = 0; int ret; u16 index; u16 value; int length; u8 req; u8 *data; data = kmalloc(256, GFP_KERNEL); if (!data) return -ENOMEM; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) { kfree(data); return -EAGAIN; } if (num > 2) warn("more than 2 i2c messages at a time is not handled yet. 
TODO."); for (i = 0; i < num; i++) { if (msg[i].addr == 0x99) { req = 0xBE; index = 0; value = msg[i].buf[0] & 0x00ff; length = 1; az6027_usb_out_op(d, req, value, index, data, length); } if (msg[i].addr == 0xd0) { /* write/read request */ if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { req = 0xB9; index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (msg[i].len << 8); length = msg[i + 1].len + 6; ret = az6027_usb_in_op(d, req, value, index, data, length); len = msg[i + 1].len; for (j = 0; j < len; j++) msg[i + 1].buf[j] = data[j + 5]; i++; } else { /* demod 16bit addr */ req = 0xBD; index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (2 << 8); length = msg[i].len - 2; len = msg[i].len - 2; for (j = 0; j < len; j++) data[j] = msg[i].buf[j + 2]; az6027_usb_out_op(d, req, value, index, data, length); } } if (msg[i].addr == 0xc0) { if (msg[i].flags & I2C_M_RD) { req = 0xB9; index = 0x0; value = msg[i].addr; length = msg[i].len + 6; ret = az6027_usb_in_op(d, req, value, index, data, length); len = msg[i].len; for (j = 0; j < len; j++) msg[i].buf[j] = data[j + 5]; } else { req = 0xBD; index = msg[i].buf[0] & 0x00FF; value = msg[i].addr + (1 << 8); length = msg[i].len - 1; len = msg[i].len - 1; for (j = 0; j < len; j++) data[j] = msg[i].buf[j + 1]; az6027_usb_out_op(d, req, value, index, data, length); } } } mutex_unlock(&d->i2c_mutex); kfree(data); return i; } static u32 az6027_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm az6027_i2c_algo = { .master_xfer = az6027_i2c_xfer, .functionality = az6027_i2c_func, }; int az6027_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { u8 *b; s16 ret; b = kmalloc(16, GFP_KERNEL); if (!b) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0xb7, USB_TYPE_VENDOR | USB_DIR_IN, 6, 0, b, 6, USB_CTRL_GET_TIMEOUT); 
*cold = ret <= 0; kfree(b); deb_info("cold: %d\n", *cold); return 0; } static struct usb_device_id az6027_usb_table[] = { { USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) }, { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) }, { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) }, { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) }, { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) }, { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT) }, { }, }; MODULE_DEVICE_TABLE(usb, az6027_usb_table); static struct dvb_usb_device_properties az6027_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-az6027-03.fw", .no_reconnect = 1, .size_of_priv = sizeof(struct az6027_device_state), .identify_state = az6027_identify_state, .num_adapters = 1, .adapter = { { .streaming_ctrl = az6027_streaming_ctrl, .frontend_attach = az6027_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 10, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, } }, /* .power_ctrl = az6027_power_ctrl, .read_mac_address = az6027_read_mac_addr, */ .rc.legacy = { .rc_map_table = rc_map_az6027_table, .rc_map_size = ARRAY_SIZE(rc_map_az6027_table), .rc_interval = 400, .rc_query = az6027_rc_query, }, .i2c_algo = &az6027_i2c_algo, .num_device_descs = 6, .devices = { { .name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)", .cold_ids = { &az6027_usb_table[0], NULL }, .warm_ids = { NULL }, }, { .name = "TERRATEC S7", .cold_ids = { &az6027_usb_table[1], NULL }, .warm_ids = { NULL }, }, { .name = "TERRATEC S7 MKII", .cold_ids = { &az6027_usb_table[2], NULL }, .warm_ids = { NULL }, }, { .name = "Technisat SkyStar USB 2 HD CI", .cold_ids = { &az6027_usb_table[3], NULL }, .warm_ids = { NULL }, }, { .name = "Technisat SkyStar USB 2 HD CI", .cold_ids = { &az6027_usb_table[4], NULL }, .warm_ids = { NULL }, }, { .name = "Elgato EyeTV Sat", .cold_ids = { &az6027_usb_table[5], NULL 
}, .warm_ids = { NULL }, }, { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver az6027_usb_driver = { .name = "dvb_usb_az6027", .probe = az6027_usb_probe, .disconnect = az6027_usb_disconnect, .id_table = az6027_usb_table, }; /* module stuff */ static int __init az6027_usb_module_init(void) { int result; result = usb_register(&az6027_usb_driver); if (result) { err("usb_register failed. (%d)", result); return result; } return 0; } static void __exit az6027_usb_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&az6027_usb_driver); } module_init(az6027_usb_module_init); module_exit(az6027_usb_module_exit); MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>"); MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
deafnote/android_kernel_huawei_u88251
drivers/watchdog/m54xx_wdt.c
3290
5334
/* * drivers/watchdog/m54xx_wdt.c * * Watchdog driver for ColdFire MCF547x & MCF548x processors * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> * * Adapted from the IXP4xx watchdog driver, which carries these notices: * * Author: Deepak Saxena <dsaxena@plexity.net> * * Copyright 2004 (c) MontaVista, Software, Inc. * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/ioport.h> #include <linux/uaccess.h> #include <asm/coldfire.h> #include <asm/m54xxsim.h> #include <asm/m54xxgpt.h> static int nowayout = WATCHDOG_NOWAYOUT; static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ static unsigned long wdt_status; #define WDT_IN_USE 0 #define WDT_OK_TO_CLOSE 1 static void wdt_enable(void) { unsigned int gms0; /* preserve GPIO usage, if any */ gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0); if (gms0 & MCF_GPT_GMS_TMS_GPIO) gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK | MCF_GPT_GMS_OD); else gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD; __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); __raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) | MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0); gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE; __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); } static void wdt_disable(void) { unsigned int gms0; /* disable watchdog */ gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0); gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE); __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); } static void wdt_keepalive(void) { unsigned int gms0; gms0 = 
__raw_readl(MCF_MBAR + MCF_GPT_GMS0); gms0 |= MCF_GPT_GMS_OCPW(0xA5); __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); } static int m54xx_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); wdt_enable(); return nonseekable_open(inode, file); } static ssize_t m54xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') set_bit(WDT_OK_TO_CLOSE, &wdt_status); } } wdt_keepalive(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "Coldfire M54xx Watchdog", }; static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; int time; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)) ? 
-EFAULT : 0; break; case WDIOC_GETSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_GETBOOTSTATUS: ret = put_user(0, (int *)arg); break; case WDIOC_KEEPALIVE: wdt_keepalive(); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(time, (int *)arg); if (ret) break; if (time <= 0 || time > 30) { ret = -EINVAL; break; } heartbeat = time; wdt_enable(); /* Fall through */ case WDIOC_GETTIMEOUT: ret = put_user(heartbeat, (int *)arg); break; } return ret; } static int m54xx_wdt_release(struct inode *inode, struct file *file) { if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) wdt_disable(); else { printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " "timer will not stop\n"); wdt_keepalive(); } clear_bit(WDT_IN_USE, &wdt_status); clear_bit(WDT_OK_TO_CLOSE, &wdt_status); return 0; } static const struct file_operations m54xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = m54xx_wdt_write, .unlocked_ioctl = m54xx_wdt_ioctl, .open = m54xx_wdt_open, .release = m54xx_wdt_release, }; static struct miscdevice m54xx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &m54xx_wdt_fops, }; static int __init m54xx_wdt_init(void) { if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, "Coldfire M54xx Watchdog")) { printk(KERN_WARNING "Coldfire M54xx Watchdog : I/O region busy\n"); return -EBUSY; } printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); return misc_register(&m54xx_wdt_miscdev); } static void __exit m54xx_wdt_exit(void) { misc_deregister(&m54xx_wdt_miscdev); release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); } module_init(m54xx_wdt_init); module_exit(m54xx_wdt_exit); MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); MODULE_DESCRIPTION("Coldfire M54xx Watchdog"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
bilalliberty/android_kernel_htc_villec2-caf-based
arch/um/drivers/xterm_kern.c
4570
1564
/* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/slab.h> #include <linux/completion.h> #include <linux/irqreturn.h> #include <asm/irq.h> #include "irq_kern.h" #include "os.h" struct xterm_wait { struct completion ready; int fd; int pid; int new_fd; }; static irqreturn_t xterm_interrupt(int irq, void *data) { struct xterm_wait *xterm = data; int fd; fd = os_rcv_fd(xterm->fd, &xterm->pid); if (fd == -EAGAIN) return IRQ_NONE; xterm->new_fd = fd; complete(&xterm->ready); return IRQ_HANDLED; } int xterm_fd(int socket, int *pid_out) { struct xterm_wait *data; int err, ret; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { printk(KERN_ERR "xterm_fd : failed to allocate xterm_wait\n"); return -ENOMEM; } /* This is a locked semaphore... */ *data = ((struct xterm_wait) { .fd = socket, .pid = -1, .new_fd = -1 }); init_completion(&data->ready); err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, "xterm", data); if (err) { printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, " "err = %d\n", err); ret = err; goto out; } /* ... so here we wait for an xterm interrupt. * * XXX Note, if the xterm doesn't work for some reason (eg. DISPLAY * isn't set) this will hang... */ wait_for_completion(&data->ready); free_irq(XTERM_IRQ, data); ret = data->new_fd; *pid_out = data->pid; out: kfree(data); return ret; }
gpl-2.0
PatrikKT/useless
drivers/input/touchscreen/jornada720_ts.c
4826
4650
/* * drivers/input/touchscreen/jornada720_ts.c * * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com> * * Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl> * based on HP Jornada 56x touchscreen driver by Alex Lange <chicken@handhelds.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * HP Jornada 710/720/729 Touchscreen Driver */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/jornada720.h> #include <mach/irqs.h> MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver"); MODULE_LICENSE("GPL v2"); struct jornada_ts { struct input_dev *dev; int x_data[4]; /* X sample values */ int y_data[4]; /* Y sample values */ }; static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts) { /* 3 low word X samples */ jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY); jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY); jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY); /* 3 low word Y samples */ jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY); jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY); jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY); /* combined x samples bits */ jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY); /* combined y samples bits */ jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY); } static int jornada720_ts_average(int coords[4]) { int coord, high_bits = coords[3]; coord = coords[0] | ((high_bits & 0x03) << 8); coord += coords[1] | ((high_bits & 0x0c) << 6); coord += coords[2] | ((high_bits & 0x30) << 4); return coord / 3; } static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct jornada_ts 
*jornada_ts = platform_get_drvdata(pdev); struct input_dev *input = jornada_ts->dev; int x, y; /* If GPIO_GPIO9 is set to high then report pen up */ if (GPLR & GPIO_GPIO(9)) { input_report_key(input, BTN_TOUCH, 0); input_sync(input); } else { jornada_ssp_start(); /* proper reply to request is always TXDUMMY */ if (jornada_ssp_inout(GETTOUCHSAMPLES) == TXDUMMY) { jornada720_ts_collect_data(jornada_ts); x = jornada720_ts_average(jornada_ts->x_data); y = jornada720_ts_average(jornada_ts->y_data); input_report_key(input, BTN_TOUCH, 1); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_sync(input); } jornada_ssp_end(); } return IRQ_HANDLED; } static int __devinit jornada720_ts_probe(struct platform_device *pdev) { struct jornada_ts *jornada_ts; struct input_dev *input_dev; int error; jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL); input_dev = input_allocate_device(); if (!jornada_ts || !input_dev) { error = -ENOMEM; goto fail1; } platform_set_drvdata(pdev, jornada_ts); jornada_ts->dev = input_dev; input_dev->name = "HP Jornada 7xx Touchscreen"; input_dev->phys = "jornadats/input0"; input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); error = request_irq(IRQ_GPIO9, jornada720_ts_interrupt, IRQF_TRIGGER_RISING, "HP7XX Touchscreen driver", pdev); if (error) { printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n"); goto fail1; } error = input_register_device(jornada_ts->dev); if (error) goto fail2; return 0; fail2: free_irq(IRQ_GPIO9, pdev); fail1: platform_set_drvdata(pdev, NULL); input_free_device(input_dev); kfree(jornada_ts); return error; } static int __devexit jornada720_ts_remove(struct platform_device *pdev) { struct jornada_ts *jornada_ts = platform_get_drvdata(pdev); 
free_irq(IRQ_GPIO9, pdev); platform_set_drvdata(pdev, NULL); input_unregister_device(jornada_ts->dev); kfree(jornada_ts); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:jornada_ts"); static struct platform_driver jornada720_ts_driver = { .probe = jornada720_ts_probe, .remove = __devexit_p(jornada720_ts_remove), .driver = { .name = "jornada_ts", .owner = THIS_MODULE, }, }; module_platform_driver(jornada720_ts_driver);
gpl-2.0
showp1984/bricked-hammerhead
arch/arm/mach-davinci/board-tnetv107x-evm.c
4826
6598
/* * Texas Instruments TNETV107X EVM Board Support * * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/console.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/ratelimit.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <mach/irqs.h> #include <mach/edma.h> #include <mach/mux.h> #include <mach/cp_intc.h> #include <mach/tnetv107x.h> #define EVM_MMC_WP_GPIO 21 #define EVM_MMC_CD_GPIO 24 #define EVM_SPI_CS_GPIO 54 static int initialize_gpio(int gpio, char *desc) { int ret; ret = gpio_request(gpio, desc); if (ret < 0) { pr_err_ratelimited("cannot open %s gpio\n", desc); return -ENOSYS; } gpio_direction_input(gpio); return gpio; } static int mmc_get_cd(int index) { static int gpio; if (!gpio) gpio = initialize_gpio(EVM_MMC_CD_GPIO, "mmc card detect"); if (gpio < 0) return gpio; return gpio_get_value(gpio) ? 0 : 1; } static int mmc_get_ro(int index) { static int gpio; if (!gpio) gpio = initialize_gpio(EVM_MMC_WP_GPIO, "mmc write protect"); if (gpio < 0) return gpio; return gpio_get_value(gpio) ? 
1 : 0; } static struct davinci_mmc_config mmc_config = { .get_cd = mmc_get_cd, .get_ro = mmc_get_ro, .wires = 4, .max_freq = 50000000, .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, .version = MMC_CTLR_VERSION_1, }; static const short sdio1_pins[] __initdata = { TNETV107X_SDIO1_CLK_1, TNETV107X_SDIO1_CMD_1, TNETV107X_SDIO1_DATA0_1, TNETV107X_SDIO1_DATA1_1, TNETV107X_SDIO1_DATA2_1, TNETV107X_SDIO1_DATA3_1, TNETV107X_GPIO21, TNETV107X_GPIO24, -1 }; static const short uart1_pins[] __initdata = { TNETV107X_UART1_RD, TNETV107X_UART1_TD, -1 }; static const short ssp_pins[] __initdata = { TNETV107X_SSP0_0, TNETV107X_SSP0_1, TNETV107X_SSP0_2, TNETV107X_SSP1_0, TNETV107X_SSP1_1, TNETV107X_SSP1_2, TNETV107X_SSP1_3, -1 }; static struct mtd_partition nand_partitions[] = { /* bootloader (U-Boot, etc) in first 12 sectors */ { .name = "bootloader", .offset = 0, .size = (12*SZ_128K), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_NXTBLK, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = SZ_4M, .mask_flags = 0, }, /* file system */ { .name = "filesystem", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, .mask_flags = 0, } }; static struct davinci_nand_pdata nand_config = { .mask_cle = 0x4000, .mask_ale = 0x2000, .parts = nand_partitions, .nr_parts = ARRAY_SIZE(nand_partitions), .ecc_mode = NAND_ECC_HW, .bbt_options = NAND_BBT_USE_FLASH, .ecc_bits = 1, }; static struct davinci_uart_config serial_config __initconst = { .enabled_uarts = BIT(1), }; static const uint32_t keymap[] = { KEY(0, 0, KEY_NUMERIC_1), KEY(0, 1, KEY_NUMERIC_2), KEY(0, 2, KEY_NUMERIC_3), KEY(0, 3, KEY_FN_F1), KEY(0, 4, KEY_MENU), KEY(1, 0, KEY_NUMERIC_4), KEY(1, 1, KEY_NUMERIC_5), KEY(1, 2, KEY_NUMERIC_6), KEY(1, 3, KEY_UP), KEY(1, 4, KEY_FN_F2), KEY(2, 0, KEY_NUMERIC_7), KEY(2, 1, KEY_NUMERIC_8), KEY(2, 2, 
KEY_NUMERIC_9), KEY(2, 3, KEY_LEFT), KEY(2, 4, KEY_ENTER), KEY(3, 0, KEY_NUMERIC_STAR), KEY(3, 1, KEY_NUMERIC_0), KEY(3, 2, KEY_NUMERIC_POUND), KEY(3, 3, KEY_DOWN), KEY(3, 4, KEY_RIGHT), KEY(4, 0, KEY_FN_F3), KEY(4, 1, KEY_FN_F4), KEY(4, 2, KEY_MUTE), KEY(4, 3, KEY_HOME), KEY(4, 4, KEY_BACK), KEY(5, 0, KEY_VOLUMEDOWN), KEY(5, 1, KEY_VOLUMEUP), KEY(5, 2, KEY_F1), KEY(5, 3, KEY_F2), KEY(5, 4, KEY_F3), }; static const struct matrix_keymap_data keymap_data = { .keymap = keymap, .keymap_size = ARRAY_SIZE(keymap), }; static struct matrix_keypad_platform_data keypad_config = { .keymap_data = &keymap_data, .num_row_gpios = 6, .num_col_gpios = 5, .debounce_ms = 0, /* minimum */ .active_low = 0, /* pull up realization */ .no_autorepeat = 0, }; static void spi_select_device(int cs) { static int gpio; if (!gpio) { int ret; ret = gpio_request(EVM_SPI_CS_GPIO, "spi chipsel"); if (ret < 0) { pr_err("cannot open spi chipsel gpio\n"); gpio = -ENOSYS; return; } else { gpio = EVM_SPI_CS_GPIO; gpio_direction_output(gpio, 0); } } if (gpio < 0) return; return gpio_set_value(gpio, cs ? 
1 : 0); } static struct ti_ssp_spi_data spi_master_data = { .num_cs = 2, .select = spi_select_device, .iosel = SSP_PIN_SEL(0, SSP_CLOCK) | SSP_PIN_SEL(1, SSP_DATA) | SSP_PIN_SEL(2, SSP_CHIPSEL) | SSP_PIN_SEL(3, SSP_IN) | SSP_INPUT_SEL(3), }; static struct ti_ssp_data ssp_config = { .out_clock = 250 * 1000, .dev_data = { [1] = { .dev_name = "ti-ssp-spi", .pdata = &spi_master_data, .pdata_size = sizeof(spi_master_data), }, }, }; static struct tnetv107x_device_info evm_device_info __initconst = { .serial_config = &serial_config, .mmc_config[1] = &mmc_config, /* controller 1 */ .nand_config[0] = &nand_config, /* chip select 0 */ .keypad_config = &keypad_config, .ssp_config = &ssp_config, }; static struct spi_board_info spi_info[] __initconst = { }; static __init void tnetv107x_evm_board_init(void) { davinci_cfg_reg_list(sdio1_pins); davinci_cfg_reg_list(uart1_pins); davinci_cfg_reg_list(ssp_pins); tnetv107x_devices_init(&evm_device_info); spi_register_board_info(spi_info, ARRAY_SIZE(spi_info)); } #ifdef CONFIG_SERIAL_8250_CONSOLE static int __init tnetv107x_evm_console_init(void) { return add_preferred_console("ttyS", 0, "115200"); } console_initcall(tnetv107x_evm_console_init); #endif MACHINE_START(TNETV107X, "TNETV107X EVM") .atag_offset = 0x100, .map_io = tnetv107x_init, .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = tnetv107x_evm_board_init, .dma_zone_size = SZ_128M, .restart = tnetv107x_restart, MACHINE_END
gpl-2.0
The-Nemesis-Project/hltetmo_kernel
drivers/input/touchscreen/jornada720_ts.c
4826
4650
/* * drivers/input/touchscreen/jornada720_ts.c * * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com> * * Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl> * based on HP Jornada 56x touchscreen driver by Alex Lange <chicken@handhelds.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * HP Jornada 710/720/729 Touchscreen Driver */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <mach/hardware.h> #include <mach/jornada720.h> #include <mach/irqs.h> MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver"); MODULE_LICENSE("GPL v2"); struct jornada_ts { struct input_dev *dev; int x_data[4]; /* X sample values */ int y_data[4]; /* Y sample values */ }; static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts) { /* 3 low word X samples */ jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY); jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY); jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY); /* 3 low word Y samples */ jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY); jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY); jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY); /* combined x samples bits */ jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY); /* combined y samples bits */ jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY); } static int jornada720_ts_average(int coords[4]) { int coord, high_bits = coords[3]; coord = coords[0] | ((high_bits & 0x03) << 8); coord += coords[1] | ((high_bits & 0x0c) << 6); coord += coords[2] | ((high_bits & 0x30) << 4); return coord / 3; } static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct jornada_ts 
*jornada_ts = platform_get_drvdata(pdev); struct input_dev *input = jornada_ts->dev; int x, y; /* If GPIO_GPIO9 is set to high then report pen up */ if (GPLR & GPIO_GPIO(9)) { input_report_key(input, BTN_TOUCH, 0); input_sync(input); } else { jornada_ssp_start(); /* proper reply to request is always TXDUMMY */ if (jornada_ssp_inout(GETTOUCHSAMPLES) == TXDUMMY) { jornada720_ts_collect_data(jornada_ts); x = jornada720_ts_average(jornada_ts->x_data); y = jornada720_ts_average(jornada_ts->y_data); input_report_key(input, BTN_TOUCH, 1); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_sync(input); } jornada_ssp_end(); } return IRQ_HANDLED; } static int __devinit jornada720_ts_probe(struct platform_device *pdev) { struct jornada_ts *jornada_ts; struct input_dev *input_dev; int error; jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL); input_dev = input_allocate_device(); if (!jornada_ts || !input_dev) { error = -ENOMEM; goto fail1; } platform_set_drvdata(pdev, jornada_ts); jornada_ts->dev = input_dev; input_dev->name = "HP Jornada 7xx Touchscreen"; input_dev->phys = "jornadats/input0"; input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); error = request_irq(IRQ_GPIO9, jornada720_ts_interrupt, IRQF_TRIGGER_RISING, "HP7XX Touchscreen driver", pdev); if (error) { printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n"); goto fail1; } error = input_register_device(jornada_ts->dev); if (error) goto fail2; return 0; fail2: free_irq(IRQ_GPIO9, pdev); fail1: platform_set_drvdata(pdev, NULL); input_free_device(input_dev); kfree(jornada_ts); return error; } static int __devexit jornada720_ts_remove(struct platform_device *pdev) { struct jornada_ts *jornada_ts = platform_get_drvdata(pdev); 
free_irq(IRQ_GPIO9, pdev); platform_set_drvdata(pdev, NULL); input_unregister_device(jornada_ts->dev); kfree(jornada_ts); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:jornada_ts"); static struct platform_driver jornada720_ts_driver = { .probe = jornada720_ts_probe, .remove = __devexit_p(jornada720_ts_remove), .driver = { .name = "jornada_ts", .owner = THIS_MODULE, }, }; module_platform_driver(jornada720_ts_driver);
gpl-2.0
FrancescoCG/CrazySuperKernel-CM
fs/ext3/xattr.c
5082
35449
/* * linux/fs/ext3/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>. * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>. * Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz * and Andreas Gruenbacher <agruen@suse.de>. */ /* * Extended attributes are stored directly in inodes (on file systems with * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl * field contains the block number if an inode uses an additional block. All * attributes must fit in the inode and one additional block. Blocks that * contain the identical set of attributes may be shared among several inodes. * Identical blocks are detected by keeping a cache of blocks that have * recently been accessed. * * The attributes in inodes and on blocks have a different header; the entries * are stored in the same format: * * +------------------+ * | header | * | entry 1 | | * | entry 2 | | growing downwards * | entry 3 | v * | four null bytes | * | . . . | * | value 1 | ^ * | value 3 | | growing upwards * | value 2 | | * +------------------+ * * The header is followed by multiple entry descriptors. In disk blocks, the * entry descriptors are kept sorted. In inodes, they are unsorted. The * attribute values are aligned to the end of the block in no specific order. * * Locking strategy * ---------------- * EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem. * EA blocks are only changed if they are exclusive to an inode, so * holding xattr_sem also means that nothing but the EA block's reference * count can change. Multiple writers to the same block are synchronized * by the buffer lock. 
*/ #include "ext3.h" #include <linux/mbcache.h> #include <linux/quotaops.h> #include "xattr.h" #include "acl.h" #define BHDR(bh) ((struct ext3_xattr_header *)((bh)->b_data)) #define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr)) #define BFIRST(bh) ENTRY(BHDR(bh)+1) #define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) #define IHDR(inode, raw_inode) \ ((struct ext3_xattr_ibody_header *) \ ((void *)raw_inode + \ EXT3_GOOD_OLD_INODE_SIZE + \ EXT3_I(inode)->i_extra_isize)) #define IFIRST(hdr) ((struct ext3_xattr_entry *)((hdr)+1)) #ifdef EXT3_XATTR_DEBUG # define ea_idebug(inode, f...) do { \ printk(KERN_DEBUG "inode %s:%lu: ", \ inode->i_sb->s_id, inode->i_ino); \ printk(f); \ printk("\n"); \ } while (0) # define ea_bdebug(bh, f...) do { \ char b[BDEVNAME_SIZE]; \ printk(KERN_DEBUG "block %s:%lu: ", \ bdevname(bh->b_bdev, b), \ (unsigned long) bh->b_blocknr); \ printk(f); \ printk("\n"); \ } while (0) #else # define ea_idebug(f...) # define ea_bdebug(f...) #endif static void ext3_xattr_cache_insert(struct buffer_head *); static struct buffer_head *ext3_xattr_cache_find(struct inode *, struct ext3_xattr_header *, struct mb_cache_entry **); static void ext3_xattr_rehash(struct ext3_xattr_header *, struct ext3_xattr_entry *); static int ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size); static struct mb_cache *ext3_xattr_cache; static const struct xattr_handler *ext3_xattr_handler_map[] = { [EXT3_XATTR_INDEX_USER] = &ext3_xattr_user_handler, #ifdef CONFIG_EXT3_FS_POSIX_ACL [EXT3_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext3_xattr_acl_access_handler, [EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext3_xattr_acl_default_handler, #endif [EXT3_XATTR_INDEX_TRUSTED] = &ext3_xattr_trusted_handler, #ifdef CONFIG_EXT3_FS_SECURITY [EXT3_XATTR_INDEX_SECURITY] = &ext3_xattr_security_handler, #endif }; const struct xattr_handler *ext3_xattr_handlers[] = { &ext3_xattr_user_handler, &ext3_xattr_trusted_handler, #ifdef CONFIG_EXT3_FS_POSIX_ACL &ext3_xattr_acl_access_handler, 
&ext3_xattr_acl_default_handler, #endif #ifdef CONFIG_EXT3_FS_SECURITY &ext3_xattr_security_handler, #endif NULL }; static inline const struct xattr_handler * ext3_xattr_handler(int name_index) { const struct xattr_handler *handler = NULL; if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map)) handler = ext3_xattr_handler_map[name_index]; return handler; } /* * Inode operation listxattr() * * dentry->d_inode->i_mutex: don't care */ ssize_t ext3_listxattr(struct dentry *dentry, char *buffer, size_t size) { return ext3_xattr_list(dentry, buffer, size); } static int ext3_xattr_check_names(struct ext3_xattr_entry *entry, void *end) { while (!IS_LAST_ENTRY(entry)) { struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry); if ((void *)next >= end) return -EIO; entry = next; } return 0; } static inline int ext3_xattr_check_block(struct buffer_head *bh) { int error; if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) return -EIO; error = ext3_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); return error; } static inline int ext3_xattr_check_entry(struct ext3_xattr_entry *entry, size_t size) { size_t value_size = le32_to_cpu(entry->e_value_size); if (entry->e_value_block != 0 || value_size > size || le16_to_cpu(entry->e_value_offs) + value_size > size) return -EIO; return 0; } static int ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index, const char *name, size_t size, int sorted) { struct ext3_xattr_entry *entry; size_t name_len; int cmp = 1; if (name == NULL) return -EINVAL; name_len = strlen(name); entry = *pentry; for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) { cmp = name_index - entry->e_name_index; if (!cmp) cmp = name_len - entry->e_name_len; if (!cmp) cmp = memcmp(name, entry->e_name, name_len); if (cmp <= 0 && (sorted || cmp == 0)) break; } *pentry = entry; if (!cmp && ext3_xattr_check_entry(entry, size)) return -EIO; return cmp ? 
-ENODATA : 0; } static int ext3_xattr_block_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct buffer_head *bh = NULL; struct ext3_xattr_entry *entry; size_t size; int error; ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); error = -ENODATA; if (!EXT3_I(inode)->i_file_acl) goto cleanup; ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); if (!bh) goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext3_xattr_check_block(bh)) { bad_block: ext3_error(inode->i_sb, __func__, "inode %lu: bad block "E3FSBLK, inode->i_ino, EXT3_I(inode)->i_file_acl); error = -EIO; goto cleanup; } ext3_xattr_cache_insert(bh); entry = BFIRST(bh); error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1); if (error == -EIO) goto bad_block; if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); if (buffer) { error = -ERANGE; if (size > buffer_size) goto cleanup; memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs), size); } error = size; cleanup: brelse(bh); return error; } static int ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct ext3_xattr_ibody_header *header; struct ext3_xattr_entry *entry; struct ext3_inode *raw_inode; struct ext3_iloc iloc; size_t size; void *end; int error; if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR)) return -ENODATA; error = ext3_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext3_raw_inode(&iloc); header = IHDR(inode, raw_inode); entry = IFIRST(header); end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size; error = ext3_xattr_check_names(entry, end); if (error) goto cleanup; error = ext3_xattr_find_entry(&entry, name_index, name, end - (void *)entry, 0); if (error) goto cleanup; size = 
le32_to_cpu(entry->e_value_size); if (buffer) { error = -ERANGE; if (size > buffer_size) goto cleanup; memcpy(buffer, (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), size); } error = size; cleanup: brelse(iloc.bh); return error; } /* * ext3_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ int ext3_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { int error; down_read(&EXT3_I(inode)->xattr_sem); error = ext3_xattr_ibody_get(inode, name_index, name, buffer, buffer_size); if (error == -ENODATA) error = ext3_xattr_block_get(inode, name_index, name, buffer, buffer_size); up_read(&EXT3_I(inode)->xattr_sem); return error; } static int ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry, char *buffer, size_t buffer_size) { size_t rest = buffer_size; for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) { const struct xattr_handler *handler = ext3_xattr_handler(entry->e_name_index); if (handler) { size_t size = handler->list(dentry, buffer, rest, entry->e_name, entry->e_name_len, handler->flags); if (buffer) { if (size > rest) return -ERANGE; buffer += size; } rest -= size; } } return buffer_size - rest; } static int ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = dentry->d_inode; struct buffer_head *bh = NULL; int error; ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); error = 0; if (!EXT3_I(inode)->i_file_acl) goto cleanup; ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); error = -EIO; if (!bh) goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), 
le32_to_cpu(BHDR(bh)->h_refcount)); if (ext3_xattr_check_block(bh)) { ext3_error(inode->i_sb, __func__, "inode %lu: bad block "E3FSBLK, inode->i_ino, EXT3_I(inode)->i_file_acl); error = -EIO; goto cleanup; } ext3_xattr_cache_insert(bh); error = ext3_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size); cleanup: brelse(bh); return error; } static int ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = dentry->d_inode; struct ext3_xattr_ibody_header *header; struct ext3_inode *raw_inode; struct ext3_iloc iloc; void *end; int error; if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR)) return 0; error = ext3_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext3_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size; error = ext3_xattr_check_names(IFIRST(header), end); if (error) goto cleanup; error = ext3_xattr_list_entries(dentry, IFIRST(header), buffer, buffer_size); cleanup: brelse(iloc.bh); return error; } /* * ext3_xattr_list() * * Copy a list of attribute names into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ static int ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { int i_error, b_error; down_read(&EXT3_I(dentry->d_inode)->xattr_sem); i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size); if (i_error < 0) { b_error = 0; } else { if (buffer) { buffer += i_error; buffer_size -= i_error; } b_error = ext3_xattr_block_list(dentry, buffer, buffer_size); if (b_error < 0) i_error = 0; } up_read(&EXT3_I(dentry->d_inode)->xattr_sem); return i_error + b_error; } /* * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is * not set, set it. 
*/ static void ext3_xattr_update_super_block(handle_t *handle, struct super_block *sb) { if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR)) return; if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) { EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR); ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); } } /* * Release the xattr block BH: If the reference count is > 1, decrement * it; otherwise free the block. */ static void ext3_xattr_release_block(handle_t *handle, struct inode *inode, struct buffer_head *bh) { struct mb_cache_entry *ce = NULL; int error = 0; ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr); error = ext3_journal_get_write_access(handle, bh); if (error) goto out; lock_buffer(bh); if (BHDR(bh)->h_refcount == cpu_to_le32(1)) { ea_bdebug(bh, "refcount now=0; freeing"); if (ce) mb_cache_entry_free(ce); ext3_free_blocks(handle, inode, bh->b_blocknr, 1); get_bh(bh); ext3_forget(handle, 1, inode, bh, bh->b_blocknr); } else { le32_add_cpu(&BHDR(bh)->h_refcount, -1); error = ext3_journal_dirty_metadata(handle, bh); if (IS_SYNC(inode)) handle->h_sync = 1; dquot_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); if (ce) mb_cache_entry_release(ce); } unlock_buffer(bh); out: ext3_std_error(inode->i_sb, error); return; } struct ext3_xattr_info { int name_index; const char *name; const void *value; size_t value_len; }; struct ext3_xattr_search { struct ext3_xattr_entry *first; void *base; void *end; struct ext3_xattr_entry *here; int not_found; }; static int ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s) { struct ext3_xattr_entry *last; size_t free, min_offs = s->end - s->base, name_len = strlen(i->name); /* Compute min_offs and last. 
*/ last = s->first; for (; !IS_LAST_ENTRY(last); last = EXT3_XATTR_NEXT(last)) { if (!last->e_value_block && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) min_offs = offs; } } free = min_offs - ((void *)last - s->base) - sizeof(__u32); if (!s->not_found) { if (!s->here->e_value_block && s->here->e_value_size) { size_t size = le32_to_cpu(s->here->e_value_size); free += EXT3_XATTR_SIZE(size); } free += EXT3_XATTR_LEN(name_len); } if (i->value) { if (free < EXT3_XATTR_SIZE(i->value_len) || free < EXT3_XATTR_LEN(name_len) + EXT3_XATTR_SIZE(i->value_len)) return -ENOSPC; } if (i->value && s->not_found) { /* Insert the new name. */ size_t size = EXT3_XATTR_LEN(name_len); size_t rest = (void *)last - (void *)s->here + sizeof(__u32); memmove((void *)s->here + size, s->here, rest); memset(s->here, 0, size); s->here->e_name_index = i->name_index; s->here->e_name_len = name_len; memcpy(s->here->e_name, i->name, name_len); } else { if (!s->here->e_value_block && s->here->e_value_size) { void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(s->here->e_value_offs); void *val = s->base + offs; size_t size = EXT3_XATTR_SIZE( le32_to_cpu(s->here->e_value_size)); if (i->value && size == EXT3_XATTR_SIZE(i->value_len)) { /* The old and the new value have the same size. Just replace. */ s->here->e_value_size = cpu_to_le32(i->value_len); memset(val + size - EXT3_XATTR_PAD, 0, EXT3_XATTR_PAD); /* Clear pad bytes. */ memcpy(val, i->value, i->value_len); return 0; } /* Remove the old value. */ memmove(first_val + size, first_val, val - first_val); memset(first_val, 0, size); s->here->e_value_size = 0; s->here->e_value_offs = 0; min_offs += size; /* Adjust all value offsets. 
*/ last = s->first; while (!IS_LAST_ENTRY(last)) { size_t o = le16_to_cpu(last->e_value_offs); if (!last->e_value_block && last->e_value_size && o < offs) last->e_value_offs = cpu_to_le16(o + size); last = EXT3_XATTR_NEXT(last); } } if (!i->value) { /* Remove the old name. */ size_t size = EXT3_XATTR_LEN(name_len); last = ENTRY((void *)last - size); memmove(s->here, (void *)s->here + size, (void *)last - (void *)s->here + sizeof(__u32)); memset(last, 0, size); } } if (i->value) { /* Insert the new value. */ s->here->e_value_size = cpu_to_le32(i->value_len); if (i->value_len) { size_t size = EXT3_XATTR_SIZE(i->value_len); void *val = s->base + min_offs - size; s->here->e_value_offs = cpu_to_le16(min_offs - size); memset(val + size - EXT3_XATTR_PAD, 0, EXT3_XATTR_PAD); /* Clear the pad bytes. */ memcpy(val, i->value, i->value_len); } } return 0; } struct ext3_xattr_block_find { struct ext3_xattr_search s; struct buffer_head *bh; }; static int ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i, struct ext3_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; int error; ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", i->name_index, i->name, i->value, (long)i->value_len); if (EXT3_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ bs->bh = sb_bread(sb, EXT3_I(inode)->i_file_acl); error = -EIO; if (!bs->bh) goto cleanup; ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); if (ext3_xattr_check_block(bs->bh)) { ext3_error(sb, __func__, "inode %lu: bad block "E3FSBLK, inode->i_ino, EXT3_I(inode)->i_file_acl); error = -EIO; goto cleanup; } /* Find the named attribute. 
*/ bs->s.base = BHDR(bs->bh); bs->s.first = BFIRST(bs->bh); bs->s.end = bs->bh->b_data + bs->bh->b_size; bs->s.here = bs->s.first; error = ext3_xattr_find_entry(&bs->s.here, i->name_index, i->name, bs->bh->b_size, 1); if (error && error != -ENODATA) goto cleanup; bs->s.not_found = error; } error = 0; cleanup: return error; } static int ext3_xattr_block_set(handle_t *handle, struct inode *inode, struct ext3_xattr_info *i, struct ext3_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; struct ext3_xattr_search *s = &bs->s; struct mb_cache_entry *ce = NULL; int error = 0; #define header(x) ((struct ext3_xattr_header *)(x)) if (i->value && i->value_len > sb->s_blocksize) return -ENOSPC; if (s->base) { ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev, bs->bh->b_blocknr); error = ext3_journal_get_write_access(handle, bs->bh); if (error) goto cleanup; lock_buffer(bs->bh); if (header(s->base)->h_refcount == cpu_to_le32(1)) { if (ce) { mb_cache_entry_free(ce); ce = NULL; } ea_bdebug(bs->bh, "modifying in-place"); error = ext3_xattr_set_entry(i, s); if (!error) { if (!IS_LAST_ENTRY(s->first)) ext3_xattr_rehash(header(s->base), s->here); ext3_xattr_cache_insert(bs->bh); } unlock_buffer(bs->bh); if (error == -EIO) goto bad_block; if (!error) error = ext3_journal_dirty_metadata(handle, bs->bh); if (error) goto cleanup; goto inserted; } else { int offset = (char *)s->here - bs->bh->b_data; unlock_buffer(bs->bh); journal_release_buffer(handle, bs->bh); if (ce) { mb_cache_entry_release(ce); ce = NULL; } ea_bdebug(bs->bh, "cloning"); s->base = kmalloc(bs->bh->b_size, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); s->first = ENTRY(header(s->base)+1); header(s->base)->h_refcount = cpu_to_le32(1); s->here = ENTRY(s->base + offset); s->end = s->base + bs->bh->b_size; } } else { /* Allocate a buffer where we construct the new block. 
*/ s->base = kzalloc(sb->s_blocksize, GFP_NOFS); /* assert(header == s->base) */ error = -ENOMEM; if (s->base == NULL) goto cleanup; header(s->base)->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC); header(s->base)->h_blocks = cpu_to_le32(1); header(s->base)->h_refcount = cpu_to_le32(1); s->first = ENTRY(header(s->base)+1); s->here = ENTRY(header(s->base)+1); s->end = s->base + sb->s_blocksize; } error = ext3_xattr_set_entry(i, s); if (error == -EIO) goto bad_block; if (error) goto cleanup; if (!IS_LAST_ENTRY(s->first)) ext3_xattr_rehash(header(s->base), s->here); inserted: if (!IS_LAST_ENTRY(s->first)) { new_bh = ext3_xattr_cache_find(inode, header(s->base), &ce); if (new_bh) { /* We found an identical block in the cache. */ if (new_bh == bs->bh) ea_bdebug(new_bh, "keeping"); else { /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, 1); if (error) goto cleanup; error = ext3_journal_get_write_access(handle, new_bh); if (error) goto cleanup_dquot; lock_buffer(new_bh); le32_add_cpu(&BHDR(new_bh)->h_refcount, 1); ea_bdebug(new_bh, "reusing; refcount now=%d", le32_to_cpu(BHDR(new_bh)->h_refcount)); unlock_buffer(new_bh); error = ext3_journal_dirty_metadata(handle, new_bh); if (error) goto cleanup_dquot; } mb_cache_entry_release(ce); ce = NULL; } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */ ea_bdebug(bs->bh, "keeping this block"); new_bh = bs->bh; get_bh(new_bh); } else { /* We need to allocate a new block */ ext3_fsblk_t goal = ext3_group_first_block_no(sb, EXT3_I(inode)->i_block_group); ext3_fsblk_t block; /* * Protect us agaist concurrent allocations to the * same inode from ext3_..._writepage(). Reservation * code does not expect racing allocations. 
*/ mutex_lock(&EXT3_I(inode)->truncate_mutex); block = ext3_new_block(handle, inode, goal, &error); mutex_unlock(&EXT3_I(inode)->truncate_mutex); if (error) goto cleanup; ea_idebug(inode, "creating block %d", block); new_bh = sb_getblk(sb, block); if (!new_bh) { getblk_failed: ext3_free_blocks(handle, inode, block, 1); error = -EIO; goto cleanup; } lock_buffer(new_bh); error = ext3_journal_get_create_access(handle, new_bh); if (error) { unlock_buffer(new_bh); goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext3_xattr_cache_insert(new_bh); error = ext3_journal_dirty_metadata(handle, new_bh); if (error) goto cleanup; } } /* Update the inode. */ EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; /* Drop the previous xattr block. */ if (bs->bh && bs->bh != new_bh) ext3_xattr_release_block(handle, inode, bs->bh); error = 0; cleanup: if (ce) mb_cache_entry_release(ce); brelse(new_bh); if (!(bs->bh && s->base == bs->bh->b_data)) kfree(s->base); return error; cleanup_dquot: dquot_free_block(inode, 1); goto cleanup; bad_block: ext3_error(inode->i_sb, __func__, "inode %lu: bad block "E3FSBLK, inode->i_ino, EXT3_I(inode)->i_file_acl); goto cleanup; #undef header } struct ext3_xattr_ibody_find { struct ext3_xattr_search s; struct ext3_iloc iloc; }; static int ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i, struct ext3_xattr_ibody_find *is) { struct ext3_xattr_ibody_header *header; struct ext3_inode *raw_inode; int error; if (EXT3_I(inode)->i_extra_isize == 0) return 0; raw_inode = ext3_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); is->s.here = is->s.first; is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size; if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) { error = ext3_xattr_check_names(IFIRST(header), is->s.end); if (error) return error; /* Find the named attribute. 
*/ error = ext3_xattr_find_entry(&is->s.here, i->name_index, i->name, is->s.end - (void *)is->s.base, 0); if (error && error != -ENODATA) return error; is->s.not_found = error; } return 0; } static int ext3_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext3_xattr_info *i, struct ext3_xattr_ibody_find *is) { struct ext3_xattr_ibody_header *header; struct ext3_xattr_search *s = &is->s; int error; if (EXT3_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext3_xattr_set_entry(i, s); if (error) return error; header = IHDR(inode, ext3_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC); ext3_set_inode_state(inode, EXT3_STATE_XATTR); } else { header->h_magic = cpu_to_le32(0); ext3_clear_inode_state(inode, EXT3_STATE_XATTR); } return 0; } /* * ext3_xattr_set_handle() * * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. 
*/ int ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { struct ext3_xattr_info i = { .name_index = name_index, .name = name, .value = value, .value_len = value_len, }; struct ext3_xattr_ibody_find is = { .s = { .not_found = -ENODATA, }, }; struct ext3_xattr_block_find bs = { .s = { .not_found = -ENODATA, }, }; int error; if (!name) return -EINVAL; if (strlen(name) > 255) return -ERANGE; down_write(&EXT3_I(inode)->xattr_sem); error = ext3_get_inode_loc(inode, &is.iloc); if (error) goto cleanup; error = ext3_journal_get_write_access(handle, is.iloc.bh); if (error) goto cleanup; if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) { struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc); memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); ext3_clear_inode_state(inode, EXT3_STATE_NEW); } error = ext3_xattr_ibody_find(inode, &i, &is); if (error) goto cleanup; if (is.s.not_found) error = ext3_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; if (is.s.not_found && bs.s.not_found) { error = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup; error = 0; if (!value) goto cleanup; } else { error = -EEXIST; if (flags & XATTR_CREATE) goto cleanup; } if (!value) { if (!is.s.not_found) error = ext3_xattr_ibody_set(handle, inode, &i, &is); else if (!bs.s.not_found) error = ext3_xattr_block_set(handle, inode, &i, &bs); } else { error = ext3_xattr_ibody_set(handle, inode, &i, &is); if (!error && !bs.s.not_found) { i.value = NULL; error = ext3_xattr_block_set(handle, inode, &i, &bs); } else if (error == -ENOSPC) { if (EXT3_I(inode)->i_file_acl && !bs.s.base) { error = ext3_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; } error = ext3_xattr_block_set(handle, inode, &i, &bs); if (error) goto cleanup; if (!is.s.not_found) { i.value = NULL; error = ext3_xattr_ibody_set(handle, inode, &i, &is); } } } if (!error) { ext3_xattr_update_super_block(handle, inode->i_sb); 
inode->i_ctime = CURRENT_TIME_SEC;
		error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext3_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			handle->h_sync = 1;	/* force synchronous commit */
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	up_write(&EXT3_I(inode)->xattr_sem);
	return error;
}

/*
 * ext3_xattr_set()
 *
 * Like ext3_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * On -ENOSPC the whole operation is retried (journal restarted) as long
 * as ext3_should_retry_alloc() says a retry may free up space.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext3_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	int error, retries = 0;

retry:
	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext3_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		/* Stop the journal even if the set failed; keep both codes. */
		error2 = ext3_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext3_should_retry_alloc(inode->i_sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}

/*
 * ext3_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
*/
void
ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT3_I(inode)->i_file_acl)
		goto cleanup;	/* inode has no external xattr block */
	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
	if (!bh) {
		ext3_error(inode->i_sb, __func__,
			"inode %lu: block "E3FSBLK" read error", inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		goto cleanup;
	}
	/* Validate the on-disk header before touching the block. */
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext3_error(inode->i_sb, __func__,
			"inode %lu: bad block "E3FSBLK, inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		goto cleanup;
	}
	/*
	 * NOTE(review): presumably drops the block's shared refcount and
	 * frees it when unused — confirm against ext3_xattr_release_block.
	 */
	ext3_xattr_release_block(handle, inode, bh);
	EXT3_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}

/*
 * ext3_xattr_put_super()
 *
 * This is called when a file system is unmounted.  Shrinks the mbcache
 * entries that reference this super block's device.
 */
void
ext3_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}

/*
 * ext3_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * The function itself returns void; failures (out of memory, already
 * cached) are only logged via ea_bdebug and otherwise ignored —
 * caching is strictly best-effort.
 */
static void
ext3_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
	if (!ce) {
		ea_bdebug(bh, "out of memory");
		return;
	}
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			/* An identical entry already exists: not an error. */
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x]", (int)hash);
		mb_cache_entry_release(ce);
	}
}

/*
 * ext3_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
*/
static int
ext3_xattr_cmp(struct ext3_xattr_header *header1,
	       struct ext3_xattr_header *header2)
{
	struct ext3_xattr_entry *a;
	struct ext3_xattr_entry *b;

	/* Walk both entry tables in lockstep, stopping at any mismatch. */
	for (a = ENTRY(header1 + 1), b = ENTRY(header2 + 1);
	     !IS_LAST_ENTRY(a);
	     a = EXT3_XATTR_NEXT(a), b = EXT3_XATTR_NEXT(b)) {
		if (IS_LAST_ENTRY(b))
			return 1;	/* second block has fewer entries */
		/* Cheap descriptor fields first, then the name bytes. */
		if (a->e_hash != b->e_hash ||
		    a->e_name_index != b->e_name_index ||
		    a->e_name_len != b->e_name_len ||
		    a->e_value_size != b->e_value_size ||
		    memcmp(a->e_name, b->e_name, a->e_name_len))
			return 1;
		/* Out-of-block values are never expected in these blocks. */
		if (a->e_value_block != 0 || b->e_value_block != 0)
			return -EIO;
		/* Descriptors agree — compare the in-block value payloads. */
		if (memcmp((char *)header1 + le16_to_cpu(a->e_value_offs),
			   (char *)header2 + le16_to_cpu(b->e_value_offs),
			   le32_to_cpu(a->e_value_size)))
			return 1;
	}
	if (!IS_LAST_ENTRY(b))
		return 1;	/* second block has more entries */
	return 0;
}

/*
 * ext3_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
*/
static struct buffer_head *
ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
				       hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			/* -EAGAIN: cache changed under us; restart lookup. */
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext3_error(inode->i_sb, __func__,
				"inode %lu: block %lu read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT3_XATTR_REFCOUNT_MAX) {
			/* Refcount saturated: don't share this block further. */
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
				  EXT3_XATTR_REFCOUNT_MAX);
		} else if (ext3_xattr_cmp(header, BHDR(bh)) == 0) {
			/* Found a byte-equivalent block; hand it (and the
			 * cache entry, still held) back to the caller. */
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

/* Shift widths for the per-entry hash below. */
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext3_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
*/
static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
					 struct ext3_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	/* Rotate-and-xor over the name bytes.  This hash is part of the
	 * on-disk format; do not change the mixing function. */
	for (n=0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	/* Mix in the value, one padded 32-bit word at a time, but only
	 * for values stored inside the block itself. */
	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			  EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext3_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 * The block hash folds every entry hash together; a zero entry hash marks
 * the block as unshareable, which forces the block hash to zero too.
 */
static void ext3_xattr_rehash(struct ext3_xattr_header *header,
			      struct ext3_xattr_entry *entry)
{
	struct ext3_xattr_entry *here;
	__u32 hash = 0;

	/* Refresh the hash of the entry that changed first. */
	ext3_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT3_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

/* Module init: create the shared mbcache used for xattr block sharing. */
int __init
init_ext3_xattr(void)
{
	ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
	if (!ext3_xattr_cache)
		return -ENOMEM;
	return 0;
}

/* Module exit: tear down the xattr mbcache (idempotent). */
void
exit_ext3_xattr(void)
{
	if (ext3_xattr_cache)
		mb_cache_destroy(ext3_xattr_cache);
	ext3_xattr_cache = NULL;
}
gpl-2.0
pakohan/syso-kernel
linux-3.4.68/fs/xfs/xfs_fs_subr.c
5082
2387
/*
 * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_trace.h"

/*
 * note: all filemap functions return negative error codes. These
 * need to be inverted before returning to the xfs core functions.
 */

/*
 * Drop (without writing back) cached pages in the range [first, last].
 * @fiopt is unused here.  A last of -1 means "to end of file" by
 * convention in this file.
 */
void
xfs_tosspages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	/* can't toss partial tail pages, so mask them out */
	last &= ~(PAGE_SIZE - 1);
	truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1);
}

/*
 * Write back dirty pages in [first, last], then invalidate them on
 * success.  Returns a positive XFS-style error code (the filemap
 * result is negated on return).
 */
int
xfs_flushinval_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;
	int		ret = 0;

	trace_xfs_pagecache_inval(ip, first, last);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	ret = filemap_write_and_wait_range(mapping, first,
				last == -1 ? LLONG_MAX : last);
	if (!ret)
		truncate_inode_pages_range(mapping, first, last);
	return -ret;
}

/*
 * Start writeback for [first, last].  With XBF_ASYNC, return without
 * waiting; otherwise also wait for the pages via xfs_wait_on_pages().
 * The first error encountered wins.  Returns a positive error code.
 */
int
xfs_flush_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;
	int		ret = 0;
	int		ret2;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	ret = -filemap_fdatawrite_range(mapping, first,
				last == -1 ? LLONG_MAX : last);
	if (flags & XBF_ASYNC)
		return ret;
	ret2 = xfs_wait_on_pages(ip, first, last);
	if (!ret)
		ret = ret2;
	return ret;
}

/*
 * Wait for previously started writeback in [first, last] to finish.
 * Skips the wait entirely when no page in the mapping is tagged for
 * writeback.  Returns a positive error code, or 0.
 */
int
xfs_wait_on_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;

	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
		return -filemap_fdatawait_range(mapping, first,
					last == -1 ? XFS_ISIZE(ip) - 1 : last);
	}
	return 0;
}
gpl-2.0
RickeysWorld/linux
arch/mips/lib/dump_tlb.c
9178
2643
/* * Dump R4x00 TLB for debugging purposes. * * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. * Copyright (C) 1999 by Silicon Graphics, Inc. */ #include <linux/kernel.h> #include <linux/mm.h> #include <asm/mipsregs.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> static inline const char *msk2str(unsigned int mask) { switch (mask) { case PM_4K: return "4kb"; case PM_16K: return "16kb"; case PM_64K: return "64kb"; case PM_256K: return "256kb"; #ifdef CONFIG_CPU_CAVIUM_OCTEON case PM_8K: return "8kb"; case PM_32K: return "32kb"; case PM_128K: return "128kb"; case PM_512K: return "512kb"; case PM_2M: return "2Mb"; case PM_8M: return "8Mb"; case PM_32M: return "32Mb"; #endif #ifndef CONFIG_CPU_VR41XX case PM_1M: return "1Mb"; case PM_4M: return "4Mb"; case PM_16M: return "16Mb"; case PM_64M: return "64Mb"; case PM_256M: return "256Mb"; case PM_1G: return "1Gb"; #endif } return ""; } #define BARRIER() \ __asm__ __volatile__( \ ".set\tnoreorder\n\t" \ "nop;nop;nop;nop;nop;nop;nop\n\t" \ ".set\treorder"); static void dump_tlb(int first, int last) { unsigned long s_entryhi, entryhi, asid; unsigned long long entrylo0, entrylo1; unsigned int s_index, pagemask, c0, c1, i; s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); asid = s_entryhi & 0xff; for (i = first; i <= last; i++) { write_c0_index(i); BARRIER(); tlb_read(); BARRIER(); pagemask = read_c0_pagemask(); entryhi = read_c0_entryhi(); entrylo0 = read_c0_entrylo0(); entrylo1 = read_c0_entrylo1(); /* Unused entries have a virtual address of CKSEG0. 
*/ if ((entryhi & ~0x1ffffUL) != CKSEG0 && (entryhi & 0xff) == asid) { #ifdef CONFIG_32BIT int width = 8; #else int width = 11; #endif /* * Only print entries in use */ printk("Index: %2d pgmask=%s ", i, msk2str(pagemask)); c0 = (entrylo0 >> 3) & 7; c1 = (entrylo1 >> 3) & 7; printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), entryhi & 0xff); printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, (entrylo0 & 4) ? 1 : 0, (entrylo0 & 2) ? 1 : 0, (entrylo0 & 1) ? 1 : 0); printk("[pa=%0*llx c=%d d=%d v=%d g=%d]\n", width, (entrylo1 << 6) & PAGE_MASK, c1, (entrylo1 & 4) ? 1 : 0, (entrylo1 & 2) ? 1 : 0, (entrylo1 & 1) ? 1 : 0); } } printk("\n"); write_c0_entryhi(s_entryhi); write_c0_index(s_index); } void dump_tlb_all(void) { dump_tlb(0, current_cpu_data.tlbsize - 1); }
gpl-2.0
varigit/wl18xx
drivers/i2c/busses/scx200_i2c.c
9178
3466
/* linux/drivers/i2c/busses/scx200_i2c.c Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 I2C bus on GPIO pins Based on i2c-velleman.c Copyright (C) 1995-96, 2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> #include <linux/scx200_gpio.h> #define NAME "scx200_i2c" MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver"); MODULE_LICENSE("GPL"); static int scl = CONFIG_SCx200_I2C_SCL; static int sda = CONFIG_SCx200_I2C_SDA; module_param(scl, int, 0); MODULE_PARM_DESC(scl, "GPIO line for SCL"); module_param(sda, int, 0); MODULE_PARM_DESC(sda, "GPIO line for SDA"); static void scx200_i2c_setscl(void *data, int state) { scx200_gpio_set(scl, state); } static void scx200_i2c_setsda(void *data, int state) { scx200_gpio_set(sda, state); } static int scx200_i2c_getscl(void *data) { return scx200_gpio_get(scl); } static int scx200_i2c_getsda(void *data) { return scx200_gpio_get(sda); } /* ------------------------------------------------------------------------ * Encapsulate the above functions in the correct operations structure. 
* This is only done when more than one hardware adapter is supported. */ static struct i2c_algo_bit_data scx200_i2c_data = { .setsda = scx200_i2c_setsda, .setscl = scx200_i2c_setscl, .getsda = scx200_i2c_getsda, .getscl = scx200_i2c_getscl, .udelay = 10, .timeout = HZ, }; static struct i2c_adapter scx200_i2c_ops = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo_data = &scx200_i2c_data, .name = "NatSemi SCx200 I2C", }; static int scx200_i2c_init(void) { pr_debug(NAME ": NatSemi SCx200 I2C Driver\n"); if (!scx200_gpio_present()) { printk(KERN_ERR NAME ": no SCx200 gpio pins available\n"); return -ENODEV; } pr_debug(NAME ": SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); if (scl == -1 || sda == -1 || scl == sda) { printk(KERN_ERR NAME ": scl and sda must be specified\n"); return -EINVAL; } /* Configure GPIOs as open collector outputs */ scx200_gpio_configure(scl, ~2, 5); scx200_gpio_configure(sda, ~2, 5); if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) { printk(KERN_ERR NAME ": adapter %s registration failed\n", scx200_i2c_ops.name); return -ENODEV; } return 0; } static void scx200_i2c_cleanup(void) { i2c_del_adapter(&scx200_i2c_ops); } module_init(scx200_i2c_init); module_exit(scx200_i2c_cleanup); /* Local variables: compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules" c-basic-offset: 8 End: */
gpl-2.0
jdkoreclipse/android_kernel_htc_msm8960
fs/nilfs2/bmap.c
11482
14761
/* * bmap.c - NILFS block mapping. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Koji Sato <koji@osrg.net>. */ #include <linux/fs.h> #include <linux/string.h> #include <linux/errno.h> #include "nilfs.h" #include "bmap.h" #include "btree.h" #include "direct.h" #include "btnode.h" #include "mdt.h" #include "dat.h" #include "alloc.h" struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) { struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info; return nilfs->ns_dat; } static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, const char *fname, int err) { struct inode *inode = bmap->b_inode; if (err == -EINVAL) { nilfs_error(inode->i_sb, fname, "broken bmap (inode number=%lu)\n", inode->i_ino); err = -EIO; } return err; } /** * nilfs_bmap_lookup_at_level - find a data block or node block * @bmap: bmap * @key: key * @level: level * @ptrp: place to store the value associated to @key * * Description: nilfs_bmap_lookup_at_level() finds a record whose key * matches @key in the block at @level of the bmap. * * Return Value: On success, 0 is returned and the record associated with @key * is stored in the place pointed by @ptrp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. 
* * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A record associated with @key does not exist. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) { sector_t blocknr; int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); if (ret < 0) { ret = nilfs_bmap_convert_error(bmap, __func__, ret); goto out; } if (NILFS_BMAP_USE_VBN(bmap)) { ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, &blocknr); if (!ret) *ptrp = blocknr; } out: up_read(&bmap->b_sem); return ret; } int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, unsigned maxblocks) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); up_read(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { __u64 keys[NILFS_BMAP_SMALL_HIGH + 1]; __u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1]; int ret, n; if (bmap->b_ops->bop_check_insert != NULL) { ret = bmap->b_ops->bop_check_insert(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1); if (n < 0) return n; ret = nilfs_btree_convert_and_insert( bmap, key, ptr, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags |= NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_insert(bmap, key, ptr); } /** * nilfs_bmap_insert - insert a new key-record pair into a bmap * @bmap: bmap * @key: key * @rec: record * * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EEXIST - A record associated with @key already exist. 
*/ int nilfs_bmap_insert(struct nilfs_bmap *bmap, unsigned long key, unsigned long rec) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_insert(bmap, key, rec); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) { __u64 keys[NILFS_BMAP_LARGE_LOW + 1]; __u64 ptrs[NILFS_BMAP_LARGE_LOW + 1]; int ret, n; if (bmap->b_ops->bop_check_delete != NULL) { ret = bmap->b_ops->bop_check_delete(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1); if (n < 0) return n; ret = nilfs_direct_delete_and_convert( bmap, key, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_delete(bmap, key); } int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) { __u64 lastkey; int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_last_key(bmap, &lastkey); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); else *key = lastkey; return ret; } /** * nilfs_bmap_delete - delete a key-record pair from a bmap * @bmap: bmap * @key: key * * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A record associated with @key does not exist. 
*/ int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_delete(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) { __u64 lastkey; int ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } while (key <= lastkey) { ret = nilfs_bmap_do_delete(bmap, lastkey); if (ret < 0) return ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } } return 0; } /** * nilfs_bmap_truncate - truncate a bmap to a specified key * @bmap: bmap * @key: key * * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_truncate(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_clear - free resources a bmap holds * @bmap: bmap * * Description: nilfs_bmap_clear() frees resources associated with @bmap. */ void nilfs_bmap_clear(struct nilfs_bmap *bmap) { down_write(&bmap->b_sem); if (bmap->b_ops->bop_clear != NULL) bmap->b_ops->bop_clear(bmap); up_write(&bmap->b_sem); } /** * nilfs_bmap_propagate - propagate dirty state * @bmap: bmap * @bh: buffer head * * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_propagate(bmap, bh); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_lookup_dirty_buffers - * @bmap: bmap * @listp: pointer to buffer head list */ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, struct list_head *listp) { if (bmap->b_ops->bop_lookup_dirty_buffers != NULL) bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp); } /** * nilfs_bmap_assign - assign a new block number to a block * @bmap: bmap * @bhp: pointer to buffer head * @blocknr: block number * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the * buffer specified by @bh. * * Return Value: On success, 0 is returned and the buffer head of a newly * create buffer and the block information associated with the buffer are * stored in the place pointed by @bh and @binfo, respectively. On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, unsigned long blocknr, union nilfs_binfo *binfo) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_mark - mark block dirty * @bmap: bmap * @key: key * @level: level * * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
 */
int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
	int ret;

	/* Bmap implementations without a bop_mark hook simply succeed. */
	if (bmap->b_ops->bop_mark == NULL)
		return 0;

	down_write(&bmap->b_sem);
	ret = bmap->b_ops->bop_mark(bmap, key, level);
	up_write(&bmap->b_sem);

	return nilfs_bmap_convert_error(bmap, __func__, ret);
}

/**
 * nilfs_bmap_test_and_clear_dirty - test and clear a bmap dirty state
 * @bmap: bmap
 *
 * Description: nilfs_bmap_test_and_clear_dirty() is the atomic operation
 * (serialized under b_sem) to test and clear the dirty state of @bmap.
 *
 * Return Value: 1 is returned if @bmap is dirty, or 0 if clear.
 */
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
{
	int ret;

	down_write(&bmap->b_sem);
	ret = nilfs_bmap_dirty(bmap);
	nilfs_bmap_clear_dirty(bmap);
	up_write(&bmap->b_sem);
	return ret;
}


/*
 * Internal use only
 */

/*
 * Derive the file-block key of @bh: the key of its page's first block
 * plus the buffer's position within the page's buffer ring.
 */
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
			      const struct buffer_head *bh)
{
	struct buffer_head *pbh;
	__u64 key;

	key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
					 bmap->b_inode->i_blkbits);
	for (pbh = page_buffers(bh->b_page); pbh != bh;
	     pbh = pbh->b_this_page)
		key++;

	return key;
}

/*
 * Suggest an allocation target near the last allocation if @key is
 * close to the last allocated key; otherwise NILFS_BMAP_INVALID_PTR.
 */
__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key)
{
	__s64 diff;

	diff = key - bmap->b_last_allocated_key;
	if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) &&
	    (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) &&
	    (bmap->b_last_allocated_ptr + diff > 0))
		return bmap->b_last_allocated_ptr + diff;
	else
		return NILFS_BMAP_INVALID_PTR;
}

#define NILFS_BMAP_GROUP_DIV	8
/*
 * Suggest an allocation target inside the palloc group assigned to this
 * inode, spreading inodes across NILFS_BMAP_GROUP_DIV sub-regions.
 */
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap)
{
	struct inode *dat = nilfs_bmap_get_dat(bmap);
	unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat);
	unsigned long group = bmap->b_inode->i_ino / entries_per_group;

	return group * entries_per_group +
		(bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) *
		(entries_per_group / NILFS_BMAP_GROUP_DIV);
}

/* Separate lockdep classes: the DAT bmap may be taken under MDT bmaps. */
static struct lock_class_key nilfs_bmap_dat_lock_key;
static struct lock_class_key nilfs_bmap_mdt_lock_key;

/**
 * nilfs_bmap_read - read a bmap
from an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_read() initializes the bmap @bmap. * * Return Value: On success, 0 is returned. On error, the following negative * error code is returned. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { if (raw_inode == NULL) memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE); else memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_state = 0; bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; switch (bmap->b_inode->i_ino) { case NILFS_DAT_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_P; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); break; case NILFS_CPFILE_INO: case NILFS_SUFILE_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_VS; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); /* Fall through */ default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; break; } return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ? nilfs_btree_init(bmap) : nilfs_direct_init(bmap); } /** * nilfs_bmap_write - write back a bmap to an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_write() stores @bmap in @raw_inode. 
*/ void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { down_write(&bmap->b_sem); memcpy(raw_inode->i_bmap, bmap->b_u.u_data, NILFS_INODE_BMAP_SIZE * sizeof(__le64)); if (bmap->b_inode->i_ino == NILFS_DAT_INO) bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; up_write(&bmap->b_sem); } void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) { memset(&bmap->b_u, 0, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; bmap->b_ptr_type = NILFS_BMAP_PTR_U; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; bmap->b_state = 0; nilfs_btree_init_gc(bmap); } void nilfs_bmap_save(const struct nilfs_bmap *bmap, struct nilfs_bmap_store *store) { memcpy(store->data, bmap->b_u.u_data, sizeof(store->data)); store->last_allocated_key = bmap->b_last_allocated_key; store->last_allocated_ptr = bmap->b_last_allocated_ptr; store->state = bmap->b_state; } void nilfs_bmap_restore(struct nilfs_bmap *bmap, const struct nilfs_bmap_store *store) { memcpy(bmap->b_u.u_data, store->data, sizeof(store->data)); bmap->b_last_allocated_key = store->last_allocated_key; bmap->b_last_allocated_ptr = store->last_allocated_ptr; bmap->b_state = store->state; }
gpl-2.0
leshak/i5700-leshak-kernel
drivers/video/valkyriefb.c
219
16684
/* * valkyriefb.c -- frame buffer device for the PowerMac 'valkyrie' display * * Created 8 August 1998 by * Martin Costabel <costabel@wanadoo.fr> and Kevin Schoedel * * Vmode-switching changes and vmode 15/17 modifications created 29 August * 1998 by Barry K. Nathan <barryn@pobox.com>. * * Ported to m68k Macintosh by David Huggins-Daines <dhd@debian.org> * * Derived directly from: * * controlfb.c -- frame buffer device for the PowerMac 'control' display * Copyright (C) 1998 Dan Jacobowitz <dan@debian.org> * * pmc-valkyrie.c -- Console support for PowerMac "valkyrie" display adaptor. * Copyright (C) 1997 Paul Mackerras. * * and indirectly: * * Frame buffer structure from: * drivers/video/chipsfb.c -- frame buffer device for * Chips & Technologies 65550 chip. * * Copyright (C) 1998 Paul Mackerras * * This file is derived from the Powermac "chips" driver: * Copyright (C) 1997 Fabio Riccardi. * And from the frame buffer device for Open Firmware-initialized devices: * Copyright (C) 1997 Geert Uytterhoeven. * * Hardware information from: * control.c: Console support for PowerMac "control" display adaptor. * Copyright (C) 1996 Paul Mackerras * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/selection.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/adb.h> #include <linux/cuda.h> #include <asm/io.h> #ifdef CONFIG_MAC #include <asm/bootinfo.h> #include <asm/macintosh.h> #else #include <asm/prom.h> #endif #include <asm/pgtable.h> #include "macmodes.h" #include "valkyriefb.h" #ifdef CONFIG_MAC /* We don't yet have functions to read the PRAM... 
perhaps we can adapt them from the PPC code? */ static int default_vmode = VMODE_640_480_67; static int default_cmode = CMODE_8; #else static int default_vmode = VMODE_NVRAM; static int default_cmode = CMODE_NVRAM; #endif struct fb_par_valkyrie { int vmode, cmode; int xres, yres; int vxres, vyres; struct valkyrie_regvals *init; }; struct fb_info_valkyrie { struct fb_info info; struct fb_par_valkyrie par; struct cmap_regs __iomem *cmap_regs; unsigned long cmap_regs_phys; struct valkyrie_regs __iomem *valkyrie_regs; unsigned long valkyrie_regs_phys; __u8 __iomem *frame_buffer; unsigned long frame_buffer_phys; int sense; unsigned long total_vram; u32 pseudo_palette[16]; }; /* * Exported functions */ int valkyriefb_init(void); int valkyriefb_setup(char*); static int valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int valkyriefb_set_par(struct fb_info *info); static int valkyriefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int valkyriefb_blank(int blank_mode, struct fb_info *info); static int read_valkyrie_sense(struct fb_info_valkyrie *p); static void set_valkyrie_clock(unsigned char *params); static int valkyrie_var_to_par(struct fb_var_screeninfo *var, struct fb_par_valkyrie *par, const struct fb_info *fb_info); static void valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p); static void valkyrie_par_to_fix(struct fb_par_valkyrie *par, struct fb_fix_screeninfo *fix); static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p); static struct fb_ops valkyriefb_ops = { .owner = THIS_MODULE, .fb_check_var = valkyriefb_check_var, .fb_set_par = valkyriefb_set_par, .fb_setcolreg = valkyriefb_setcolreg, .fb_blank = valkyriefb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* Sets the video mode according to info->var */ static int valkyriefb_set_par(struct fb_info *info) { struct 
fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; volatile struct valkyrie_regs __iomem *valkyrie_regs = p->valkyrie_regs; struct fb_par_valkyrie *par = info->par; struct valkyrie_regvals *init; int err; if ((err = valkyrie_var_to_par(&info->var, par, info))) return err; valkyrie_par_to_fix(par, &info->fix); /* Reset the valkyrie */ out_8(&valkyrie_regs->status.r, 0); udelay(100); /* Initialize display timing registers */ init = par->init; out_8(&valkyrie_regs->mode.r, init->mode | 0x80); out_8(&valkyrie_regs->depth.r, par->cmode + 3); set_valkyrie_clock(init->clock_params); udelay(100); /* Turn on display */ out_8(&valkyrie_regs->mode.r, init->mode); return 0; } static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var) { return mac_vmode_to_var(par->vmode, par->cmode, var); } static int valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err; struct fb_par_valkyrie par; if ((err = valkyrie_var_to_par(var, &par, info))) return err; valkyrie_par_to_var(&par, var); return 0; } /* * Blank the screen if blank_mode != 0, else unblank. If blank_mode == NULL * then the caller blanks by setting the CLUT (Color Look Up Table) to all * black. Return 0 if blanking succeeded, != 0 if un-/blanking failed due * to e.g. a video mode which doesn't support it. 
Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown */ static int valkyriefb_blank(int blank_mode, struct fb_info *info) { struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; struct fb_par_valkyrie *par = info->par; struct valkyrie_regvals *init = par->init; if (init == NULL) return 1; switch (blank_mode) { case FB_BLANK_UNBLANK: /* unblank */ out_8(&p->valkyrie_regs->mode.r, init->mode); break; case FB_BLANK_NORMAL: return 1; /* get caller to set CLUT to all black */ case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: /* * [kps] Value extracted from MacOS. I don't know * whether this bit disables hsync or vsync, or * whether the hardware can do the other as well. */ out_8(&p->valkyrie_regs->mode.r, init->mode | 0x40); break; case FB_BLANK_POWERDOWN: out_8(&p->valkyrie_regs->mode.r, 0x66); break; } return 0; } static int valkyriefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) info; volatile struct cmap_regs __iomem *cmap_regs = p->cmap_regs; struct fb_par_valkyrie *par = info->par; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; /* tell clut which address to fill */ out_8(&p->cmap_regs->addr, regno); udelay(1); /* send one color channel at a time */ out_8(&cmap_regs->lut, red); out_8(&cmap_regs->lut, green); out_8(&cmap_regs->lut, blue); if (regno < 16 && par->cmode == CMODE_16) ((u32 *)info->pseudo_palette)[regno] = (regno << 10) | (regno << 5) | regno; return 0; } static inline int valkyrie_vram_reqd(int video_mode, int color_mode) { int pitch; struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1]; if ((pitch = init->pitch[color_mode]) == 0) pitch = 2 * init->pitch[0]; return init->vres * pitch; } static void set_valkyrie_clock(unsigned char *params) { struct adb_request req; int 
i; #ifdef CONFIG_ADB_CUDA for (i = 0; i < 3; ++i) { cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_GET_SET_IIC, 0x50, i + 1, params[i]); while (!req.complete) cuda_poll(); } #endif } static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p) { p->sense = read_valkyrie_sense(p); printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense); /* Try to pick a video mode out of NVRAM if we have one. */ #if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) if (default_vmode == VMODE_NVRAM) { default_vmode = nvram_read_byte(NV_VMODE); if (default_vmode <= 0 || default_vmode > VMODE_MAX || !valkyrie_reg_init[default_vmode - 1]) default_vmode = VMODE_CHOOSE; } #endif if (default_vmode == VMODE_CHOOSE) default_vmode = mac_map_monitor_sense(p->sense); if (!valkyrie_reg_init[default_vmode - 1]) default_vmode = VMODE_640_480_67; #if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) if (default_cmode == CMODE_NVRAM) default_cmode = nvram_read_byte(NV_CMODE); #endif /* * Reduce the pixel size if we don't have enough VRAM or bandwidth. */ if (default_cmode < CMODE_8 || default_cmode > CMODE_16 || valkyrie_reg_init[default_vmode-1]->pitch[default_cmode] == 0 || valkyrie_vram_reqd(default_vmode, default_cmode) > p->total_vram) default_cmode = CMODE_8; printk(KERN_INFO "using video mode %d and color mode %d.\n", default_vmode, default_cmode); } int __init valkyriefb_init(void) { struct fb_info_valkyrie *p; unsigned long frame_buffer_phys, cmap_regs_phys, flags; int err; char *option = NULL; if (fb_get_options("valkyriefb", &option)) return -ENODEV; valkyriefb_setup(option); #ifdef CONFIG_MAC if (!MACH_IS_MAC) return 0; if (!(mac_bi_data.id == MAC_MODEL_Q630 /* I'm not sure about this one */ || mac_bi_data.id == MAC_MODEL_P588)) return 0; /* Hardcoded addresses... welcome to 68k Macintosh country :-) */ frame_buffer_phys = 0xf9000000; cmap_regs_phys = 0x50f24000; flags = IOMAP_NOCACHE_SER; /* IOMAP_WRITETHROUGH?? 
*/ #else /* ppc (!CONFIG_MAC) */ { struct device_node *dp; struct resource r; dp = of_find_node_by_name(NULL, "valkyrie"); if (dp == 0) return 0; if (of_address_to_resource(dp, 0, &r)) { printk(KERN_ERR "can't find address for valkyrie\n"); return 0; } frame_buffer_phys = r.start; cmap_regs_phys = r.start + 0x304000; flags = _PAGE_WRITETHRU; } #endif /* ppc (!CONFIG_MAC) */ p = kzalloc(sizeof(*p), GFP_ATOMIC); if (p == 0) return -ENOMEM; /* Map in frame buffer and registers */ if (!request_mem_region(frame_buffer_phys, 0x100000, "valkyriefb")) { kfree(p); return 0; } p->total_vram = 0x100000; p->frame_buffer_phys = frame_buffer_phys; p->frame_buffer = __ioremap(frame_buffer_phys, p->total_vram, flags); p->cmap_regs_phys = cmap_regs_phys; p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000); p->valkyrie_regs_phys = cmap_regs_phys+0x6000; p->valkyrie_regs = ioremap(p->valkyrie_regs_phys, 0x1000); err = -ENOMEM; if (p->frame_buffer == NULL || p->cmap_regs == NULL || p->valkyrie_regs == NULL) { printk(KERN_ERR "valkyriefb: couldn't map resources\n"); goto out_free; } valkyrie_choose_mode(p); mac_vmode_to_var(default_vmode, default_cmode, &p->info.var); valkyrie_init_info(&p->info, p); valkyrie_init_fix(&p->info.fix, p); if (valkyriefb_set_par(&p->info)) /* "can't happen" */ printk(KERN_ERR "valkyriefb: can't set default video mode\n"); if ((err = register_framebuffer(&p->info)) != 0) goto out_free; printk(KERN_INFO "fb%d: valkyrie frame buffer device\n", p->info.node); return 0; out_free: if (p->frame_buffer) iounmap(p->frame_buffer); if (p->cmap_regs) iounmap(p->cmap_regs); if (p->valkyrie_regs) iounmap(p->valkyrie_regs); kfree(p); return err; } /* * Get the monitor sense value. 
*/
static int read_valkyrie_sense(struct fb_info_valkyrie *p)
{
	int sense, in;

	out_8(&p->valkyrie_regs->msense.r, 0);	/* release all lines */
	__delay(20000);
	sense = ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x70) << 4;
	/* drive each sense line low in turn and collect the other 2 */
	out_8(&p->valkyrie_regs->msense.r, 4);	/* drive A low */
	__delay(20000);
	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x30);
	out_8(&p->valkyrie_regs->msense.r, 2);	/* drive B low */
	__delay(20000);
	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x40) >> 3;
	sense |= (in & 0x10) >> 2;
	out_8(&p->valkyrie_regs->msense.r, 1);	/* drive C low */
	__delay(20000);
	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x60) >> 5;

	/* restore all lines to the released state before returning */
	out_8(&p->valkyrie_regs->msense.r, 7);

	return sense;
}

/*
 * This routine takes a user-supplied var, and picks the best vmode/cmode
 * from it.
 *
 * [bkn] I did a major overhaul of this function.
 *
 * Much of the old code was "swiped by jonh from atyfb.c". Because
 * macmodes has mac_var_to_vmode, I felt that it would be better to
 * rework this function to use that, instead of reinventing the wheel to
 * add support for vmode 17. This was reinforced by the fact that
 * the previously swiped atyfb.c code is no longer there.
 *
 * So, I swiped and adapted platinum_var_to_par (from platinumfb.c), replacing
 * most, but not all, of the old code in the process. One side benefit of
 * swiping the platinumfb code is that we now have more comprehensible error
 * messages when a vmode/cmode switch fails. (Most of the error messages are
 * platinumfb.c, but I added two of my own, and I also changed some commas
 * into colons to make the messages more consistent with other Linux error
 * messages.) In addition, I think the new code *might* fix some vmode-
 * switching oddities, but I'm not sure.
 *
 * There may be some more opportunities for cleanup in here, but this is a
 * good start...
 */
static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
	struct fb_par_valkyrie *par, const struct fb_info *fb_info)
{
	int vmode, cmode;
	struct valkyrie_regvals *init;
	struct fb_info_valkyrie *p = (struct fb_info_valkyrie *) fb_info;

	if (mac_var_to_vmode(var, &vmode, &cmode) != 0) {
		printk(KERN_ERR "valkyriefb: can't do %dx%dx%d.\n",
		       var->xres, var->yres, var->bits_per_pixel);
		return -EINVAL;
	}

	/* Check if we know about the wanted video mode */
	if (vmode < 1 || vmode > VMODE_MAX || !valkyrie_reg_init[vmode-1]) {
		printk(KERN_ERR "valkyriefb: vmode %d not valid.\n", vmode);
		return -EINVAL;
	}

	/* Valkyrie hardware only does 8 or 16 bits per pixel. */
	if (cmode != CMODE_8 && cmode != CMODE_16) {
		printk(KERN_ERR "valkyriefb: cmode %d not valid.\n", cmode);
		return -EINVAL;
	}

	/* No virtual resolution or panning support. */
	if (var->xres_virtual > var->xres || var->yres_virtual > var->yres
		|| var->xoffset != 0 || var->yoffset != 0) {
		return -EINVAL;
	}

	init = valkyrie_reg_init[vmode-1];

	if (init->pitch[cmode] == 0) {
		printk(KERN_ERR "valkyriefb: vmode %d does not support "
		       "cmode %d.\n", vmode, cmode);
		return -EINVAL;
	}

	if (valkyrie_vram_reqd(vmode, cmode) > p->total_vram) {
		printk(KERN_ERR "valkyriefb: not enough ram for vmode %d, "
		       "cmode %d.\n", vmode, cmode);
		return -EINVAL;
	}

	par->vmode = vmode;
	par->cmode = cmode;
	par->init = init;
	par->xres = var->xres;
	par->yres = var->yres;
	par->vxres = par->xres;
	par->vyres = par->yres;

	return 0;
}

/* One-time initialization of the fixed screen info. */
static void valkyrie_init_fix(struct fb_fix_screeninfo *fix,
			      struct fb_info_valkyrie *p)
{
	memset(fix, 0, sizeof(*fix));
	strcpy(fix->id, "valkyrie");
	fix->mmio_start = p->valkyrie_regs_phys;
	fix->mmio_len = sizeof(struct valkyrie_regs);
	fix->type = FB_TYPE_PACKED_PIXELS;
	fix->smem_start = p->frame_buffer_phys + 0x1000;
	fix->smem_len = p->total_vram;
	fix->type_aux = 0;
	fix->ywrapstep = 0;
	fix->ypanstep = 0;
	fix->xpanstep = 0;
}

/* Fix must already be inited above; this refreshes the mode-dependent
 * fields from the current par. */
static void valkyrie_par_to_fix(struct fb_par_valkyrie *par,
				struct fb_fix_screeninfo *fix)
{
	fix->smem_len = valkyrie_vram_reqd(par->vmode, par->cmode);
	fix->visual = (par->cmode == CMODE_8) ?
		FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
	/* cmode is 0 for 8bpp, 1 for 16bpp, hence the shift. */
	fix->line_length = par->vxres << par->cmode;
		/* ywrapstep, xpanstep, ypanstep */
}

/* Wire up the fb_info operations, palette and screen base address. */
static void __init valkyrie_init_info(struct fb_info *info,
				      struct fb_info_valkyrie *p)
{
	info->fbops = &valkyriefb_ops;
	info->screen_base = p->frame_buffer + 0x1000;
	info->flags = FBINFO_DEFAULT;
	info->pseudo_palette = p->pseudo_palette;
	fb_alloc_cmap(&info->cmap, 256, 0);
	info->par = &p->par;
}


/*
 * Parse user specified options (`video=valkyriefb:') of the form
 * "vmode:<n>" and/or "cmode:<depth>", comma separated.
 */
int __init valkyriefb_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!strncmp(this_opt, "vmode:", 6)) {
			int vmode = simple_strtoul(this_opt+6, NULL, 0);
			if (vmode > 0 && vmode <= VMODE_MAX)
				default_vmode = vmode;
		} else if (!strncmp(this_opt, "cmode:", 6)) {
			int depth = simple_strtoul(this_opt+6, NULL, 0);
			switch (depth) {
			case 8:
				default_cmode = CMODE_8;
				break;
			case 15:
			case 16:
				default_cmode = CMODE_16;
				break;
			}
		}
	}
	return 0;
}

module_init(valkyriefb_init);
MODULE_LICENSE("GPL");
gpl-2.0
tejasjadhav/android_kernel_mocha
drivers/iio/adc/at91_adc.c
731
19774
/*
 * Driver for the ADC present in the Atmel AT91 evaluation boards.
 *
 * Copyright 2011 Free Electrons
 *
 * Licensed under the GPLv2 or later.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <linux/platform_data/at91_adc.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include <mach/at91_adc.h>

/* Register offset of channel @ch's data register (4 bytes per channel). */
#define AT91_ADC_CHAN(st, ch) \
	(st->registers->channel_base + (ch * 4))
#define at91_adc_readl(st, reg) \
	(readl_relaxed(st->reg_base + reg))
#define at91_adc_writel(st, reg, val) \
	(writel_relaxed(val, st->reg_base + reg))

struct at91_adc_state {
	struct clk		*adc_clk;
	u16			*buffer;	/* scan buffer for triggered capture */
	unsigned long		channels_mask;	/* channels wired on the board */
	struct clk		*clk;
	bool			done;		/* set by IRQ when a one-shot read finished */
	int			irq;
	u16			last_value;	/* result of the last one-shot conversion */
	struct mutex		lock;		/* serializes raw reads */
	u8			num_channels;
	void __iomem		*reg_base;
	struct at91_adc_reg_desc *registers;	/* SoC-specific register layout */
	u8			startup_time;
	u8			sample_hold_time;
	bool			sleep_mode;
	struct iio_trigger	**trig;
	struct at91_adc_trigger	*trigger_list;
	u32			trigger_number;
	bool			use_external;
	u32			vref_mv;	/* reference voltage, in millivolts */
	u32			res;		/* resolution used for conversions */
	bool			low_res;	/* the resolution corresponds to the lowest one */
	wait_queue_head_t	wq_data_avail;	/* woken by the EOC interrupt */
};

/*
 * Pollfunc handler: copy the converted value of every active channel into
 * st->buffer, append the timestamp, and push the scan to the IIO buffers.
 */
static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *idev = pf->indio_dev;
	struct at91_adc_state *st = iio_priv(idev);
	int i, j = 0;

	for (i = 0; i < idev->masklength; i++) {
		if (!test_bit(i, idev->active_scan_mask))
			continue;
		st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
		j++;
	}

	if (idev->scan_timestamp) {
		/* Timestamp goes after the samples, at the next s64 boundary. */
		s64 *timestamp = (s64 *)((u8 *)st->buffer +
					 ALIGN(j, sizeof(s64)));
*timestamp = pf->timestamp; } iio_push_to_buffers(idev, (u8 *)st->buffer); iio_trigger_notify_done(idev->trig); /* Needed to ACK the DRDY interruption */ at91_adc_readl(st, AT91_ADC_LCDR); enable_irq(st->irq); return IRQ_HANDLED; } static irqreturn_t at91_adc_eoc_trigger(int irq, void *private) { struct iio_dev *idev = private; struct at91_adc_state *st = iio_priv(idev); u32 status = at91_adc_readl(st, st->registers->status_register); if (!(status & st->registers->drdy_mask)) return IRQ_HANDLED; if (iio_buffer_enabled(idev)) { disable_irq_nosync(irq); iio_trigger_poll(idev->trig, iio_get_time_ns()); } else { st->last_value = at91_adc_readl(st, AT91_ADC_LCDR); st->done = true; wake_up_interruptible(&st->wq_data_avail); } return IRQ_HANDLED; } static int at91_adc_channel_init(struct iio_dev *idev) { struct at91_adc_state *st = iio_priv(idev); struct iio_chan_spec *chan_array, *timestamp; int bit, idx = 0; idev->num_channels = bitmap_weight(&st->channels_mask, st->num_channels) + 1; chan_array = devm_kzalloc(&idev->dev, ((idev->num_channels + 1) * sizeof(struct iio_chan_spec)), GFP_KERNEL); if (!chan_array) return -ENOMEM; for_each_set_bit(bit, &st->channels_mask, st->num_channels) { struct iio_chan_spec *chan = chan_array + idx; chan->type = IIO_VOLTAGE; chan->indexed = 1; chan->channel = bit; chan->scan_index = idx; chan->scan_type.sign = 'u'; chan->scan_type.realbits = st->res; chan->scan_type.storagebits = 16; chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE); chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); idx++; } timestamp = chan_array + idx; timestamp->type = IIO_TIMESTAMP; timestamp->channel = -1; timestamp->scan_index = idx; timestamp->scan_type.sign = 's'; timestamp->scan_type.realbits = 64; timestamp->scan_type.storagebits = 64; idev->channels = chan_array; return idev->num_channels; } static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev, struct at91_adc_trigger *triggers, const char *trigger_name) { struct at91_adc_state *st = 
iio_priv(idev); u8 value = 0; int i; for (i = 0; i < st->trigger_number; i++) { char *name = kasprintf(GFP_KERNEL, "%s-dev%d-%s", idev->name, idev->id, triggers[i].name); if (!name) return -ENOMEM; if (strcmp(trigger_name, name) == 0) { value = triggers[i].value; kfree(name); break; } kfree(name); } return value; } static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) { struct iio_dev *idev = iio_trigger_get_drvdata(trig); struct at91_adc_state *st = iio_priv(idev); struct iio_buffer *buffer = idev->buffer; struct at91_adc_reg_desc *reg = st->registers; u32 status = at91_adc_readl(st, reg->trigger_register); u8 value; u8 bit; value = at91_adc_get_trigger_value_by_name(idev, st->trigger_list, idev->trig->name); if (value == 0) return -EINVAL; if (state) { st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL); if (st->buffer == NULL) return -ENOMEM; at91_adc_writel(st, reg->trigger_register, status | value); for_each_set_bit(bit, buffer->scan_mask, st->num_channels) { struct iio_chan_spec const *chan = idev->channels + bit; at91_adc_writel(st, AT91_ADC_CHER, AT91_ADC_CH(chan->channel)); } at91_adc_writel(st, AT91_ADC_IER, reg->drdy_mask); } else { at91_adc_writel(st, AT91_ADC_IDR, reg->drdy_mask); at91_adc_writel(st, reg->trigger_register, status & ~value); for_each_set_bit(bit, buffer->scan_mask, st->num_channels) { struct iio_chan_spec const *chan = idev->channels + bit; at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); } kfree(st->buffer); } return 0; } static const struct iio_trigger_ops at91_adc_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = &at91_adc_configure_trigger, }; static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev, struct at91_adc_trigger *trigger) { struct iio_trigger *trig; int ret; trig = iio_trigger_alloc("%s-dev%d-%s", idev->name, idev->id, trigger->name); if (trig == NULL) return NULL; trig->dev.parent = idev->dev.parent; iio_trigger_set_drvdata(trig, idev); trig->ops = 
&at91_adc_trigger_ops; ret = iio_trigger_register(trig); if (ret) return NULL; return trig; } static int at91_adc_trigger_init(struct iio_dev *idev) { struct at91_adc_state *st = iio_priv(idev); int i, ret; st->trig = devm_kzalloc(&idev->dev, st->trigger_number * sizeof(st->trig), GFP_KERNEL); if (st->trig == NULL) { ret = -ENOMEM; goto error_ret; } for (i = 0; i < st->trigger_number; i++) { if (st->trigger_list[i].is_external && !(st->use_external)) continue; st->trig[i] = at91_adc_allocate_trigger(idev, st->trigger_list + i); if (st->trig[i] == NULL) { dev_err(&idev->dev, "Could not allocate trigger %d\n", i); ret = -ENOMEM; goto error_trigger; } } return 0; error_trigger: for (i--; i >= 0; i--) { iio_trigger_unregister(st->trig[i]); iio_trigger_free(st->trig[i]); } error_ret: return ret; } static void at91_adc_trigger_remove(struct iio_dev *idev) { struct at91_adc_state *st = iio_priv(idev); int i; for (i = 0; i < st->trigger_number; i++) { iio_trigger_unregister(st->trig[i]); iio_trigger_free(st->trig[i]); } } static int at91_adc_buffer_init(struct iio_dev *idev) { return iio_triggered_buffer_setup(idev, &iio_pollfunc_store_time, &at91_adc_trigger_handler, NULL); } static void at91_adc_buffer_remove(struct iio_dev *idev) { iio_triggered_buffer_cleanup(idev); } static int at91_adc_read_raw(struct iio_dev *idev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct at91_adc_state *st = iio_priv(idev); int ret; switch (mask) { case IIO_CHAN_INFO_RAW: mutex_lock(&st->lock); at91_adc_writel(st, AT91_ADC_CHER, AT91_ADC_CH(chan->channel)); at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask); at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START); ret = wait_event_interruptible_timeout(st->wq_data_avail, st->done, msecs_to_jiffies(1000)); if (ret == 0) ret = -ETIMEDOUT; if (ret < 0) { mutex_unlock(&st->lock); return ret; } *val = st->last_value; at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); at91_adc_writel(st, AT91_ADC_IDR, 
st->registers->drdy_mask); st->last_value = 0; st->done = false; mutex_unlock(&st->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = (st->vref_mv * 1000) >> chan->scan_type.realbits; *val2 = 0; return IIO_VAL_INT_PLUS_MICRO; default: break; } return -EINVAL; } static int at91_adc_of_get_resolution(struct at91_adc_state *st, struct platform_device *pdev) { struct iio_dev *idev = iio_priv_to_dev(st); struct device_node *np = pdev->dev.of_node; int count, i, ret = 0; char *res_name, *s; u32 *resolutions; count = of_property_count_strings(np, "atmel,adc-res-names"); if (count < 2) { dev_err(&idev->dev, "You must specified at least two resolution names for " "adc-res-names property in the DT\n"); return count; } resolutions = kmalloc(count * sizeof(*resolutions), GFP_KERNEL); if (!resolutions) return -ENOMEM; if (of_property_read_u32_array(np, "atmel,adc-res", resolutions, count)) { dev_err(&idev->dev, "Missing adc-res property in the DT.\n"); ret = -ENODEV; goto ret; } if (of_property_read_string(np, "atmel,adc-use-res", (const char **)&res_name)) res_name = "highres"; for (i = 0; i < count; i++) { if (of_property_read_string_index(np, "atmel,adc-res-names", i, (const char **)&s)) continue; if (strcmp(res_name, s)) continue; st->res = resolutions[i]; if (!strcmp(res_name, "lowres")) st->low_res = true; else st->low_res = false; dev_info(&idev->dev, "Resolution used: %u bits\n", st->res); goto ret; } dev_err(&idev->dev, "There is no resolution for %s\n", res_name); ret: kfree(resolutions); return ret; } static int at91_adc_probe_dt(struct at91_adc_state *st, struct platform_device *pdev) { struct iio_dev *idev = iio_priv_to_dev(st); struct device_node *node = pdev->dev.of_node; struct device_node *trig_node; int i = 0, ret; u32 prop; if (!node) return -EINVAL; st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers"); if (of_property_read_u32(node, "atmel,adc-channels-used", &prop)) { dev_err(&idev->dev, "Missing adc-channels-used 
property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->channels_mask = prop; if (of_property_read_u32(node, "atmel,adc-num-channels", &prop)) { dev_err(&idev->dev, "Missing adc-num-channels property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->num_channels = prop; st->sleep_mode = of_property_read_bool(node, "atmel,adc-sleep-mode"); if (of_property_read_u32(node, "atmel,adc-startup-time", &prop)) { dev_err(&idev->dev, "Missing adc-startup-time property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->startup_time = prop; prop = 0; of_property_read_u32(node, "atmel,adc-sample-hold-time", &prop); st->sample_hold_time = prop; if (of_property_read_u32(node, "atmel,adc-vref", &prop)) { dev_err(&idev->dev, "Missing adc-vref property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->vref_mv = prop; ret = at91_adc_of_get_resolution(st, pdev); if (ret) goto error_ret; st->registers = devm_kzalloc(&idev->dev, sizeof(struct at91_adc_reg_desc), GFP_KERNEL); if (!st->registers) { dev_err(&idev->dev, "Could not allocate register memory.\n"); ret = -ENOMEM; goto error_ret; } if (of_property_read_u32(node, "atmel,adc-channel-base", &prop)) { dev_err(&idev->dev, "Missing adc-channel-base property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->registers->channel_base = prop; if (of_property_read_u32(node, "atmel,adc-drdy-mask", &prop)) { dev_err(&idev->dev, "Missing adc-drdy-mask property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->registers->drdy_mask = prop; if (of_property_read_u32(node, "atmel,adc-status-register", &prop)) { dev_err(&idev->dev, "Missing adc-status-register property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->registers->status_register = prop; if (of_property_read_u32(node, "atmel,adc-trigger-register", &prop)) { dev_err(&idev->dev, "Missing adc-trigger-register property in the DT.\n"); ret = -EINVAL; goto error_ret; } st->registers->trigger_register = prop; st->trigger_number = of_get_child_count(node); 
st->trigger_list = devm_kzalloc(&idev->dev, st->trigger_number * sizeof(struct at91_adc_trigger), GFP_KERNEL); if (!st->trigger_list) { dev_err(&idev->dev, "Could not allocate trigger list memory.\n"); ret = -ENOMEM; goto error_ret; } for_each_child_of_node(node, trig_node) { struct at91_adc_trigger *trig = st->trigger_list + i; const char *name; if (of_property_read_string(trig_node, "trigger-name", &name)) { dev_err(&idev->dev, "Missing trigger-name property in the DT.\n"); ret = -EINVAL; goto error_ret; } trig->name = name; if (of_property_read_u32(trig_node, "trigger-value", &prop)) { dev_err(&idev->dev, "Missing trigger-value property in the DT.\n"); ret = -EINVAL; goto error_ret; } trig->value = prop; trig->is_external = of_property_read_bool(trig_node, "trigger-external"); i++; } return 0; error_ret: return ret; } static int at91_adc_probe_pdata(struct at91_adc_state *st, struct platform_device *pdev) { struct at91_adc_data *pdata = pdev->dev.platform_data; if (!pdata) return -EINVAL; st->use_external = pdata->use_external_triggers; st->vref_mv = pdata->vref; st->channels_mask = pdata->channels_used; st->num_channels = pdata->num_channels; st->startup_time = pdata->startup_time; st->trigger_number = pdata->trigger_number; st->trigger_list = pdata->trigger_list; st->registers = pdata->registers; return 0; } static const struct iio_info at91_adc_info = { .driver_module = THIS_MODULE, .read_raw = &at91_adc_read_raw, }; static int at91_adc_probe(struct platform_device *pdev) { unsigned int prsc, mstrclk, ticks, adc_clk, shtim; int ret; struct iio_dev *idev; struct at91_adc_state *st; struct resource *res; u32 reg; idev = iio_device_alloc(sizeof(struct at91_adc_state)); if (idev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(idev); if (pdev->dev.of_node) ret = at91_adc_probe_dt(st, pdev); else ret = at91_adc_probe_pdata(st, pdev); if (ret) { dev_err(&pdev->dev, "No platform data available.\n"); ret = -EINVAL; goto error_free_device; } 
platform_set_drvdata(pdev, idev); idev->dev.parent = &pdev->dev; idev->name = dev_name(&pdev->dev); idev->modes = INDIO_DIRECT_MODE; idev->info = &at91_adc_info; st->irq = platform_get_irq(pdev, 0); if (st->irq < 0) { dev_err(&pdev->dev, "No IRQ ID is designated\n"); ret = -ENODEV; goto error_free_device; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); st->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(st->reg_base)) { ret = PTR_ERR(st->reg_base); goto error_free_device; } /* * Disable all IRQs before setting up the handler */ at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST); at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF); ret = request_irq(st->irq, at91_adc_eoc_trigger, 0, pdev->dev.driver->name, idev); if (ret) { dev_err(&pdev->dev, "Failed to allocate IRQ.\n"); goto error_free_device; } st->clk = devm_clk_get(&pdev->dev, "adc_clk"); if (IS_ERR(st->clk)) { dev_err(&pdev->dev, "Failed to get the clock.\n"); ret = PTR_ERR(st->clk); goto error_free_irq; } ret = clk_prepare_enable(st->clk); if (ret) { dev_err(&pdev->dev, "Could not prepare or enable the clock.\n"); goto error_free_irq; } st->adc_clk = devm_clk_get(&pdev->dev, "adc_op_clk"); if (IS_ERR(st->adc_clk)) { dev_err(&pdev->dev, "Failed to get the ADC clock.\n"); ret = PTR_ERR(st->adc_clk); goto error_disable_clk; } ret = clk_prepare_enable(st->adc_clk); if (ret) { dev_err(&pdev->dev, "Could not prepare or enable the ADC clock.\n"); goto error_disable_clk; } /* * Prescaler rate computation using the formula from the Atmel's * datasheet : ADC Clock = MCK / ((Prescaler + 1) * 2), ADC Clock being * specified by the electrical characteristics of the board. 
*/ mstrclk = clk_get_rate(st->clk); adc_clk = clk_get_rate(st->adc_clk); prsc = (mstrclk / (2 * adc_clk)) - 1; if (!st->startup_time) { dev_err(&pdev->dev, "No startup time available.\n"); ret = -EINVAL; goto error_disable_adc_clk; } /* * Number of ticks needed to cover the startup time of the ADC as * defined in the electrical characteristics of the board, divided by 8. * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock */ ticks = round_up((st->startup_time * adc_clk / 1000000) - 1, 8) / 8; /* * a minimal Sample and Hold Time is necessary for the ADC to guarantee * the best converted final value between two channels selection * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock */ shtim = round_up((st->sample_hold_time * adc_clk / 1000000) - 1, 1); reg = AT91_ADC_PRESCAL_(prsc) & AT91_ADC_PRESCAL; reg |= AT91_ADC_STARTUP_(ticks) & AT91_ADC_STARTUP; if (st->low_res) reg |= AT91_ADC_LOWRES; if (st->sleep_mode) reg |= AT91_ADC_SLEEP; reg |= AT91_ADC_SHTIM_(shtim) & AT91_ADC_SHTIM; at91_adc_writel(st, AT91_ADC_MR, reg); /* Setup the ADC channels available on the board */ ret = at91_adc_channel_init(idev); if (ret < 0) { dev_err(&pdev->dev, "Couldn't initialize the channels.\n"); goto error_disable_adc_clk; } init_waitqueue_head(&st->wq_data_avail); mutex_init(&st->lock); ret = at91_adc_buffer_init(idev); if (ret < 0) { dev_err(&pdev->dev, "Couldn't initialize the buffer.\n"); goto error_disable_adc_clk; } ret = at91_adc_trigger_init(idev); if (ret < 0) { dev_err(&pdev->dev, "Couldn't setup the triggers.\n"); goto error_unregister_buffer; } ret = iio_device_register(idev); if (ret < 0) { dev_err(&pdev->dev, "Couldn't register the device.\n"); goto error_remove_triggers; } return 0; error_remove_triggers: at91_adc_trigger_remove(idev); error_unregister_buffer: at91_adc_buffer_remove(idev); error_disable_adc_clk: clk_disable_unprepare(st->adc_clk); error_disable_clk: clk_disable_unprepare(st->clk); error_free_irq: free_irq(st->irq, idev); 
error_free_device: iio_device_free(idev); error_ret: return ret; } static int at91_adc_remove(struct platform_device *pdev) { struct iio_dev *idev = platform_get_drvdata(pdev); struct at91_adc_state *st = iio_priv(idev); iio_device_unregister(idev); at91_adc_trigger_remove(idev); at91_adc_buffer_remove(idev); clk_disable_unprepare(st->adc_clk); clk_disable_unprepare(st->clk); free_irq(st->irq, idev); iio_device_free(idev); return 0; } static const struct of_device_id at91_adc_dt_ids[] = { { .compatible = "atmel,at91sam9260-adc" }, {}, }; MODULE_DEVICE_TABLE(of, at91_adc_dt_ids); static struct platform_driver at91_adc_driver = { .probe = at91_adc_probe, .remove = at91_adc_remove, .driver = { .name = "at91_adc", .of_match_table = of_match_ptr(at91_adc_dt_ids), }, }; module_platform_driver(at91_adc_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Atmel AT91 ADC Driver"); MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
gpl-2.0
PyYoshi/ponyo_kernel
drivers/net/tc35815.c
731
65839
/* * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux. * * Based on skelton.c by Donald Becker. * * This driver is a replacement of older and less maintained version. * This is a header of the older version: * -----<snip>----- * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ahennessy@mvista.com * Copyright (C) 2000-2001 Toshiba Corporation * static const char *version = * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n"; * -----<snip>----- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * (C) Copyright TOSHIBA CORPORATION 2004-2005 * All Rights Reserved. */ #define DRV_VERSION "1.39" static const char *version = "tc35815.c:v" DRV_VERSION "\n"; #define MODNAME "tc35815" #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/phy.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/byteorder.h> enum tc35815_chiptype { TC35815CF = 0, TC35815_NWU, TC35815_TX4939, }; /* indexed by tc35815_chiptype, above */ static const struct { const char *name; } chip_info[] __devinitdata = { { "TOSHIBA TC35815CF 10/100BaseTX" }, { "TOSHIBA TC35815 with Wake on LAN" }, { "TOSHIBA TC35815/TX4939" }, }; static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = 
TC35815_NWU }, {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, {0,} }; MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl); /* see MODULE_PARM_DESC */ static struct tc35815_options { int speed; int duplex; } options; /* * Registers */ struct tc35815_regs { __u32 DMA_Ctl; /* 0x00 */ __u32 TxFrmPtr; __u32 TxThrsh; __u32 TxPollCtr; __u32 BLFrmPtr; __u32 RxFragSize; __u32 Int_En; __u32 FDA_Bas; __u32 FDA_Lim; /* 0x20 */ __u32 Int_Src; __u32 unused0[2]; __u32 PauseCnt; __u32 RemPauCnt; __u32 TxCtlFrmStat; __u32 unused1; __u32 MAC_Ctl; /* 0x40 */ __u32 CAM_Ctl; __u32 Tx_Ctl; __u32 Tx_Stat; __u32 Rx_Ctl; __u32 Rx_Stat; __u32 MD_Data; __u32 MD_CA; __u32 CAM_Adr; /* 0x60 */ __u32 CAM_Data; __u32 CAM_Ena; __u32 PROM_Ctl; __u32 PROM_Data; __u32 Algn_Cnt; __u32 CRC_Cnt; __u32 Miss_Cnt; }; /* * Bit assignments */ /* DMA_Ctl bit asign ------------------------------------------------------- */ #define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ #define DMA_RxAlign_1 0x00400000 #define DMA_RxAlign_2 0x00800000 #define DMA_RxAlign_3 0x00c00000 #define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ #define DMA_IntMask 0x00040000 /* 1:Interupt mask */ #define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ #define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ #define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ #define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ #define DMA_TestMode 0x00002000 /* 1:Test Mode */ #define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ #define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ /* RxFragSize bit asign ---------------------------------------------------- */ #define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ #define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ /* MAC_Ctl bit asign ------------------------------------------------------- */ #define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ #define MAC_EnMissRoll 0x00002000 /* 1:Enable 
Missed Roll */ #define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ #define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ #define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ #define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ #define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ #define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ #define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ #define MAC_Reset 0x00000004 /* 1:Software Reset */ #define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ #define MAC_HaltReq 0x00000001 /* 1:Halt request */ /* PROM_Ctl bit asign ------------------------------------------------------ */ #define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ #define PROM_Read 0x00004000 /*10:Read operation */ #define PROM_Write 0x00002000 /*01:Write operation */ #define PROM_Erase 0x00006000 /*11:Erase operation */ /*00:Enable or Disable Writting, */ /* as specified in PROM_Addr. */ #define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ /*00xxxx: disable */ /* CAM_Ctl bit asign ------------------------------------------------------- */ #define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ #define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ /* accept other */ #define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ #define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ #define CAM_StationAcc 0x00000001 /* 1:unicast accept */ /* CAM_Ena bit asign ------------------------------------------------------- */ #define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ #define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ #define CAM_Ena_Bit(index) (1 << (index)) #define CAM_ENTRY_DESTINATION 0 #define CAM_ENTRY_SOURCE 1 #define CAM_ENTRY_MACCTL 20 /* Tx_Ctl bit asign -------------------------------------------------------- */ #define Tx_En 0x00000001 /* 1:Transmit enable */ #define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ #define Tx_NoPad 0x00000004 /* 1:Suppress Padding 
*/ #define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ #define Tx_FBack 0x00000010 /* 1:Fast Back-off */ #define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ #define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ #define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ #define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ #define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ #define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ #define Tx_EnComp 0x00004000 /* 1:Enable Completion */ /* Tx_Stat bit asign ------------------------------------------------------- */ #define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ #define Tx_ExColl 0x00000010 /* Excessive Collision */ #define Tx_TXDefer 0x00000020 /* Transmit Defered */ #define Tx_Paused 0x00000040 /* Transmit Paused */ #define Tx_IntTx 0x00000080 /* Interrupt on Tx */ #define Tx_Under 0x00000100 /* Underrun */ #define Tx_Defer 0x00000200 /* Deferral */ #define Tx_NCarr 0x00000400 /* No Carrier */ #define Tx_10Stat 0x00000800 /* 10Mbps Status */ #define Tx_LateColl 0x00001000 /* Late Collision */ #define Tx_TxPar 0x00002000 /* Tx Parity Error */ #define Tx_Comp 0x00004000 /* Completion */ #define Tx_Halted 0x00008000 /* Tx Halted */ #define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ /* Rx_Ctl bit asign -------------------------------------------------------- */ #define Rx_EnGood 0x00004000 /* 1:Enable Good */ #define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ #define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ #define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ #define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ #define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ #define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ #define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ #define Rx_ShortEn 0x00000008 /* 1:Short Enable */ #define Rx_LongEn 0x00000004 /* 1:Long Enable */ #define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ #define 
Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ /* Rx_Stat bit asign ------------------------------------------------------- */ #define Rx_Halted 0x00008000 /* Rx Halted */ #define Rx_Good 0x00004000 /* Rx Good */ #define Rx_RxPar 0x00002000 /* Rx Parity Error */ #define Rx_TypePkt 0x00001000 /* Rx Type Packet */ #define Rx_LongErr 0x00000800 /* Rx Long Error */ #define Rx_Over 0x00000400 /* Rx Overflow */ #define Rx_CRCErr 0x00000200 /* Rx CRC Error */ #define Rx_Align 0x00000100 /* Rx Alignment Error */ #define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ #define Rx_IntRx 0x00000040 /* Rx Interrupt */ #define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ #define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */ #define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */ /* Int_En bit asign -------------------------------------------------------- */ #define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ #define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */ #define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ #define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ #define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ #define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ #define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ #define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ #define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ #define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ #define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ #define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ /* Exhausted Enable */ /* Int_Src bit asign ------------------------------------------------------- */ #define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ #define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ #define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ #define Int_FDAEx 
0x00000800 /* 1:FDA Empty & Clear */ #define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ #define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ #define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ #define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ #define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ #define Int_SWInt 0x00000020 /* 1:Software request & Clear */ #define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ #define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ #define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ #define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ #define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ /* MD_CA bit asign --------------------------------------------------------- */ #define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */ #define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ #define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ /* * Descriptors */ /* Frame descripter */ struct FDesc { volatile __u32 FDNext; volatile __u32 FDSystem; volatile __u32 FDStat; volatile __u32 FDCtl; }; /* Buffer descripter */ struct BDesc { volatile __u32 BuffData; volatile __u32 BDCtl; }; #define FD_ALIGN 16 /* Frame Descripter bit asign ---------------------------------------------- */ #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ #define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ #define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ #define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ #define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ #define FD_FrmOpt_Packing 0x04000000 /* Rx only */ #define FD_CownsFD 0x80000000 /* FD Controller owner bit */ #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ #define FD_BDCnt_SHIFT 16 /* Buffer Descripter bit asign --------------------------------------------- */ #define BD_BuffLength_MASK 
0x0000FFFF /* Recieve Data Size */ #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ #define BD_CownsBD 0x80000000 /* BD Controller owner bit */ #define BD_RxBDID_SHIFT 16 #define BD_RxBDSeqN_SHIFT 24 /* Some useful constants. */ #define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \ Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ Tx_En) /* maybe 0x7b01 */ /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */ #define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ #define INT_EN_CMD (Int_NRAbtEn | \ Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \ Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \ Int_STargAbtEn | \ Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ #define DMA_CTL_CMD DMA_BURST_SIZE #define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF) /* Tuning parameters */ #define DMA_BURST_SIZE 32 #define TX_THRESHOLD 1024 /* used threshold with packet max byte for low pci transfer ability.*/ #define TX_THRESHOLD_MAX 1536 /* setting threshold max value when overrun error occured this count. 
*/ #define TX_THRESHOLD_KEEP_LIMIT 10 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ #define FD_PAGE_NUM 4 #define RX_BUF_NUM 128 /* < 256 */ #define RX_FD_NUM 256 /* >= 32 */ #define TX_FD_NUM 128 #if RX_CTL_CMD & Rx_LongEn #define RX_BUF_SIZE PAGE_SIZE #elif RX_CTL_CMD & Rx_StripCRC #define RX_BUF_SIZE \ L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN) #else #define RX_BUF_SIZE \ L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) #endif #define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ #define NAPI_WEIGHT 16 struct TxFD { struct FDesc fd; struct BDesc bd; struct BDesc unused; }; struct RxFD { struct FDesc fd; struct BDesc bd[0]; /* variable length */ }; struct FrFD { struct FDesc fd; struct BDesc bd[RX_BUF_NUM]; }; #define tc_readl(addr) ioread32(addr) #define tc_writel(d, addr) iowrite32(d, addr) #define TC35815_TX_TIMEOUT msecs_to_jiffies(400) /* Information that need to be kept for each controller. */ struct tc35815_local { struct pci_dev *pci_dev; struct net_device *dev; struct napi_struct napi; /* statistics */ struct { int max_tx_qlen; int tx_ints; int rx_ints; int tx_underrun; } lstats; /* Tx control lock. This protects the transmit buffer ring * state along with the "tx full" state of the driver. This * means all netif_queue flow control actions are protected * by this lock as well. */ spinlock_t lock; spinlock_t rx_lock; struct mii_bus *mii_bus; struct phy_device *phy_dev; int duplex; int speed; int link; struct work_struct restart_work; /* * Transmitting: Batch Mode. * 1 BD in 1 TxFD. * Receiving: Non-Packing Mode. * 1 circular FD for Free Buffer List. * RX_BUF_NUM BD in Free Buffer FD. * One Free Buffer BD has ETH_FRAME_LEN data buffer. 
*/ void *fd_buf; /* for TxFD, RxFD, FrFD */ dma_addr_t fd_buf_dma; struct TxFD *tfd_base; unsigned int tfd_start; unsigned int tfd_end; struct RxFD *rfd_base; struct RxFD *rfd_limit; struct RxFD *rfd_cur; struct FrFD *fbl_ptr; unsigned int fbl_count; struct { struct sk_buff *skb; dma_addr_t skb_dma; } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; u32 msg_enable; enum tc35815_chiptype chiptype; }; static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) { return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf); } #ifdef DEBUG static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) { return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); } #endif static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, struct pci_dev *hwdev, dma_addr_t *dma_handle) { struct sk_buff *skb; skb = dev_alloc_skb(RX_BUF_SIZE); if (!skb) return NULL; *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(hwdev, *dma_handle)) { dev_kfree_skb_any(skb); return NULL; } skb_reserve(skb, 2); /* make IP header 4byte aligned */ return skb; } static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle) { pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); } /* Index to functions, as function prototypes. 
*/ static int tc35815_open(struct net_device *dev); static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); static int tc35815_rx(struct net_device *dev, int limit); static int tc35815_poll(struct napi_struct *napi, int budget); static void tc35815_txdone(struct net_device *dev); static int tc35815_close(struct net_device *dev); static struct net_device_stats *tc35815_get_stats(struct net_device *dev); static void tc35815_set_multicast_list(struct net_device *dev); static void tc35815_tx_timeout(struct net_device *dev); static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); #ifdef CONFIG_NET_POLL_CONTROLLER static void tc35815_poll_controller(struct net_device *dev); #endif static const struct ethtool_ops tc35815_ethtool_ops; /* Example routines you must write ;->. */ static void tc35815_chip_reset(struct net_device *dev); static void tc35815_chip_init(struct net_device *dev); #ifdef DEBUG static void panic_queues(struct net_device *dev); #endif static void tc35815_restart_work(struct work_struct *work); static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct net_device *dev = bus->priv; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; unsigned long timeout = jiffies + HZ; tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); udelay(12); /* it takes 32 x 400ns at least */ while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); } return tc_readl(&tr->MD_Data) & 0xffff; } static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) { struct net_device *dev = bus->priv; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; unsigned long timeout = jiffies + HZ; tc_writel(val, &tr->MD_Data); tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); udelay(12); /* it takes 32 x 400ns at 
least */ while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); } return 0; } static void tc_handle_link_change(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct phy_device *phydev = lp->phy_dev; unsigned long flags; int status_change = 0; spin_lock_irqsave(&lp->lock, flags); if (phydev->link && (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; u32 reg; reg = tc_readl(&tr->MAC_Ctl); reg |= MAC_HaltReq; tc_writel(reg, &tr->MAC_Ctl); if (phydev->duplex == DUPLEX_FULL) reg |= MAC_FullDup; else reg &= ~MAC_FullDup; tc_writel(reg, &tr->MAC_Ctl); reg &= ~MAC_HaltReq; tc_writel(reg, &tr->MAC_Ctl); /* * TX4939 PCFG.SPEEDn bit will be changed on * NETDEV_CHANGE event. */ /* * WORKAROUND: enable LostCrS only if half duplex * operation. * (TX4939 does not have EnLCarr) */ if (phydev->duplex == DUPLEX_HALF && lp->chiptype != TC35815_TX4939) tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, &tr->Tx_Ctl); lp->speed = phydev->speed; lp->duplex = phydev->duplex; status_change = 1; } if (phydev->link != lp->link) { if (phydev->link) { /* delayed promiscuous enabling */ if (dev->flags & IFF_PROMISC) tc35815_set_multicast_list(dev); } else { lp->speed = 0; lp->duplex = -1; } lp->link = phydev->link; status_change = 1; } spin_unlock_irqrestore(&lp->lock, flags); if (status_change && netif_msg_link(lp)) { phy_print_status(phydev); pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n", dev->name, phy_read(phydev, MII_BMCR), phy_read(phydev, MII_BMSR), phy_read(phydev, MII_LPA)); } } static int tc_mii_probe(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct phy_device *phydev = NULL; int phy_addr; u32 dropmask; /* find the first phy */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (lp->mii_bus->phy_map[phy_addr]) { if (phydev) { printk(KERN_ERR "%s: multiple PHYs found\n", dev->name); 
return -EINVAL; } phydev = lp->mii_bus->phy_map[phy_addr]; break; } } if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* attach the mac to the phy */ phydev = phy_connect(dev, dev_name(&phydev->dev), &tc_handle_link_change, 0, lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } printk(KERN_INFO "%s: attached PHY driver [%s] " "(mii_bus:phy_addr=%s, id=%x)\n", dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->phy_id); /* mask with MAC supported features */ phydev->supported &= PHY_BASIC_FEATURES; dropmask = 0; if (options.speed == 10) dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; else if (options.speed == 100) dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; if (options.duplex == 1) dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; else if (options.duplex == 2) dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; phydev->supported &= ~dropmask; phydev->advertising = phydev->supported; lp->link = 0; lp->speed = 0; lp->duplex = -1; lp->phy_dev = phydev; return 0; } static int tc_mii_init(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int err; int i; lp->mii_bus = mdiobus_alloc(); if (lp->mii_bus == NULL) { err = -ENOMEM; goto err_out; } lp->mii_bus->name = "tc35815_mii_bus"; lp->mii_bus->read = tc_mdio_read; lp->mii_bus->write = tc_mdio_write; snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); lp->mii_bus->priv = dev; lp->mii_bus->parent = &lp->pci_dev->dev; lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!lp->mii_bus->irq) { err = -ENOMEM; goto err_out_free_mii_bus; } for (i = 0; i < PHY_MAX_ADDR; i++) lp->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(lp->mii_bus); if (err) goto err_out_free_mdio_irq; err = tc_mii_probe(dev); if 
(err) goto err_out_unregister_bus; return 0; err_out_unregister_bus: mdiobus_unregister(lp->mii_bus); err_out_free_mdio_irq: kfree(lp->mii_bus->irq); err_out_free_mii_bus: mdiobus_free(lp->mii_bus); err_out: return err; } #ifdef CONFIG_CPU_TX49XX /* * Find a platform_device providing a MAC address. The platform code * should provide a "tc35815-mac" device with a MAC address in its * platform_data. */ static int __devinit tc35815_mac_match(struct device *dev, void *data) { struct platform_device *plat_dev = to_platform_device(dev); struct pci_dev *pci_dev = data; unsigned int id = pci_dev->irq; return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; } static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct device *pd = bus_find_device(&platform_bus_type, NULL, lp->pci_dev, tc35815_mac_match); if (pd) { if (pd->platform_data) memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); put_device(pd); return is_valid_ether_addr(dev->dev_addr) ? 
0 : -ENODEV; } return -ENODEV; } #else static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) { return -ENODEV; } #endif static int __devinit tc35815_init_dev_addr(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; int i; while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) ; for (i = 0; i < 6; i += 2) { unsigned short data; tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) ; data = tc_readl(&tr->PROM_Data); dev->dev_addr[i] = data & 0xff; dev->dev_addr[i+1] = data >> 8; } if (!is_valid_ether_addr(dev->dev_addr)) return tc35815_read_plat_dev_addr(dev); return 0; } static const struct net_device_ops tc35815_netdev_ops = { .ndo_open = tc35815_open, .ndo_stop = tc35815_close, .ndo_start_xmit = tc35815_send_packet, .ndo_get_stats = tc35815_get_stats, .ndo_set_multicast_list = tc35815_set_multicast_list, .ndo_tx_timeout = tc35815_tx_timeout, .ndo_do_ioctl = tc35815_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tc35815_poll_controller, #endif }; static int __devinit tc35815_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { void __iomem *ioaddr = NULL; struct net_device *dev; struct tc35815_local *lp; int rc; static int printed_version; if (!printed_version++) { printk(version); dev_printk(KERN_DEBUG, &pdev->dev, "speed:%d duplex:%d\n", options.speed, options.duplex); } if (!pdev->irq) { dev_warn(&pdev->dev, "no IRQ assigned.\n"); return -ENODEV; } /* dev zeroed in alloc_etherdev */ dev = alloc_etherdev(sizeof(*lp)); if (dev == NULL) { dev_err(&pdev->dev, "unable to alloc new ethernet\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, &pdev->dev); lp = netdev_priv(dev); lp->dev = dev; /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ rc = pcim_enable_device(pdev); if (rc) goto err_out; rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME); if (rc) goto err_out; pci_set_master(pdev); ioaddr = pcim_iomap_table(pdev)[1]; /* Initialize the device structure. */ dev->netdev_ops = &tc35815_netdev_ops; dev->ethtool_ops = &tc35815_ethtool_ops; dev->watchdog_timeo = TC35815_TX_TIMEOUT; netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); dev->irq = pdev->irq; dev->base_addr = (unsigned long)ioaddr; INIT_WORK(&lp->restart_work, tc35815_restart_work); spin_lock_init(&lp->lock); spin_lock_init(&lp->rx_lock); lp->pci_dev = pdev; lp->chiptype = ent->driver_data; lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; pci_set_drvdata(pdev, dev); /* Soft reset the chip. */ tc35815_chip_reset(dev); /* Retrieve the ethernet address. */ if (tc35815_init_dev_addr(dev)) { dev_warn(&pdev->dev, "not valid ether addr\n"); random_ether_addr(dev->dev_addr); } rc = register_netdev(dev); if (rc) goto err_out; memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", dev->name, chip_info[ent->driver_data].name, dev->base_addr, dev->dev_addr, dev->irq); rc = tc_mii_init(dev); if (rc) goto err_out_unregister; return 0; err_out_unregister: unregister_netdev(dev); err_out: free_netdev(dev); return rc; } static void __devexit tc35815_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct tc35815_local *lp = netdev_priv(dev); phy_disconnect(lp->phy_dev); mdiobus_unregister(lp->mii_bus); kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); free_netdev(dev); pci_set_drvdata(pdev, NULL); } static int tc35815_init_queues(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int i; unsigned long fd_addr; if (!lp->fd_buf) { BUG_ON(sizeof(struct FDesc) + sizeof(struct BDesc) * RX_BUF_NUM + sizeof(struct FDesc) * RX_FD_NUM + sizeof(struct TxFD) * TX_FD_NUM 
> PAGE_SIZE * FD_PAGE_NUM); lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma); if (!lp->fd_buf) return -ENOMEM; for (i = 0; i < RX_BUF_NUM; i++) { lp->rx_skbs[i].skb = alloc_rxbuf_skb(dev, lp->pci_dev, &lp->rx_skbs[i].skb_dma); if (!lp->rx_skbs[i].skb) { while (--i >= 0) { free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, lp->rx_skbs[i].skb_dma); lp->rx_skbs[i].skb = NULL; } pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, lp->fd_buf, lp->fd_buf_dma); lp->fd_buf = NULL; return -ENOMEM; } } printk(KERN_DEBUG "%s: FD buf %p DataBuf", dev->name, lp->fd_buf); printk("\n"); } else { for (i = 0; i < FD_PAGE_NUM; i++) clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE)); } fd_addr = (unsigned long)lp->fd_buf; /* Free Descriptors (for Receive) */ lp->rfd_base = (struct RxFD *)fd_addr; fd_addr += sizeof(struct RxFD) * RX_FD_NUM; for (i = 0; i < RX_FD_NUM; i++) lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); lp->rfd_cur = lp->rfd_base; lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); /* Transmit Descriptors */ lp->tfd_base = (struct TxFD *)fd_addr; fd_addr += sizeof(struct TxFD) * TX_FD_NUM; for (i = 0; i < TX_FD_NUM; i++) { lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1])); lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0); } lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0])); lp->tfd_start = 0; lp->tfd_end = 0; /* Buffer List (for Receive) */ lp->fbl_ptr = (struct FrFD *)fd_addr; lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); /* * move all allocated skbs to head of rx_skbs[] array. * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in * tc35815_rx() had failed. 
*/ lp->fbl_count = 0; for (i = 0; i < RX_BUF_NUM; i++) { if (lp->rx_skbs[i].skb) { if (i != lp->fbl_count) { lp->rx_skbs[lp->fbl_count].skb = lp->rx_skbs[i].skb; lp->rx_skbs[lp->fbl_count].skb_dma = lp->rx_skbs[i].skb_dma; } lp->fbl_count++; } } for (i = 0; i < RX_BUF_NUM; i++) { if (i >= lp->fbl_count) { lp->fbl_ptr->bd[i].BuffData = 0; lp->fbl_ptr->bd[i].BDCtl = 0; continue; } lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->rx_skbs[i].skb_dma); /* BDID is index of FrFD.bd[] */ lp->fbl_ptr->bd[i].BDCtl = cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | RX_BUF_SIZE); } printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); return 0; } static void tc35815_clear_queues(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int i; for (i = 0; i < TX_FD_NUM; i++) { u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); struct sk_buff *skb = fdsystem != 0xffffffff ? lp->tx_skbs[fdsystem].skb : NULL; #ifdef DEBUG if (lp->tx_skbs[i].skb != skb) { printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); panic_queues(dev); } #else BUG_ON(lp->tx_skbs[i].skb != skb); #endif if (skb) { pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); lp->tx_skbs[i].skb = NULL; lp->tx_skbs[i].skb_dma = 0; dev_kfree_skb_any(skb); } lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); } tc35815_init_queues(dev); } static void tc35815_free_queues(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int i; if (lp->tfd_base) { for (i = 0; i < TX_FD_NUM; i++) { u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); struct sk_buff *skb = fdsystem != 0xffffffff ? 
lp->tx_skbs[fdsystem].skb : NULL; #ifdef DEBUG if (lp->tx_skbs[i].skb != skb) { printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); panic_queues(dev); } #else BUG_ON(lp->tx_skbs[i].skb != skb); #endif if (skb) { dev_kfree_skb(skb); pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); lp->tx_skbs[i].skb = NULL; lp->tx_skbs[i].skb_dma = 0; } lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); } } lp->rfd_base = NULL; lp->rfd_limit = NULL; lp->rfd_cur = NULL; lp->fbl_ptr = NULL; for (i = 0; i < RX_BUF_NUM; i++) { if (lp->rx_skbs[i].skb) { free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, lp->rx_skbs[i].skb_dma); lp->rx_skbs[i].skb = NULL; } } if (lp->fd_buf) { pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, lp->fd_buf, lp->fd_buf_dma); lp->fd_buf = NULL; } } static void dump_txfd(struct TxFD *fd) { printk("TxFD(%p): %08x %08x %08x %08x\n", fd, le32_to_cpu(fd->fd.FDNext), le32_to_cpu(fd->fd.FDSystem), le32_to_cpu(fd->fd.FDStat), le32_to_cpu(fd->fd.FDCtl)); printk("BD: "); printk(" %08x %08x", le32_to_cpu(fd->bd.BuffData), le32_to_cpu(fd->bd.BDCtl)); printk("\n"); } static int dump_rxfd(struct RxFD *fd) { int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; if (bd_count > 8) bd_count = 8; printk("RxFD(%p): %08x %08x %08x %08x\n", fd, le32_to_cpu(fd->fd.FDNext), le32_to_cpu(fd->fd.FDSystem), le32_to_cpu(fd->fd.FDStat), le32_to_cpu(fd->fd.FDCtl)); if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) return 0; printk("BD: "); for (i = 0; i < bd_count; i++) printk(" %08x %08x", le32_to_cpu(fd->bd[i].BuffData), le32_to_cpu(fd->bd[i].BDCtl)); printk("\n"); return bd_count; } #ifdef DEBUG static void dump_frfd(struct FrFD *fd) { int i; printk("FrFD(%p): %08x %08x %08x %08x\n", fd, le32_to_cpu(fd->fd.FDNext), le32_to_cpu(fd->fd.FDSystem), le32_to_cpu(fd->fd.FDStat), le32_to_cpu(fd->fd.FDCtl)); printk("BD: "); for (i = 0; i < RX_BUF_NUM; i++) printk(" %08x %08x", le32_to_cpu(fd->bd[i].BuffData), 
le32_to_cpu(fd->bd[i].BDCtl)); printk("\n"); } static void panic_queues(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); int i; printk("TxFD base %p, start %u, end %u\n", lp->tfd_base, lp->tfd_start, lp->tfd_end); printk("RxFD base %p limit %p cur %p\n", lp->rfd_base, lp->rfd_limit, lp->rfd_cur); printk("FrFD %p\n", lp->fbl_ptr); for (i = 0; i < TX_FD_NUM; i++) dump_txfd(&lp->tfd_base[i]); for (i = 0; i < RX_FD_NUM; i++) { int bd_count = dump_rxfd(&lp->rfd_base[i]); i += (bd_count + 1) / 2; /* skip BDs */ } dump_frfd(lp->fbl_ptr); panic("%s: Illegal queue state.", dev->name); } #endif static void print_eth(const u8 *add) { printk(KERN_DEBUG "print_eth(%p)\n", add); printk(KERN_DEBUG " %pM => %pM : %02x%02x\n", add + 6, add, add[12], add[13]); } static int tc35815_tx_full(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); } static void tc35815_restart(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); if (lp->phy_dev) { int timeout; phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET); timeout = 100; while (--timeout) { if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET)) break; udelay(1); } if (!timeout) printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); } spin_lock_bh(&lp->rx_lock); spin_lock_irq(&lp->lock); tc35815_chip_reset(dev); tc35815_clear_queues(dev); tc35815_chip_init(dev); /* Reconfigure CAM again since tc35815_chip_init() initialize it. 
*/ tc35815_set_multicast_list(dev); spin_unlock_irq(&lp->lock); spin_unlock_bh(&lp->rx_lock); netif_wake_queue(dev); } static void tc35815_restart_work(struct work_struct *work) { struct tc35815_local *lp = container_of(work, struct tc35815_local, restart_work); struct net_device *dev = lp->dev; tc35815_restart(dev); } static void tc35815_schedule_restart(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; unsigned long flags; /* disable interrupts */ spin_lock_irqsave(&lp->lock, flags); tc_writel(0, &tr->Int_En); tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); schedule_work(&lp->restart_work); spin_unlock_irqrestore(&lp->lock, flags); } static void tc35815_tx_timeout(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; printk(KERN_WARNING "%s: transmit timed out, status %#x\n", dev->name, tc_readl(&tr->Tx_Stat)); /* Try to restart the adaptor. */ tc35815_schedule_restart(dev); dev->stats.tx_errors++; } /* * Open/initialize the controller. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ static int tc35815_open(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); /* * This is used if the interrupt line can turned off (shared). * See 3c503.c for an example of selecting the IRQ at config-time. */ if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED, dev->name, dev)) return -EAGAIN; tc35815_chip_reset(dev); if (tc35815_init_queues(dev) != 0) { free_irq(dev->irq, dev); return -EAGAIN; } napi_enable(&lp->napi); /* Reset the hardware here. Don't forget to set the station address. 
*/ spin_lock_irq(&lp->lock); tc35815_chip_init(dev); spin_unlock_irq(&lp->lock); netif_carrier_off(dev); /* schedule a link state check */ phy_start(lp->phy_dev); /* We are now ready to accept transmit requeusts from * the queueing layer of the networking. */ netif_start_queue(dev); return 0; } /* This will only be invoked if your driver is _not_ in XOFF state. * What this means is that you need not check it, and that this * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. */ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; unsigned long flags; /* If some error occurs while trying to transmit this * packet, you should return '1' from this function. * In such a case you _may not_ do anything to the * SKB, it is still owned by the network queueing * layer when an error is returned. This means you * may not modify any SKB fields, you may not free * the SKB, etc. */ /* This is the most common case for modern hardware. * The spinlock protects this code from the TX complete * hardware interrupt handler. Queue flow control is * thus managed under this lock as well. */ spin_lock_irqsave(&lp->lock, flags); /* failsafe... 
(handle txdone now if half of FDs are used) */ if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM > TX_FD_NUM / 2) tc35815_txdone(dev); if (netif_msg_pktdata(lp)) print_eth(skb->data); #ifdef DEBUG if (lp->tx_skbs[lp->tfd_start].skb) { printk("%s: tx_skbs conflict.\n", dev->name); panic_queues(dev); } #else BUG_ON(lp->tx_skbs[lp->tfd_start].skb); #endif lp->tx_skbs[lp->tfd_start].skb = skb; lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); /*add to ring */ txfd = &lp->tfd_base[lp->tfd_start]; txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma); txfd->bd.BDCtl = cpu_to_le32(skb->len); txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start); txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT)); if (lp->tfd_start == lp->tfd_end) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; /* Start DMA Transmitter. */ txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); if (netif_msg_tx_queued(lp)) { printk("%s: starting TxFD.\n", dev->name); dump_txfd(txfd); } tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); } else { txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL); if (netif_msg_tx_queued(lp)) { printk("%s: queueing TxFD.\n", dev->name); dump_txfd(txfd); } } lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; /* If we just used up the very last entry in the * TX ring on this device, tell the queueing * layer to send no more. */ if (tc35815_tx_full(dev)) { if (netif_msg_tx_queued(lp)) printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name); netif_stop_queue(dev); } /* When the TX completion hw interrupt arrives, this * is when the transmit statistics are updated. 
*/ spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } #define FATAL_ERROR_INT \ (Int_IntPCI | Int_DmParErr | Int_IntNRAbt) static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) { static int count; printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", dev->name, status); if (status & Int_IntPCI) printk(" IntPCI"); if (status & Int_DmParErr) printk(" DmParErr"); if (status & Int_IntNRAbt) printk(" IntNRAbt"); printk("\n"); if (count++ > 100) panic("%s: Too many fatal errors.", dev->name); printk(KERN_WARNING "%s: Resetting ...\n", dev->name); /* Try to restart the adaptor. */ tc35815_schedule_restart(dev); } static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) { struct tc35815_local *lp = netdev_priv(dev); int ret = -1; /* Fatal errors... */ if (status & FATAL_ERROR_INT) { tc35815_fatal_error_interrupt(dev, status); return 0; } /* recoverable errors */ if (status & Int_IntFDAEx) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, "Free Descriptor Area Exhausted (%#x).\n", status); dev->stats.rx_dropped++; ret = 0; } if (status & Int_IntBLEx) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, "Buffer List Exhausted (%#x).\n", status); dev->stats.rx_dropped++; ret = 0; } if (status & Int_IntExBD) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, "Excessive Buffer Descriptiors (%#x).\n", status); dev->stats.rx_length_errors++; ret = 0; } /* normal notification */ if (status & Int_IntMacRx) { /* Got a packet(s). */ ret = tc35815_rx(dev, limit); lp->lstats.rx_ints++; } if (status & Int_IntMacTx) { /* Transmit complete. */ lp->lstats.tx_ints++; spin_lock_irq(&lp->lock); tc35815_txdone(dev); spin_unlock_irq(&lp->lock); if (ret < 0) ret = 0; } return ret; } /* * The typical workload of the driver: * Handle the network interface interrupts. 
*/ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; u32 dmactl = tc_readl(&tr->DMA_Ctl); if (!(dmactl & DMA_IntMask)) { /* disable interrupts */ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); if (napi_schedule_prep(&lp->napi)) __napi_schedule(&lp->napi); else { printk(KERN_ERR "%s: interrupt taken in poll\n", dev->name); BUG(); } (void)tc_readl(&tr->Int_Src); /* flush */ return IRQ_HANDLED; } return IRQ_NONE; } #ifdef CONFIG_NET_POLL_CONTROLLER static void tc35815_poll_controller(struct net_device *dev) { disable_irq(dev->irq); tc35815_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif /* We have a good packet(s), get it/them out of the buffers. */ static int tc35815_rx(struct net_device *dev, int limit) { struct tc35815_local *lp = netdev_priv(dev); unsigned int fdctl; int i; int received = 0; while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); int pkt_len = fdctl & FD_FDLength_MASK; int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; #ifdef DEBUG struct RxFD *next_rfd; #endif #if (RX_CTL_CMD & Rx_StripCRC) == 0 pkt_len -= ETH_FCS_LEN; #endif if (netif_msg_rx_status(lp)) dump_rxfd(lp->rfd_cur); if (status & Rx_Good) { struct sk_buff *skb; unsigned char *data; int cur_bd; if (--limit < 0) break; BUG_ON(bd_count > 1); cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; #ifdef DEBUG if (cur_bd >= RX_BUF_NUM) { printk("%s: invalid BDID.\n", dev->name); panic_queues(dev); } BUG_ON(lp->rx_skbs[cur_bd].skb_dma != (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); if (!lp->rx_skbs[cur_bd].skb) { printk("%s: NULL skb.\n", dev->name); panic_queues(dev); } #else BUG_ON(cur_bd >= RX_BUF_NUM); #endif skb = lp->rx_skbs[cur_bd].skb; prefetch(skb->data); lp->rx_skbs[cur_bd].skb = NULL; 
pci_unmap_single(lp->pci_dev, lp->rx_skbs[cur_bd].skb_dma, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) memmove(skb->data, skb->data - NET_IP_ALIGN, pkt_len); data = skb_put(skb, pkt_len); if (netif_msg_pktdata(lp)) print_eth(data); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); received++; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } else { dev->stats.rx_errors++; if (netif_msg_rx_err(lp)) dev_info(&dev->dev, "Rx error (status %x)\n", status & Rx_Stat_Mask); /* WORKAROUND: LongErr and CRCErr means Overflow. */ if ((status & Rx_LongErr) && (status & Rx_CRCErr)) { status &= ~(Rx_LongErr|Rx_CRCErr); status |= Rx_Over; } if (status & Rx_LongErr) dev->stats.rx_length_errors++; if (status & Rx_Over) dev->stats.rx_fifo_errors++; if (status & Rx_CRCErr) dev->stats.rx_crc_errors++; if (status & Rx_Align) dev->stats.rx_frame_errors++; } if (bd_count > 0) { /* put Free Buffer back to controller */ int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); unsigned char id = (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; #ifdef DEBUG if (id >= RX_BUF_NUM) { printk("%s: invalid BDID.\n", dev->name); panic_queues(dev); } #else BUG_ON(id >= RX_BUF_NUM); #endif /* free old buffers */ lp->fbl_count--; while (lp->fbl_count < RX_BUF_NUM) { unsigned char curid = (id + 1 + lp->fbl_count) % RX_BUF_NUM; struct BDesc *bd = &lp->fbl_ptr->bd[curid]; #ifdef DEBUG bdctl = le32_to_cpu(bd->BDCtl); if (bdctl & BD_CownsBD) { printk("%s: Freeing invalid BD.\n", dev->name); panic_queues(dev); } #endif /* pass BD to controller */ if (!lp->rx_skbs[curid].skb) { lp->rx_skbs[curid].skb = alloc_rxbuf_skb(dev, lp->pci_dev, &lp->rx_skbs[curid].skb_dma); if (!lp->rx_skbs[curid].skb) break; /* try on next reception */ bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); } /* Note: BDLength was modified by chip. 
*/ bd->BDCtl = cpu_to_le32(BD_CownsBD | (curid << BD_RxBDID_SHIFT) | RX_BUF_SIZE); lp->fbl_count++; } } /* put RxFD back to controller */ #ifdef DEBUG next_rfd = fd_bus_to_virt(lp, le32_to_cpu(lp->rfd_cur->fd.FDNext)); if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) { printk("%s: RxFD FDNext invalid.\n", dev->name); panic_queues(dev); } #endif for (i = 0; i < (bd_count + 1) / 2 + 1; i++) { /* pass FD to controller */ #ifdef DEBUG lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); #else lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); #endif lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD); lp->rfd_cur++; } if (lp->rfd_cur > lp->rfd_limit) lp->rfd_cur = lp->rfd_base; #ifdef DEBUG if (lp->rfd_cur != next_rfd) printk("rfd_cur = %p, next_rfd %p\n", lp->rfd_cur, next_rfd); #endif } return received; } static int tc35815_poll(struct napi_struct *napi, int budget) { struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); struct net_device *dev = lp->dev; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; int received = 0, handled; u32 status; spin_lock(&lp->rx_lock); status = tc_readl(&tr->Int_Src); do { /* BLEx, FDAEx will be cleared later */ tc_writel(status & ~(Int_BLEx | Int_FDAEx), &tr->Int_Src); /* write to clear */ handled = tc35815_do_interrupt(dev, status, budget - received); if (status & (Int_BLEx | Int_FDAEx)) tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src); if (handled >= 0) { received += handled; if (received >= budget) break; } status = tc_readl(&tr->Int_Src); } while (status); spin_unlock(&lp->rx_lock); if (received < budget) { napi_complete(napi); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } return received; } #define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) static void tc35815_check_tx_stat(struct net_device *dev, int status) { struct tc35815_local *lp = netdev_priv(dev); const char *msg = NULL; /* count 
collisions */ if (status & Tx_ExColl) dev->stats.collisions += 16; if (status & Tx_TxColl_MASK) dev->stats.collisions += status & Tx_TxColl_MASK; /* TX4939 does not have NCarr */ if (lp->chiptype == TC35815_TX4939) status &= ~Tx_NCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ if (!lp->link || lp->duplex == DUPLEX_FULL) status &= ~Tx_NCarr; if (!(status & TX_STA_ERR)) { /* no error. */ dev->stats.tx_packets++; return; } dev->stats.tx_errors++; if (status & Tx_ExColl) { dev->stats.tx_aborted_errors++; msg = "Excessive Collision."; } if (status & Tx_Under) { dev->stats.tx_fifo_errors++; msg = "Tx FIFO Underrun."; if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { lp->lstats.tx_underrun++; if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); msg = "Tx FIFO Underrun.Change Tx threshold to max."; } } } if (status & Tx_Defer) { dev->stats.tx_fifo_errors++; msg = "Excessive Deferral."; } if (status & Tx_NCarr) { dev->stats.tx_carrier_errors++; msg = "Lost Carrier Sense."; } if (status & Tx_LateColl) { dev->stats.tx_aborted_errors++; msg = "Late Collision."; } if (status & Tx_TxPar) { dev->stats.tx_fifo_errors++; msg = "Transmit Parity Error."; } if (status & Tx_SQErr) { dev->stats.tx_heartbeat_errors++; msg = "Signal Quality Error."; } if (msg && netif_msg_tx_err(lp)) printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status); } /* This handles TX complete events posted by the device * via interrupts. 
*/ static void tc35815_txdone(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; unsigned int fdctl; txfd = &lp->tfd_base[lp->tfd_end]; while (lp->tfd_start != lp->tfd_end && !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) { int status = le32_to_cpu(txfd->fd.FDStat); struct sk_buff *skb; unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext); u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem); if (netif_msg_tx_done(lp)) { printk("%s: complete TxFD.\n", dev->name); dump_txfd(txfd); } tc35815_check_tx_stat(dev, status); skb = fdsystem != 0xffffffff ? lp->tx_skbs[fdsystem].skb : NULL; #ifdef DEBUG if (lp->tx_skbs[lp->tfd_end].skb != skb) { printk("%s: tx_skbs mismatch.\n", dev->name); panic_queues(dev); } #else BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); #endif if (skb) { dev->stats.tx_bytes += skb->len; pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); lp->tx_skbs[lp->tfd_end].skb = NULL; lp->tx_skbs[lp->tfd_end].skb_dma = 0; dev_kfree_skb_any(skb); } txfd->fd.FDSystem = cpu_to_le32(0xffffffff); lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM; txfd = &lp->tfd_base[lp->tfd_end]; #ifdef DEBUG if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) { printk("%s: TxFD FDNext invalid.\n", dev->name); panic_queues(dev); } #endif if (fdnext & FD_Next_EOL) { /* DMA Transmitter has been stopping... 
*/ if (lp->tfd_end != lp->tfd_start) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; struct TxFD *txhead = &lp->tfd_base[head]; int qlen = (lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM; #ifdef DEBUG if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) { printk("%s: TxFD FDCtl invalid.\n", dev->name); panic_queues(dev); } #endif /* log max queue length */ if (lp->lstats.max_tx_qlen < qlen) lp->lstats.max_tx_qlen = qlen; /* start DMA Transmitter again */ txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); if (netif_msg_tx_queued(lp)) { printk("%s: start TxFD on queue.\n", dev->name); dump_txfd(txfd); } tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); } break; } } /* If we had stopped the queue due to a "tx full" * condition, and space has now been made available, * wake up the queue. */ if (netif_queue_stopped(dev) && !tc35815_tx_full(dev)) netif_wake_queue(dev); } /* The inverse routine to tc35815_open(). */ static int tc35815_close(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&lp->napi); if (lp->phy_dev) phy_stop(lp->phy_dev); cancel_work_sync(&lp->restart_work); /* Flush the Tx and disable Rx here. */ tc35815_chip_reset(dev); free_irq(dev->irq, dev); tc35815_free_queues(dev); return 0; } /* * Get the current statistics. * This may be called with the card open or closed. */ static struct net_device_stats *tc35815_get_stats(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; if (netif_running(dev)) /* Update the statistics from the device registers. 
*/ dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt); return &dev->stats; } static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) { struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; int cam_index = index * 6; u32 cam_data; u32 saved_addr; saved_addr = tc_readl(&tr->CAM_Adr); if (netif_msg_hw(lp)) printk(KERN_DEBUG "%s: CAM %d: %pM\n", dev->name, index, addr); if (index & 1) { /* read modify write */ tc_writel(cam_index - 2, &tr->CAM_Adr); cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; cam_data |= addr[0] << 8 | addr[1]; tc_writel(cam_data, &tr->CAM_Data); /* write whole word */ tc_writel(cam_index + 2, &tr->CAM_Adr); cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; tc_writel(cam_data, &tr->CAM_Data); } else { /* write whole word */ tc_writel(cam_index, &tr->CAM_Adr); cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; tc_writel(cam_data, &tr->CAM_Data); /* read modify write */ tc_writel(cam_index + 4, &tr->CAM_Adr); cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; cam_data |= addr[4] << 24 | (addr[5] << 16); tc_writel(cam_data, &tr->CAM_Data); } tc_writel(saved_addr, &tr->CAM_Adr); } /* * Set or clear the multicast filter for this adaptor. * num_addrs == -1 Promiscuous mode, receive all packets * num_addrs == 0 Normal mode, clear multicast list * num_addrs > 0 Multicast mode, receive normal and MC packets, * and do best-effort filtering. */ static void tc35815_set_multicast_list(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; if (dev->flags & IFF_PROMISC) { /* With some (all?) 100MHalf HUB, controller will hang * if we enabled promiscuous mode before linkup... 
*/ struct tc35815_local *lp = netdev_priv(dev); if (!lp->link) return; /* Enable promiscuous mode */ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); } else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) { /* CAM 0, 1, 20 are reserved. */ /* Disable promiscuous mode, use normal mode. */ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; int i; int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); tc_writel(0, &tr->CAM_Ctl); /* Walk the address list, and load the filter */ i = 0; netdev_for_each_mc_addr(ha, dev) { /* entry 0,1 is reserved. */ tc35815_set_cam_entry(dev, i + 2, ha->addr); ena_bits |= CAM_Ena_Bit(i + 2); i++; } tc_writel(ena_bits, &tr->CAM_Ena); tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); } else { tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); } } static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tc35815_local *lp = netdev_priv(dev); strcpy(info->driver, MODNAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, pci_name(lp->pci_dev)); } static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tc35815_local *lp = netdev_priv(dev); if (!lp->phy_dev) return -ENODEV; return phy_ethtool_gset(lp->phy_dev, cmd); } static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tc35815_local *lp = netdev_priv(dev); if (!lp->phy_dev) return -ENODEV; return phy_ethtool_sset(lp->phy_dev, cmd); } static u32 tc35815_get_msglevel(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); return lp->msg_enable; } static void tc35815_set_msglevel(struct net_device *dev, u32 datum) { struct tc35815_local *lp = netdev_priv(dev); lp->msg_enable = datum; } static int tc35815_get_sset_count(struct net_device *dev, int sset) { struct tc35815_local 
*lp = netdev_priv(dev); switch (sset) { case ETH_SS_STATS: return sizeof(lp->lstats) / sizeof(int); default: return -EOPNOTSUPP; } } static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct tc35815_local *lp = netdev_priv(dev); data[0] = lp->lstats.max_tx_qlen; data[1] = lp->lstats.tx_ints; data[2] = lp->lstats.rx_ints; data[3] = lp->lstats.tx_underrun; } static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "max_tx_qlen" }, { "tx_ints" }, { "rx_ints" }, { "tx_underrun" }, }; static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) { memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); } static const struct ethtool_ops tc35815_ethtool_ops = { .get_drvinfo = tc35815_get_drvinfo, .get_settings = tc35815_get_settings, .set_settings = tc35815_set_settings, .get_link = ethtool_op_get_link, .get_msglevel = tc35815_get_msglevel, .set_msglevel = tc35815_set_msglevel, .get_strings = tc35815_get_strings, .get_sset_count = tc35815_get_sset_count, .get_ethtool_stats = tc35815_get_ethtool_stats, }; static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct tc35815_local *lp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; if (!lp->phy_dev) return -ENODEV; return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd); } static void tc35815_chip_reset(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; int i; /* reset the controller */ tc_writel(MAC_Reset, &tr->MAC_Ctl); udelay(4); /* 3200ns */ i = 0; while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { if (i++ > 100) { printk(KERN_ERR "%s: MAC reset failed.\n", dev->name); break; } mdelay(1); } tc_writel(0, &tr->MAC_Ctl); /* initialize registers to default value */ tc_writel(0, &tr->DMA_Ctl); tc_writel(0, &tr->TxThrsh); tc_writel(0, &tr->TxPollCtr); tc_writel(0, &tr->RxFragSize); tc_writel(0, &tr->Int_En); tc_writel(0, &tr->FDA_Bas); tc_writel(0, 
&tr->FDA_Lim); tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ tc_writel(0, &tr->CAM_Ctl); tc_writel(0, &tr->Tx_Ctl); tc_writel(0, &tr->Rx_Ctl); tc_writel(0, &tr->CAM_Ena); (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ /* initialize internal SRAM */ tc_writel(DMA_TestMode, &tr->DMA_Ctl); for (i = 0; i < 0x1000; i += 4) { tc_writel(i, &tr->CAM_Adr); tc_writel(0, &tr->CAM_Data); } tc_writel(0, &tr->DMA_Ctl); } static void tc35815_chip_init(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; unsigned long txctl = TX_CTL_CMD; /* load station address to CAM */ tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); /* Enable CAM (broadcast and unicast) */ tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ if (HAVE_DMA_RXALIGN(lp)) tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); else tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); tc_writel(0, &tr->TxPollCtr); /* Batch mode */ tc_writel(TX_THRESHOLD, &tr->TxThrsh); tc_writel(INT_EN_CMD, &tr->Int_En); /* set queues */ tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base, &tr->FDA_Lim); /* * Activation method: * First, enable the MAC Transmitter and the DMA Receive circuits. * Then enable the DMA Transmitter and the MAC Receive circuits. 
*/ tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ /* start MAC transmitter */ /* TX4939 does not have EnLCarr */ if (lp->chiptype == TC35815_TX4939) txctl &= ~Tx_EnLCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) txctl &= ~Tx_EnLCarr; tc_writel(txctl, &tr->Tx_Ctl); } #ifdef CONFIG_PM static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct tc35815_local *lp = netdev_priv(dev); unsigned long flags; pci_save_state(pdev); if (!netif_running(dev)) return 0; netif_device_detach(dev); if (lp->phy_dev) phy_stop(lp->phy_dev); spin_lock_irqsave(&lp->lock, flags); tc35815_chip_reset(dev); spin_unlock_irqrestore(&lp->lock, flags); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int tc35815_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct tc35815_local *lp = netdev_priv(dev); pci_restore_state(pdev); if (!netif_running(dev)) return 0; pci_set_power_state(pdev, PCI_D0); tc35815_restart(dev); netif_carrier_off(dev); if (lp->phy_dev) phy_start(lp->phy_dev); netif_device_attach(dev); return 0; } #endif /* CONFIG_PM */ static struct pci_driver tc35815_pci_driver = { .name = MODNAME, .id_table = tc35815_pci_tbl, .probe = tc35815_init_one, .remove = __devexit_p(tc35815_remove_one), #ifdef CONFIG_PM .suspend = tc35815_suspend, .resume = tc35815_resume, #endif }; module_param_named(speed, options.speed, int, 0); MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); module_param_named(duplex, options.duplex, int, 0); MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); static int __init tc35815_init_module(void) { return pci_register_driver(&tc35815_pci_driver); } static void __exit tc35815_cleanup_module(void) { pci_unregister_driver(&tc35815_pci_driver); } module_init(tc35815_init_module); 
module_exit(tc35815_cleanup_module);

/* Module metadata reported by modinfo. */
MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");
gpl-2.0
CyanideL/android_kernel_asus_grouper
drivers/infiniband/ulp/iser/iser_verbs.c
2779
23149
/* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include "iscsi_iser.h" #define ISCSI_ISER_MAX_CONN 8 #define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) #define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) static void iser_cq_tasklet_fn(unsigned long data); static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { iser_err("got cq event %d \n", cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { iser_err("got qp event %d\n",cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { iser_err("async event %d on device %s port %d\n", event->event, event->device->name, event->element.port_num); } /** * iser_create_device_ib_res - creates Protection Domain (PD), Completion * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with * the adapator. 
* * returns 0 on success, -1 on failure */ static int iser_create_device_ib_res(struct iser_device *device) { device->pd = ib_alloc_pd(device->ib_device); if (IS_ERR(device->pd)) goto pd_err; device->rx_cq = ib_create_cq(device->ib_device, iser_cq_callback, iser_cq_event_callback, (void *)device, ISER_MAX_RX_CQ_LEN, 0); if (IS_ERR(device->rx_cq)) goto rx_cq_err; device->tx_cq = ib_create_cq(device->ib_device, NULL, iser_cq_event_callback, (void *)device, ISER_MAX_TX_CQ_LEN, 0); if (IS_ERR(device->tx_cq)) goto tx_cq_err; if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP)) goto cq_arm_err; tasklet_init(&device->cq_tasklet, iser_cq_tasklet_fn, (unsigned long)device); device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ); if (IS_ERR(device->mr)) goto dma_mr_err; INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); if (ib_register_event_handler(&device->event_handler)) goto handler_err; return 0; handler_err: ib_dereg_mr(device->mr); dma_mr_err: tasklet_kill(&device->cq_tasklet); cq_arm_err: ib_destroy_cq(device->tx_cq); tx_cq_err: ib_destroy_cq(device->rx_cq); rx_cq_err: ib_dealloc_pd(device->pd); pd_err: iser_err("failed to allocate an IB resource\n"); return -1; } /** * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR, * CQ and PD created with the device associated with the adapator. 
*/ static void iser_free_device_ib_res(struct iser_device *device) { BUG_ON(device->mr == NULL); tasklet_kill(&device->cq_tasklet); (void)ib_unregister_event_handler(&device->event_handler); (void)ib_dereg_mr(device->mr); (void)ib_destroy_cq(device->tx_cq); (void)ib_destroy_cq(device->rx_cq); (void)ib_dealloc_pd(device->pd); device->mr = NULL; device->tx_cq = NULL; device->rx_cq = NULL; device->pd = NULL; } /** * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP) * * returns 0 on success, -1 on failure */ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) { struct iser_device *device; struct ib_qp_init_attr init_attr; int ret = -ENOMEM; struct ib_fmr_pool_param params; BUG_ON(ib_conn->device == NULL); device = ib_conn->device; ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); if (!ib_conn->login_buf) goto out_err; ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), GFP_KERNEL); if (!ib_conn->page_vec) goto out_err; ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); params.page_shift = SHIFT_4K; /* when the first/last SG element are not start/end * * page aligned, the map whould be of N+1 pages */ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; /* make the pool size twice the max number of SCSI commands * * the ML is expected to queue, watermark for unmap at 50% */ params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2; params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX; params.cache = 0; params.flush_function = NULL; params.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ); ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params); if (IS_ERR(ib_conn->fmr_pool)) { ret = PTR_ERR(ib_conn->fmr_pool); ib_conn->fmr_pool = NULL; goto out_err; } memset(&init_attr, 0, sizeof init_attr); init_attr.event_handler = 
iser_qp_event_callback; init_attr.qp_context = (void *)ib_conn; init_attr.send_cq = device->tx_cq; init_attr.recv_cq = device->rx_cq; init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; init_attr.cap.max_send_sge = 2; init_attr.cap.max_recv_sge = 1; init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; init_attr.qp_type = IB_QPT_RC; ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); if (ret) goto out_err; ib_conn->qp = ib_conn->cma_id->qp; iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", ib_conn, ib_conn->cma_id, ib_conn->fmr_pool, ib_conn->cma_id->qp); return ret; out_err: iser_err("unable to alloc mem or create resource, err %d\n", ret); return ret; } /** * releases the FMR pool, QP and CMA ID objects, returns 0 on success, * -1 on failure */ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) { BUG_ON(ib_conn == NULL); iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n", ib_conn, ib_conn->cma_id, ib_conn->fmr_pool, ib_conn->qp); /* qp is created only once both addr & route are resolved */ if (ib_conn->fmr_pool != NULL) ib_destroy_fmr_pool(ib_conn->fmr_pool); if (ib_conn->qp != NULL) rdma_destroy_qp(ib_conn->cma_id); /* if cma handler context, the caller acts s.t the cma destroy the id */ if (ib_conn->cma_id != NULL && can_destroy_id) rdma_destroy_id(ib_conn->cma_id); ib_conn->fmr_pool = NULL; ib_conn->qp = NULL; ib_conn->cma_id = NULL; kfree(ib_conn->page_vec); return 0; } /** * based on the resolved device node GUID see if there already allocated * device for this device. If there's no such, create one. 
*/ static struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) { struct iser_device *device; mutex_lock(&ig.device_list_mutex); list_for_each_entry(device, &ig.device_list, ig_list) /* find if there's a match using the node GUID */ if (device->ib_device->node_guid == cma_id->device->node_guid) goto inc_refcnt; device = kzalloc(sizeof *device, GFP_KERNEL); if (device == NULL) goto out; /* assign this device to the device */ device->ib_device = cma_id->device; /* init the device and link it into ig device list */ if (iser_create_device_ib_res(device)) { kfree(device); device = NULL; goto out; } list_add(&device->ig_list, &ig.device_list); inc_refcnt: device->refcount++; out: mutex_unlock(&ig.device_list_mutex); return device; } /* if there's no demand for this device, release it */ static void iser_device_try_release(struct iser_device *device) { mutex_lock(&ig.device_list_mutex); device->refcount--; iser_err("device %p refcount %d\n",device,device->refcount); if (!device->refcount) { iser_free_device_ib_res(device); list_del(&device->ig_list); kfree(device); } mutex_unlock(&ig.device_list_mutex); } static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, enum iser_ib_conn_state comp, enum iser_ib_conn_state exch) { int ret; spin_lock_bh(&ib_conn->lock); if ((ret = (ib_conn->state == comp))) ib_conn->state = exch; spin_unlock_bh(&ib_conn->lock); return ret; } /** * Frees all conn objects and deallocs conn descriptor */ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) { struct iser_device *device = ib_conn->device; BUG_ON(ib_conn->state != ISER_CONN_DOWN); mutex_lock(&ig.connlist_mutex); list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); iser_free_rx_descriptors(ib_conn); iser_free_ib_conn_res(ib_conn, can_destroy_id); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) iser_device_try_release(device); iscsi_destroy_endpoint(ib_conn->ep); } 
void iser_conn_get(struct iser_conn *ib_conn) { atomic_inc(&ib_conn->refcount); } int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) { if (atomic_dec_and_test(&ib_conn->refcount)) { iser_conn_release(ib_conn, can_destroy_id); return 1; } return 0; } /** * triggers start of the disconnect procedures and wait for them to be done */ void iser_conn_terminate(struct iser_conn *ib_conn) { int err = 0; /* change the ib conn state only if the conn is UP, however always call * rdma_disconnect since this is the only way to cause the CMA to change * the QP state to ERROR */ iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); err = rdma_disconnect(ib_conn->cma_id); if (err) iser_err("Failed to disconnect, conn: 0x%p err %d\n", ib_conn,err); wait_event_interruptible(ib_conn->wait, ib_conn->state == ISER_CONN_DOWN); iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ } static int iser_connect_error(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; ib_conn = (struct iser_conn *)cma_id->context; ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ } static int iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; struct iser_conn *ib_conn; int ret; device = iser_device_find_by_ib_device(cma_id); if (!device) { iser_err("device lookup/creation failed\n"); return iser_connect_error(cma_id); } ib_conn = (struct iser_conn *)cma_id->context; ib_conn->device = device; ret = rdma_resolve_route(cma_id, 1000); if (ret) { iser_err("resolve route failed: %d\n", ret); return iser_connect_error(cma_id); } return 0; } static int iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; int ret; ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); if (ret) goto failure; memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = 4; conn_param.initiator_depth = 1; conn_param.retry_count = 7; 
conn_param.rnr_retry_count = 6; ret = rdma_connect(cma_id, &conn_param); if (ret) { iser_err("failure connecting: %d\n", ret); goto failure; } return 0; failure: return iser_connect_error(cma_id); } static void iser_connected_handler(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; ib_conn = (struct iser_conn *)cma_id->context; ib_conn->state = ISER_CONN_UP; wake_up_interruptible(&ib_conn->wait); } static int iser_disconnected_handler(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; int ret; ib_conn = (struct iser_conn *)cma_id->context; /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. */ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING)) iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); /* Complete the termination process if no posts are pending */ if (ib_conn->post_recv_buf_count == 0 && (atomic_read(&ib_conn->post_send_buf_count) == 0)) { ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); } ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ return ret; } static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; iser_err("event %d status %d conn %p id %p\n", event->event, event->status, cma_id->context, cma_id); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: ret = iser_addr_handler(cma_id); break; case RDMA_CM_EVENT_ROUTE_RESOLVED: ret = iser_route_handler(cma_id); break; case RDMA_CM_EVENT_ESTABLISHED: iser_connected_handler(cma_id); break; case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_REJECTED: ret = iser_connect_error(cma_id); break; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_ADDR_CHANGE: ret = iser_disconnected_handler(cma_id); break; default: iser_err("Unexpected RDMA CM event (%d)\n", 
event->event); break; } return ret; } void iser_conn_init(struct iser_conn *ib_conn) { ib_conn->state = ISER_CONN_INIT; init_waitqueue_head(&ib_conn->wait); ib_conn->post_recv_buf_count = 0; atomic_set(&ib_conn->post_send_buf_count, 0); atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */ INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); } /** * starts the process of connecting to the target * sleeps until the connection is established or rejected */ int iser_connect(struct iser_conn *ib_conn, struct sockaddr_in *src_addr, struct sockaddr_in *dst_addr, int non_blocking) { struct sockaddr *src, *dst; int err = 0; sprintf(ib_conn->name, "%pI4:%d", &dst_addr->sin_addr.s_addr, dst_addr->sin_port); /* the device is known only --after-- address resolution */ ib_conn->device = NULL; iser_err("connecting to: %pI4, port 0x%x\n", &dst_addr->sin_addr, dst_addr->sin_port); ib_conn->state = ISER_CONN_PENDING; iser_conn_get(ib_conn); /* ref ib conn's cma id */ ib_conn->cma_id = rdma_create_id(iser_cma_handler, (void *)ib_conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ib_conn->cma_id)) { err = PTR_ERR(ib_conn->cma_id); iser_err("rdma_create_id failed: %d\n", err); goto id_failure; } src = (struct sockaddr *)src_addr; dst = (struct sockaddr *)dst_addr; err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000); if (err) { iser_err("rdma_resolve_addr failed: %d\n", err); goto addr_failure; } if (!non_blocking) { wait_event_interruptible(ib_conn->wait, (ib_conn->state != ISER_CONN_PENDING)); if (ib_conn->state != ISER_CONN_UP) { err = -EIO; goto connect_failure; } } mutex_lock(&ig.connlist_mutex); list_add(&ib_conn->conn_list, &ig.connlist); mutex_unlock(&ig.connlist_mutex); return 0; id_failure: ib_conn->cma_id = NULL; addr_failure: ib_conn->state = ISER_CONN_DOWN; connect_failure: iser_conn_release(ib_conn, 1); return err; } /** * iser_reg_page_vec - Register physical memory * * returns: 0 on success, errno code on failure */ int iser_reg_page_vec(struct 
iser_conn *ib_conn, struct iser_page_vec *page_vec, struct iser_mem_reg *mem_reg) { struct ib_pool_fmr *mem; u64 io_addr; u64 *page_list; int status; page_list = page_vec->pages; io_addr = page_list[0]; mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool, page_list, page_vec->length, io_addr); if (IS_ERR(mem)) { status = (int)PTR_ERR(mem); iser_err("ib_fmr_pool_map_phys failed: %d\n", status); return status; } mem_reg->lkey = mem->fmr->lkey; mem_reg->rkey = mem->fmr->rkey; mem_reg->len = page_vec->length * SIZE_4K; mem_reg->va = io_addr; mem_reg->is_fmr = 1; mem_reg->mem_h = (void *)mem; mem_reg->va += page_vec->offset; mem_reg->len = page_vec->data_size; iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " "entry[0]: (0x%08lx,%ld)] -> " "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n", page_vec, page_vec->length, (unsigned long)page_vec->pages[0], (unsigned long)page_vec->data_size, (unsigned int)mem_reg->lkey, mem_reg->mem_h, (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); return 0; } /** * Unregister (previosuly registered) memory. 
*/ void iser_unreg_mem(struct iser_mem_reg *reg) { int ret; iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h); ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); if (ret) iser_err("ib_fmr_pool_unmap failed %d\n", ret); reg->mem_h = NULL; } int iser_post_recvl(struct iser_conn *ib_conn) { struct ib_recv_wr rx_wr, *rx_wr_failed; struct ib_sge sge; int ib_ret; sge.addr = ib_conn->login_dma; sge.length = ISER_RX_LOGIN_SIZE; sge.lkey = ib_conn->device->mr->lkey; rx_wr.wr_id = (unsigned long)ib_conn->login_buf; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; rx_wr.next = NULL; ib_conn->post_recv_buf_count++; ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); if (ib_ret) { iser_err("ib_post_recv failed ret=%d\n", ib_ret); ib_conn->post_recv_buf_count--; } return ib_ret; } int iser_post_recvm(struct iser_conn *ib_conn, int count) { struct ib_recv_wr *rx_wr, *rx_wr_failed; int i, ib_ret; unsigned int my_rx_head = ib_conn->rx_desc_head; struct iser_rx_desc *rx_desc; for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { rx_desc = &ib_conn->rx_descs[my_rx_head]; rx_wr->wr_id = (unsigned long)rx_desc; rx_wr->sg_list = &rx_desc->rx_sg; rx_wr->num_sge = 1; rx_wr->next = rx_wr + 1; my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1); } rx_wr--; rx_wr->next = NULL; /* mark end of work requests list */ ib_conn->post_recv_buf_count += count; ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); if (ib_ret) { iser_err("ib_post_recv failed ret=%d\n", ib_ret); ib_conn->post_recv_buf_count -= count; } else ib_conn->rx_desc_head = my_rx_head; return ib_ret; } /** * iser_start_send - Initiate a Send DTO operation * * returns 0 on success, -1 on failure */ int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc) { int ib_ret; struct ib_send_wr send_wr, *send_wr_failed; ib_dma_sync_single_for_device(ib_conn->device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); send_wr.next = NULL; send_wr.wr_id = (unsigned 
long)tx_desc; send_wr.sg_list = tx_desc->tx_sg; send_wr.num_sge = tx_desc->num_sge; send_wr.opcode = IB_WR_SEND; send_wr.send_flags = IB_SEND_SIGNALED; atomic_inc(&ib_conn->post_send_buf_count); ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); if (ib_ret) { iser_err("ib_post_send failed, ret:%d\n", ib_ret); atomic_dec(&ib_conn->post_send_buf_count); } return ib_ret; } static void iser_handle_comp_error(struct iser_tx_desc *desc, struct iser_conn *ib_conn) { if (desc && desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, desc); if (ib_conn->post_recv_buf_count == 0 && atomic_read(&ib_conn->post_send_buf_count) == 0) { /* getting here when the state is UP means that the conn is * * being terminated asynchronously from the iSCSI layer's * * perspective. */ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING)) iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); /* no more non completed posts to the QP, complete the * termination process w.o worrying on disconnect event */ ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); } } static int iser_drain_tx_cq(struct iser_device *device) { struct ib_cq *cq = device->tx_cq; struct ib_wc wc; struct iser_tx_desc *tx_desc; struct iser_conn *ib_conn; int completed_tx = 0; while (ib_poll_cq(cq, 1, &wc) == 1) { tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id; ib_conn = wc.qp->qp_context; if (wc.status == IB_WC_SUCCESS) { if (wc.opcode == IB_WC_SEND) iser_snd_completion(tx_desc, ib_conn); else iser_err("expected opcode %d got %d\n", IB_WC_SEND, wc.opcode); } else { iser_err("tx id %llx status %d vend_err %x\n", wc.wr_id, wc.status, wc.vendor_err); atomic_dec(&ib_conn->post_send_buf_count); iser_handle_comp_error(tx_desc, ib_conn); } completed_tx++; } return completed_tx; } static void iser_cq_tasklet_fn(unsigned long data) { struct iser_device *device = (struct iser_device *)data; struct ib_cq *cq = device->rx_cq; struct ib_wc wc; 
struct iser_rx_desc *desc; unsigned long xfer_len; struct iser_conn *ib_conn; int completed_tx, completed_rx; completed_tx = completed_rx = 0; while (ib_poll_cq(cq, 1, &wc) == 1) { desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; BUG_ON(desc == NULL); ib_conn = wc.qp->qp_context; if (wc.status == IB_WC_SUCCESS) { if (wc.opcode == IB_WC_RECV) { xfer_len = (unsigned long)wc.byte_len; iser_rcv_completion(desc, xfer_len, ib_conn); } else iser_err("expected opcode %d got %d\n", IB_WC_RECV, wc.opcode); } else { if (wc.status != IB_WC_WR_FLUSH_ERR) iser_err("rx id %llx status %d vend_err %x\n", wc.wr_id, wc.status, wc.vendor_err); ib_conn->post_recv_buf_count--; iser_handle_comp_error(NULL, ib_conn); } completed_rx++; if (!(completed_rx & 63)) completed_tx += iser_drain_tx_cq(device); } /* #warning "it is assumed here that arming CQ only once its empty" * * " would not cause interrupts to be missed" */ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); completed_tx += iser_drain_tx_cq(device); iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); } static void iser_cq_callback(struct ib_cq *cq, void *cq_context) { struct iser_device *device = (struct iser_device *)cq_context; tasklet_schedule(&device->cq_tasklet); }
gpl-2.0
NooNameR/k3
drivers/edac/cpc925_edac.c
2779
30542
/* * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. * * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: Cao Qingtao <qingtao.cao@windriver.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/edac.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include "edac_core.h" #include "edac_module.h" #define CPC925_EDAC_REVISION " Ver: 1.0.0" #define CPC925_EDAC_MOD_STR "cpc925_edac" #define cpc925_printk(level, fmt, arg...) \ edac_printk(level, "CPC925", fmt, ##arg) #define cpc925_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) /* * CPC925 registers are of 32 bits with bit0 defined at the * most significant bit and bit31 at that of least significant. */ #define CPC925_BITS_PER_REG 32 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) /* * EDAC device names for the error detections of * CPU Interface and Hypertransport Link. */ #define CPC925_CPU_ERR_DEV "cpu" #define CPC925_HT_LINK_DEV "htlink" /* Suppose DDR Refresh cycle is 15.6 microsecond */ #define CPC925_REF_FREQ 0xFA69 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ #define CPC925_NR_CSROWS 8 /* * All registers and bits definitions are taken from * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". 
*/ /* * CPU and Memory Controller Registers */ /************************************************************ * Processor Interface Exception Mask Register (APIMASK) ************************************************************/ #define REG_APIMASK_OFFSET 0x30070 enum apimask_bits { APIMASK_DART = CPC925_BIT(0), /* DART Exception */ APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | APIMASK_ADRS1), ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), }; /************************************************************ * Processor Interface Exception Register (APIEXCP) ************************************************************/ #define REG_APIEXCP_OFFSET 0x30060 enum apiexcp_bits { APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ APIEXCP_ECC_CE_L = 
CPC925_BIT(11), /* CECC lower */ CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | APIEXCP_ADRS1), UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), }; /************************************************************ * Memory Bus Configuration Register (MBCR) ************************************************************/ #define REG_MBCR_OFFSET 0x2190 #define MBCR_64BITCFG_SHIFT 23 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) #define MBCR_64BITBUS_SHIFT 22 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) /************************************************************ * Memory Bank Mode Register (MBMR) ************************************************************/ #define REG_MBMR_OFFSET 0x21C0 #define MBMR_MODE_MAX_VALUE 0xF #define MBMR_MODE_SHIFT 25 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) #define MBMR_BBA_SHIFT 24 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) /************************************************************ * Memory Bank Boundary Address Register (MBBAR) ************************************************************/ #define REG_MBBAR_OFFSET 0x21D0 #define MBBAR_BBA_MAX_VALUE 0xFF #define MBBAR_BBA_SHIFT 24 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) /************************************************************ * Memory Scrub Control Register (MSCR) ************************************************************/ #define REG_MSCR_OFFSET 0x2400 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ #define MSCR_SI_SHIFT 16 /* si - bit8:15*/ #define MSCR_SI_MAX_VALUE 0xFF #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) /************************************************************ * Memory Scrub Range Start Register (MSRSR) 
************************************************************/ #define REG_MSRSR_OFFSET 0x2410 /************************************************************ * Memory Scrub Range End Register (MSRER) ************************************************************/ #define REG_MSRER_OFFSET 0x2420 /************************************************************ * Memory Scrub Pattern Register (MSPR) ************************************************************/ #define REG_MSPR_OFFSET 0x2430 /************************************************************ * Memory Check Control Register (MCCR) ************************************************************/ #define REG_MCCR_OFFSET 0x2440 enum mccr_bits { MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ }; /************************************************************ * Memory Check Range End Register (MCRER) ************************************************************/ #define REG_MCRER_OFFSET 0x2450 /************************************************************ * Memory Error Address Register (MEAR) ************************************************************/ #define REG_MEAR_OFFSET 0x2460 #define MEAR_BCNT_MAX_VALUE 0x3 #define MEAR_BCNT_SHIFT 30 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) #define MEAR_RANK_MAX_VALUE 0x7 #define MEAR_RANK_SHIFT 27 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) #define MEAR_COL_MAX_VALUE 0x7FF #define MEAR_COL_SHIFT 16 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) #define MEAR_BANK_MAX_VALUE 0x3 #define MEAR_BANK_SHIFT 14 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) #define MEAR_ROW_MASK 0x00003FFF /************************************************************ * Memory Error Syndrome Register (MESR) ************************************************************/ #define REG_MESR_OFFSET 0x2470 #define MESR_ECC_SYN_H_MASK 0xFF00 #define MESR_ECC_SYN_L_MASK 0x00FF /************************************************************ 
* Memory Mode Control Register (MMCR) ************************************************************/ #define REG_MMCR_OFFSET 0x2500 enum mmcr_bits { MMCR_REG_DIMM_MODE = CPC925_BIT(3), }; /* * HyperTransport Link Registers */ /************************************************************ * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) ************************************************************/ #define REG_ERRCTRL_OFFSET 0x70140 enum errctrl_bits { /* nonfatal interrupts for */ ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), }; /************************************************************ * Link Configuration and Link Control Register (LINKCTRL) ************************************************************/ #define REG_LINKCTRL_OFFSET 0x70110 enum linkctrl_bits { LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), LINKCTRL_LINK_FAIL = CPC925_BIT(27), HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), }; /************************************************************ * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) ************************************************************/ #define REG_LINKERR_OFFSET 0x70120 enum linkerr_bits { LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | 
LINKERR_PROT_ERR), }; /************************************************************ * Bridge Control Register (BRGCTRL) ************************************************************/ #define REG_BRGCTRL_OFFSET 0x70300 enum brgctrl_bits { BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ }; /* Private structure for edac memory controller */ struct cpc925_mc_pdata { void __iomem *vbase; unsigned long total_mem; const char *name; int edac_idx; }; /* Private structure for common edac device */ struct cpc925_dev_info { void __iomem *vbase; struct platform_device *pdev; char *ctl_name; int edac_idx; struct edac_device_ctl_info *edac_dev; void (*init)(struct cpc925_dev_info *dev_info); void (*exit)(struct cpc925_dev_info *dev_info); void (*check)(struct edac_device_ctl_info *edac_dev); }; /* Get total memory size from Open Firmware DTB */ static void get_total_mem(struct cpc925_mc_pdata *pdata) { struct device_node *np = NULL; const unsigned int *reg, *reg_end; int len, sw, aw; unsigned long start, size; np = of_find_node_by_type(NULL, "memory"); if (!np) return; aw = of_n_addr_cells(np); sw = of_n_size_cells(np); reg = (const unsigned int *)of_get_property(np, "reg", &len); reg_end = reg + len/4; pdata->total_mem = 0; do { start = of_read_number(reg, aw); reg += aw; size = of_read_number(reg, sw); reg += sw; debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, start, size); pdata->total_mem += size; } while (reg < reg_end); of_node_put(np); debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); } static void cpc925_init_csrows(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; int index; u32 mbmr, mbbar, bba; unsigned long row_size, last_nr_pages = 0; get_total_mem(pdata); for (index = 0; index < mci->nr_csrows; index++) { mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + 0x20 * index); mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + 
0x20 + index); bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); if (bba == 0) continue; /* not populated */ csrow = &mci->csrows[index]; row_size = bba * (1UL << 28); /* 256M */ csrow->first_page = last_nr_pages; csrow->nr_pages = row_size >> PAGE_SHIFT; csrow->last_page = csrow->first_page + csrow->nr_pages - 1; last_nr_pages = csrow->last_page + 1; csrow->mtype = MEM_RDDR; csrow->edac_mode = EDAC_SECDED; switch (csrow->nr_channels) { case 1: /* Single channel */ csrow->grain = 32; /* four-beat burst of 32 bytes */ break; case 2: /* Dual channel */ default: csrow->grain = 64; /* four-beat burst of 64 bytes */ break; } switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { case 6: /* 0110, no way to differentiate X8 VS X16 */ case 5: /* 0101 */ case 8: /* 1000 */ csrow->dtype = DEV_X16; break; case 7: /* 0111 */ case 9: /* 1001 */ csrow->dtype = DEV_X8; break; default: csrow->dtype = DEV_UNKNOWN; break; } } } /* Enable memory controller ECC detection */ static void cpc925_mc_init(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apimask; u32 mccr; /* Enable various ECC error exceptions */ apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); if ((apimask & ECC_MASK_ENABLE) == 0) { apimask |= ECC_MASK_ENABLE; __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); } /* Enable ECC detection */ mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); if ((mccr & MCCR_ECC_EN) == 0) { mccr |= MCCR_ECC_EN; __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); } } /* Disable memory controller ECC detection */ static void cpc925_mc_exit(struct mem_ctl_info *mci) { /* * WARNING: * We are supposed to clear the ECC error detection bits, * and it will be no problem to do so. However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_mc_init() will * trigger machine check exception. 
* Also, it's ok to leave ECC error detection bits enabled, * since they are reset to 1 by default or by boot loader. */ return; } /* * Revert DDR column/row/bank addresses into page frame number and * offset in page. * * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs), * physical address(PA) bits to column address(CA) bits mappings are: * CA 0 1 2 3 4 5 6 7 8 9 10 * PA 59 58 57 56 55 54 53 52 51 50 49 * * physical address(PA) bits to bank address(BA) bits mappings are: * BA 0 1 * PA 43 44 * * physical address(PA) bits to row address(RA) bits mappings are: * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 */ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, unsigned long *pfn, unsigned long *offset, int *csrow) { u32 bcnt, rank, col, bank, row; u32 c; unsigned long pa; int i; bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; row = mear & MEAR_ROW_MASK; *csrow = rank; #ifdef CONFIG_EDAC_DEBUG if (mci->csrows[rank].first_page == 0) { cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " "non-populated csrow, broken hardware?\n"); return; } #endif /* Revert csrow number */ pa = mci->csrows[rank].first_page << PAGE_SHIFT; /* Revert column address */ col += bcnt; for (i = 0; i < 11; i++) { c = col & 0x1; col >>= 1; pa |= c << (14 - i); } /* Revert bank address */ pa |= bank << 19; /* Revert row address, in 4 steps */ for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (26 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (21 + i); } for (i = 0; i < 4; i++) { c = row & 0x1; row >>= 1; pa |= c << (18 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (29 - i); } *offset = pa & (PAGE_SIZE - 1); *pfn = pa >> PAGE_SHIFT; debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); } static int 
cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) { if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) return 0; if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) return 1; cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", syndrome); return 1; } /* Check memory controller registers for ECC errors */ static void cpc925_mc_check(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apiexcp; u32 mear; u32 mesr; u16 syndrome; unsigned long pfn = 0, offset = 0; int csrow = 0, channel = 0; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & ECC_EXCP_DETECTED) == 0) return; mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); /* Revert column/row addresses into page frame number, etc */ cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); if (apiexcp & CECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); channel = cpc925_mc_find_channel(mci, syndrome); edac_mc_handle_ce(mci, pfn, offset, syndrome, csrow, channel, mci->ctl_name); } if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); } cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); 
cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", mesr); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", syndrome); } /******************** CPU err device********************************/ /* Enable CPU Errors detection */ static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) { u32 apimask; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); if ((apimask & CPU_MASK_ENABLE) == 0) { apimask |= CPU_MASK_ENABLE; __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); } } /* Disable CPU Errors detection */ static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) { /* * WARNING: * We are supposed to clear the CPU error detection bits, * and it will be no problem to do so. However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_cpu_init() will * trigger machine check exception. * Also, it's ok to leave CPU error detection bits enabled, * since they are reset to 1 by default. 
*/ return; } /* Check for CPU Errors */ static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 apiexcp; u32 apimask; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & CPU_EXCP_DETECTED) == 0) return; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); cpc925_printk(KERN_INFO, "Processor Interface Fault\n" "Processor Interface register dump:\n"); cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); } /******************** HT Link err device****************************/ /* Enable HyperTransport Link Error detection */ static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { ht_errctrl |= HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } } /* Disable HyperTransport Link Error detection */ static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); ht_errctrl &= ~HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } /* Check for HyperTransport Link errors */ static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); if (!((brgctrl & BRGCTRL_DETSERR) || (linkctrl & HT_LINKCTRL_DETECTED) || (errctrl & HT_ERRCTRL_DETECTED) || (linkerr & HT_LINKERR_DETECTED))) return; cpc925_printk(KERN_INFO, "HT Link Fault\n" 
"HT register dump:\n"); cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", brgctrl); cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", linkctrl); cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", errctrl); cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", linkerr); /* Clear by write 1 */ if (brgctrl & BRGCTRL_DETSERR) __raw_writel(BRGCTRL_DETSERR, dev_info->vbase + REG_BRGCTRL_OFFSET); if (linkctrl & HT_LINKCTRL_DETECTED) __raw_writel(HT_LINKCTRL_DETECTED, dev_info->vbase + REG_LINKCTRL_OFFSET); /* Initiate Secondary Bus Reset to clear the chain failure */ if (errctrl & ERRCTRL_CHN_FAL) __raw_writel(BRGCTRL_SECBUSRESET, dev_info->vbase + REG_BRGCTRL_OFFSET); if (errctrl & ERRCTRL_RSP_ERR) __raw_writel(ERRCTRL_RSP_ERR, dev_info->vbase + REG_ERRCTRL_OFFSET); if (linkerr & HT_LINKERR_DETECTED) __raw_writel(HT_LINKERR_DETECTED, dev_info->vbase + REG_LINKERR_OFFSET); edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); } static struct cpc925_dev_info cpc925_devs[] = { { .ctl_name = CPC925_CPU_ERR_DEV, .init = cpc925_cpu_init, .exit = cpc925_cpu_exit, .check = cpc925_cpu_check, }, { .ctl_name = CPC925_HT_LINK_DEV, .init = cpc925_htlink_init, .exit = cpc925_htlink_exit, .check = cpc925_htlink_check, }, {0}, /* Terminated by NULL */ }; /* * Add CPU Err detection and HyperTransport Link Err detection * as common "edac_device", they have no corresponding device * nodes in the Open Firmware DTB and we have to add platform * devices for them. Also, they will share the MMIO with that * of memory controller. 
*/ static void cpc925_add_edac_devices(void __iomem *vbase) { struct cpc925_dev_info *dev_info; if (!vbase) { cpc925_printk(KERN_ERR, "MMIO not established yet\n"); return; } for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { dev_info->vbase = vbase; dev_info->pdev = platform_device_register_simple( dev_info->ctl_name, 0, NULL, 0); if (IS_ERR(dev_info->pdev)) { cpc925_printk(KERN_ERR, "Can't register platform device for %s\n", dev_info->ctl_name); continue; } /* * Don't have to allocate private structure but * make use of cpc925_devs[] instead. */ dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); if (!dev_info->edac_dev) { cpc925_printk(KERN_ERR, "No memory for edac device\n"); goto err1; } dev_info->edac_dev->pvt_info = dev_info; dev_info->edac_dev->dev = &dev_info->pdev->dev; dev_info->edac_dev->ctl_name = dev_info->ctl_name; dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = dev_info->check; if (dev_info->init) dev_info->init(dev_info); if (edac_device_add_device(dev_info->edac_dev) > 0) { cpc925_printk(KERN_ERR, "Unable to add edac device for %s\n", dev_info->ctl_name); goto err2; } debugf0("%s: Successfully added edac device for %s\n", __func__, dev_info->ctl_name); continue; err2: if (dev_info->exit) dev_info->exit(dev_info); edac_device_free_ctl_info(dev_info->edac_dev); err1: platform_device_unregister(dev_info->pdev); } } /* * Delete the common "edac_device" for CPU Err Detection * and HyperTransport Link Err Detection */ static void cpc925_del_edac_devices(void) { struct cpc925_dev_info *dev_info; for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { if (dev_info->edac_dev) { edac_device_del_device(dev_info->edac_dev->dev); edac_device_free_ctl_info(dev_info->edac_dev); 
platform_device_unregister(dev_info->pdev); } if (dev_info->exit) dev_info->exit(dev_info); debugf0("%s: Successfully deleted edac device for %s\n", __func__, dev_info->ctl_name); } } /* Convert current back-ground scrub rate into byte/sec bandwidth */ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; int bw; u32 mscr; u8 si; mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || (si == 0)) { cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); bw = 0; } else bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; return bw; } /* Return 0 for single channel; 1 for dual channel */ static int cpc925_mc_get_channels(void __iomem *vbase) { int dual = 0; u32 mbcr; mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); /* * Dual channel only when 128-bit wide physical bus * and 128-bit configuration. */ if (((mbcr & MBCR_64BITCFG_MASK) == 0) && ((mbcr & MBCR_64BITBUS_MASK) == 0)) dual = 1; debugf0("%s: %s channel\n", __func__, (dual > 0) ? 
"Dual" : "Single"); return dual; } static int __devinit cpc925_probe(struct platform_device *pdev) { static int edac_mc_idx; struct mem_ctl_info *mci; void __iomem *vbase; struct cpc925_mc_pdata *pdata; struct resource *r; int res = 0, nr_channels; debugf0("%s: %s platform device found!\n", __func__, pdev->name); if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { res = -ENOMEM; goto out; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { cpc925_printk(KERN_ERR, "Unable to get resource\n"); res = -ENOENT; goto err1; } if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), pdev->name)) { cpc925_printk(KERN_ERR, "Unable to request mem region\n"); res = -EBUSY; goto err1; } vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!vbase) { cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); res = -ENOMEM; goto err2; } nr_channels = cpc925_mc_get_channels(vbase); mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); if (!mci) { cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); res = -ENOMEM; goto err2; } pdata = mci->pvt_info; pdata->vbase = vbase; pdata->edac_idx = edac_mc_idx++; pdata->name = pdev->name; mci->dev = &pdev->dev; platform_set_drvdata(pdev, mci); mci->dev_name = dev_name(&pdev->dev); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = CPC925_EDAC_MOD_STR; mci->mod_ver = CPC925_EDAC_REVISION; mci->ctl_name = pdev->name; if (edac_op_state == EDAC_OPSTATE_POLL) mci->edac_check = cpc925_mc_check; mci->ctl_page_to_phys = NULL; mci->scrub_mode = SCRUB_SW_SRC; mci->set_sdram_scrub_rate = NULL; mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; cpc925_init_csrows(mci); /* Setup memory controller registers */ cpc925_mc_init(mci); if (edac_mc_add_mc(mci) > 0) { cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); goto err3; } 
cpc925_add_edac_devices(vbase); /* get this far and it's successful */ debugf0("%s: success\n", __func__); res = 0; goto out; err3: cpc925_mc_exit(mci); edac_mc_free(mci); err2: devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); err1: devres_release_group(&pdev->dev, cpc925_probe); out: return res; } static int cpc925_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); /* * Delete common edac devices before edac mc, because * the former share the MMIO of the latter. */ cpc925_del_edac_devices(); cpc925_mc_exit(mci); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); return 0; } static struct platform_driver cpc925_edac_driver = { .probe = cpc925_probe, .remove = cpc925_remove, .driver = { .name = "cpc925_edac", } }; static int __init cpc925_edac_init(void) { int ret = 0; printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); /* Only support POLL mode so far */ edac_op_state = EDAC_OPSTATE_POLL; ret = platform_driver_register(&cpc925_edac_driver); if (ret) { printk(KERN_WARNING "Failed to register %s\n", CPC925_EDAC_MOD_STR); } return ret; } static void __exit cpc925_edac_exit(void) { platform_driver_unregister(&cpc925_edac_driver); } module_init(cpc925_edac_init); module_exit(cpc925_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
gpl-2.0
Linhu86/mylinux
drivers/net/slip/slhc.c
6875
19083
/*
 * Routines to compress and uncompress tcp packets (for transmission
 * over low speed serial lines).
 *
 * Copyright (c) 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
 *	- Initial distribution.
 *
 *
 * modified for KA9Q Internet Software Package by
 * Katie Stevens (dkstevens@ucdavis.edu)
 * University of California, Davis
 * Computing Services
 *	- 01-31-90	initial adaptation (from 1.19)
 *	PPP.05	02-15-90 [ks]
 *	PPP.08	05-02-90 [ks]	use PPP protocol field to signal compression
 *	PPP.15	09-90	 [ks]	improve mbuf handling
 *	PPP.16	11-02	 [karn]	substantially rewritten to use NOS facilities
 *
 *	- Feb 1991	Bill_Simpson@um.cc.umich.edu
 *			variable number of conversation slots
 *			allow zero or one slots
 *			separate routines
 *			status display
 *	- Jul 1994	Dmitry Gorodchanin
 *			Fixes for memory leaks.
 *	- Oct 1994	Dmitry Gorodchanin
 *			Modularization.
 *	- Jan 1995	Bjorn Ekwall
 *			Use ip_fast_csum from ip.h
 *	- July 1995	Christos A. Polyzols
 *			Spotted bug in tcp option checking
 *
 *
 *	This module is a difficult issue. It's clearly inet code but it's also clearly
 *	driver code belonging close to PPP and SLIP
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <net/slhc_vj.h>

#ifdef CONFIG_INET
/* Entire module is for IP only */
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/termios.h>
#include <linux/in.h>
#include <linux/fcntl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

/* Forward declarations for the VJ delta-encoding helpers below. */
static unsigned char *encode(unsigned char *cp, unsigned short n);
static long decode(unsigned char **cpp);
static unsigned char * put16(unsigned char *cp, unsigned short x);
static unsigned short pull16(unsigned char **cpp);

/* Initialize compression data structure
 *	slots must be in range 0 to 255 (zero meaning no compression)
 * Allocates the slcompress plus one cstate array per direction;
 * returns NULL on allocation failure (or on out-of-range slot counts,
 * in which case the corresponding state array is simply left absent).
 */
struct slcompress *
slhc_init(int rslots, int tslots)
{
	register short i;
	register struct cstate *ts;
	struct slcompress *comp;

	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
	if (! comp)
		goto out_fail;

	if ( rslots > 0  &&  rslots < 256 ) {
		size_t rsize = rslots * sizeof(struct cstate);
		comp->rstate = kzalloc(rsize, GFP_KERNEL);
		if (! comp->rstate)
			goto out_free;
		comp->rslot_limit = rslots - 1;
	}

	if ( tslots > 0  &&  tslots < 256 ) {
		size_t tsize = tslots * sizeof(struct cstate);
		comp->tstate = kzalloc(tsize, GFP_KERNEL);
		if (! comp->tstate)
			goto out_free2;
		comp->tslot_limit = tslots - 1;
	}

	comp->xmit_oldest = 0;
	comp->xmit_current = 255;
	comp->recv_current = 255;
	/*
	 * don't accept any packets with implicit index until we get
	 * one with an explicit index.  Otherwise the uncompress code
	 * will try to use connection 255, which is almost certainly
	 * out of range
	 */
	comp->flags |= SLF_TOSS;

	if ( tslots > 0 ) {
		/* Link the transmit states into a circular LRU list. */
		ts = comp->tstate;
		for(i = comp->tslot_limit; i > 0; --i){
			ts[i].cs_this = i;
			ts[i].next = &(ts[i - 1]);
		}
		ts[0].next = &(ts[comp->tslot_limit]);
		ts[0].cs_this = 0;
	}
	return comp;

out_free2:
	kfree(comp->rstate);
out_free:
	kfree(comp);
out_fail:
	return NULL;
}


/* Free a compression data structure */
void
slhc_free(struct slcompress *comp)
{
	if ( comp == NULLSLCOMPR )
		return;

	if ( comp->tstate != NULLSLSTATE )
		kfree( comp->tstate );

	if ( comp->rstate != NULLSLSTATE )
		kfree( comp->rstate );

	kfree( comp );
}


/* Put a short in host order into a char array in network order */
static inline unsigned char *
put16(unsigned char *cp, unsigned short x)
{
	*cp++ = x >> 8;
	*cp++ = x;

	return cp;
}


/* Encode a number: one byte for 1-255, or a zero byte followed by
 * 16 bits network order for 0 or >= 256 (the VJ delta encoding).
 */
static unsigned char *
encode(unsigned char *cp, unsigned short n)
{
	if(n >= 256 || n == 0){
		*cp++ = 0;
		cp = put16(cp,n);
	} else {
		*cp++ = n;
	}
	return cp;
}

/* Pull a 16-bit integer in host order from buffer in network byte order */
static unsigned short
pull16(unsigned char **cpp)
{
	short rval;

	rval = *(*cpp)++;
	rval <<= 8;
	rval |= *(*cpp)++;
	return rval;
}

/* Decode a number: inverse of encode() above. */
static long
decode(unsigned char **cpp)
{
	register int x;

	x = *(*cpp)++;
	if(x == 0){
		return pull16(cpp) & 0xffff;	/* pull16 returns -1 on error */
	} else {
		return x & 0xff;		/* -1 if PULLCHAR returned error */
	}
}

/*
 * icp and isize are the original packet.
 * ocp is a place to put a copy if necessary.
 * cpp is initially a pointer to icp.  If the copy is used,
 *    change it to ocp.
 * Returns the (possibly reduced) length of the packet to transmit;
 * non-TCP or uncompressible packets are passed through unchanged.
 */

int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
	unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
	register struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
	register struct cstate *lcs = ocs;
	register struct cstate *cs = lcs->next;
	register unsigned long deltaS, deltaA;
	register short changes = 0;
	int hlen;
	unsigned char new_seq[16];
	register unsigned char *cp = new_seq;
	struct iphdr *ip;
	struct tcphdr *th, *oth;
	__sum16 csum;


	/*
	 *	Don't play with runt packets.
	 */

	if(isize<sizeof(struct iphdr))
		return isize;

	ip = (struct iphdr *) icp;

	/* Bail if this packet isn't TCP, or is an IP fragment */
	if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
		/* Send as regular IP */
		if(ip->protocol != IPPROTO_TCP)
			comp->sls_o_nontcp++;
		else
			comp->sls_o_tcp++;
		return isize;
	}
	/* Extract TCP header */

	th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
	hlen = ip->ihl*4 + th->doff*4;

	/*  Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
	 *  some other control bit is set). Also uncompressible if
	 *  it's a runt.
	 */
	if(hlen > isize || th->syn || th->fin || th->rst ||
	    ! (th->ack)){
		/* TCP connection stuff; send as regular IP */
		comp->sls_o_tcp++;
		return isize;
	}
	/*
	 * Packet is compressible -- we're going to send either a
	 * COMPRESSED_TCP or UNCOMPRESSED_TCP packet.  Either way,
	 * we need to locate (or create) the connection state.
	 *
	 * States are kept in a circularly linked list with
	 * xmit_oldest pointing to the end of the list.  The
	 * list is kept in lru order by moving a state to the
	 * head of the list whenever it is referenced.  Since
	 * the list is short and, empirically, the connection
	 * we want is almost always near the front, we locate
	 * states via linear search.  If we don't find a state
	 * for the datagram, the oldest state is (re-)used.
	 */
	for ( ; ; ) {
		if( ip->saddr == cs->cs_ip.saddr
		 && ip->daddr == cs->cs_ip.daddr
		 && th->source == cs->cs_tcp.source
		 && th->dest == cs->cs_tcp.dest)
			goto found;

		/* if current equal oldest, at end of list */
		if ( cs == ocs )
			break;
		lcs = cs;
		cs = cs->next;
		comp->sls_o_searches++;
	}
	/*
	 * Didn't find it -- re-use oldest cstate.  Send an
	 * uncompressed packet that tells the other side what
	 * connection number we're using for this conversation.
	 *
	 * Note that since the state list is circular, the oldest
	 * state points to the newest and we only need to set
	 * xmit_oldest to update the lru linkage.
	 */
	comp->sls_o_misses++;
	comp->xmit_oldest = lcs->cs_this;
	goto uncompressed;

found:
	/*
	 * Found it -- move to the front on the connection list.
	 */
	if(lcs == ocs) {
		/* found at most recently used */
	} else if (cs == ocs) {
		/* found at least recently used */
		comp->xmit_oldest = lcs->cs_this;
	} else {
		/* more than 2 elements */
		lcs->next = cs->next;
		cs->next = ocs->next;
		ocs->next = cs;
	}

	/*
	 * Make sure that only what we expect to change changed.
	 * Check the following:
	 * IP protocol version, header length & type of service.
	 * The "Don't fragment" bit.
	 * The time-to-live field.
	 * The TCP header length.
	 * IP options, if any.
	 * TCP options, if any.
	 * If any of these things are different between the previous &
	 * current datagram, we send the current datagram `uncompressed'.
	 */
	oth = &cs->cs_tcp;

	if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl
	 || ip->tos != cs->cs_ip.tos
	 || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000))
	 || ip->ttl != cs->cs_ip.ttl
	 || th->doff != cs->cs_tcp.doff
	 || (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0)
	 || (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){
		goto uncompressed;
	}

	/*
	 * Figure out which of the changing fields changed.  The
	 * receiver expects changes in the order: urgent, window,
	 * ack, seq (the order minimizes the number of temporaries
	 * needed in this section of code).
	 */
	if(th->urg){
		deltaS = ntohs(th->urg_ptr);
		cp = encode(cp,deltaS);
		changes |= NEW_U;
	} else if(th->urg_ptr != oth->urg_ptr){
		/* argh! URG not set but urp changed -- a sensible
		 * implementation should never do this but RFC793
		 * doesn't prohibit the change so we have to deal
		 * with it. */
		goto uncompressed;
	}
	if((deltaS = ntohs(th->window) - ntohs(oth->window)) != 0){
		cp = encode(cp,deltaS);
		changes |= NEW_W;
	}
	if((deltaA = ntohl(th->ack_seq) - ntohl(oth->ack_seq)) != 0L){
		if(deltaA > 0x0000ffff)
			goto uncompressed;
		cp = encode(cp,deltaA);
		changes |= NEW_A;
	}
	if((deltaS = ntohl(th->seq) - ntohl(oth->seq)) != 0L){
		if(deltaS > 0x0000ffff)
			goto uncompressed;
		cp = encode(cp,deltaS);
		changes |= NEW_S;
	}

	switch(changes){
	case 0:	/* Nothing changed. If this packet contains data and the
		 * last one didn't, this is probably a data packet following
		 * an ack (normal on an interactive connection) and we send
		 * it compressed.  Otherwise it's probably a retransmit,
		 * retransmitted ack or window probe.  Send it uncompressed
		 * in case the other side missed the compressed version.
		 */
		if(ip->tot_len != cs->cs_ip.tot_len &&
		   ntohs(cs->cs_ip.tot_len) == hlen)
			break;
		goto uncompressed;
		break;
	case SPECIAL_I:
	case SPECIAL_D:
		/* actual changes match one of our special case encodings --
		 * send packet uncompressed.
		 */
		goto uncompressed;
	case NEW_S|NEW_A:
		if(deltaS == deltaA &&
		    deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
			/* special case for echoed terminal traffic */
			changes = SPECIAL_I;
			cp = new_seq;
		}
		break;
	case NEW_S:
		if(deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
			/* special case for data xfer */
			changes = SPECIAL_D;
			cp = new_seq;
		}
		break;
	}
	deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id);
	if(deltaS != 1){
		cp = encode(cp,deltaS);
		changes |= NEW_I;
	}
	if(th->psh)
		changes |= TCP_PUSH_BIT;
	/* Grab the cksum before we overwrite it below.  Then update our
	 * state with this packet's header.
	 */
	csum = th->check;
	memcpy(&cs->cs_ip,ip,20);
	memcpy(&cs->cs_tcp,th,20);
	/* We want to use the original packet as our compressed packet.
	 * (cp - new_seq) is the number of bytes we need for compressed
	 * sequence numbers.  In addition we need one byte for the change
	 * mask, one for the connection id and two for the tcp checksum.
	 * So, (cp - new_seq) + 4 bytes of header are needed.
	 */
	deltaS = cp - new_seq;
	if(compress_cid == 0 || comp->xmit_current != cs->cs_this){
		cp = ocp;
		*cpp = ocp;
		*cp++ = changes | NEW_C;
		*cp++ = cs->cs_this;
		comp->xmit_current = cs->cs_this;
	} else {
		cp = ocp;
		*cpp = ocp;
		*cp++ = changes;
	}
	*(__sum16 *)cp = csum;
	cp += 2;
/* deltaS is now the size of the change section of the compressed header */
	memcpy(cp,new_seq,deltaS);	/* Write list of deltas */
	memcpy(cp+deltaS,icp+hlen,isize-hlen);
	comp->sls_o_compressed++;
	ocp[0] |= SL_TYPE_COMPRESSED_TCP;
	return isize - hlen + deltaS + (cp - ocp);

	/* Update connection state cs & send uncompressed packet (i.e.,
	 * a regular ip/tcp packet but with the 'conversation id' we hope
	 * to use on future compressed packets in the protocol field).
	 */
uncompressed:
	memcpy(&cs->cs_ip,ip,20);
	memcpy(&cs->cs_tcp,th,20);
	if (ip->ihl > 5)
	  memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4);
	if (th->doff > 5)
	  memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4);
	comp->xmit_current = cs->cs_this;
	comp->sls_o_uncompressed++;
	memcpy(ocp, icp, isize);
	*cpp = ocp;
	ocp[9] = cs->cs_this;
	ocp[0] |= SL_TYPE_UNCOMPRESSED_TCP;
	return isize;
}


/*
 * Expand a COMPRESSED_TCP packet in place: reconstruct the full IP/TCP
 * headers from the saved per-connection state plus the received deltas.
 * Returns the reconstructed length, or 0 on error (state is then tossed).
 */
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
	register int changes;
	long x;
	register struct tcphdr *thp;
	register struct iphdr *ip;
	register struct cstate *cs;
	int len, hdrlen;
	unsigned char *cp = icp;

	/* We've got a compressed packet; read the change byte */
	comp->sls_i_compressed++;
	if(isize < 3){
		comp->sls_i_error++;
		return 0;
	}
	changes = *cp++;
	if(changes & NEW_C){
		/* Make sure the state index is in range, then grab the state.
		 * If we have a good state index, clear the 'discard' flag.
		 */
		x = *cp++;	/* Read conn index */
		if(x < 0 || x > comp->rslot_limit)
			goto bad;

		comp->flags &=~ SLF_TOSS;
		comp->recv_current = x;
	} else {
		/* this packet has an implicit state index.  If we've
		 * had a line error since the last time we got an
		 * explicit state index, we have to toss the packet. */
		if(comp->flags & SLF_TOSS){
			comp->sls_i_tossed++;
			return 0;
		}
	}
	cs = &comp->rstate[comp->recv_current];
	thp = &cs->cs_tcp;
	ip = &cs->cs_ip;

	thp->check = *(__sum16 *)cp;
	cp += 2;

	thp->psh = (changes & TCP_PUSH_BIT) ? 1 : 0;
/*
 * we can use the same number for the length of the saved header and
 * the current one, because the packet wouldn't have been sent
 * as compressed unless the options were the same as the previous one
 */

	hdrlen = ip->ihl * 4 + thp->doff * 4;

	switch(changes & SPECIALS_MASK){
	case SPECIAL_I:		/* Echoed terminal traffic */
		{
		register short i;
		i = ntohs(ip->tot_len) - hdrlen;
		thp->ack_seq = htonl( ntohl(thp->ack_seq) + i);
		thp->seq = htonl( ntohl(thp->seq) + i);
		}
		break;

	case SPECIAL_D:			/* Unidirectional data */
		thp->seq = htonl( ntohl(thp->seq) +
				  ntohs(ip->tot_len) - hdrlen);
		break;

	default:
		if(changes & NEW_U){
			thp->urg = 1;
			if((x = decode(&cp)) == -1) {
				goto bad;
			}
			thp->urg_ptr = htons(x);
		} else
			thp->urg = 0;
		if(changes & NEW_W){
			if((x = decode(&cp)) == -1) {
				goto bad;
			}
			thp->window = htons( ntohs(thp->window) + x);
		}
		if(changes & NEW_A){
			if((x = decode(&cp)) == -1) {
				goto bad;
			}
			thp->ack_seq = htonl( ntohl(thp->ack_seq) + x);
		}
		if(changes & NEW_S){
			if((x = decode(&cp)) == -1) {
				goto bad;
			}
			thp->seq = htonl( ntohl(thp->seq) + x);
		}
		break;
	}
	if(changes & NEW_I){
		if((x = decode(&cp)) == -1) {
			goto bad;
		}
		ip->id = htons (ntohs (ip->id) + x);
	} else
		ip->id = htons (ntohs (ip->id) + 1);

	/*
	 * At this point, cp points to the first byte of data in the
	 * packet.  Put the reconstructed TCP and IP headers back on the
	 * packet.  Recalculate IP checksum (but not TCP checksum).
	 */

	len = isize - (cp - icp);
	if (len < 0)
		goto bad;
	len += hdrlen;
	ip->tot_len = htons(len);
	ip->check = 0;

	/* Shift the payload up to make room for the rebuilt headers. */
	memmove(icp + hdrlen, cp, len - hdrlen);

	cp = icp;
	memcpy(cp, ip, 20);
	cp += 20;

	if (ip->ihl > 5) {
	  memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4);
	  cp += (ip->ihl - 5) * 4;
	}

	put_unaligned(ip_fast_csum(icp, ip->ihl),
		      &((struct iphdr *)icp)->check);

	memcpy(cp, thp, 20);
	cp += 20;

	if (thp->doff > 5) {
	  memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4);
	  cp += ((thp->doff) - 5) * 4;
	}

	return len;
bad:
	comp->sls_i_error++;
	return slhc_toss( comp );
}


/*
 * Remember the headers of an UNCOMPRESSED_TCP packet so later
 * COMPRESSED_TCP packets on the same connection can be expanded.
 * Returns isize, or the result of slhc_toss() on a bad packet.
 */
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
	register struct cstate *cs;
	unsigned ihl;

	unsigned char index;

	if(isize < 20) {
		/* The packet is shorter than a legal IP header */
		comp->sls_i_runt++;
		return slhc_toss( comp );
	}
	/* Peek at the IP header's IHL field to find its length */
	ihl = icp[0] & 0xf;
	if(ihl < 20 / 4){
		/* The IP header length field is too small */
		comp->sls_i_runt++;
		return slhc_toss( comp );
	}
	index = icp[9];
	icp[9] = IPPROTO_TCP;

	if (ip_fast_csum(icp, ihl)) {
		/* Bad IP header checksum; discard */
		comp->sls_i_badcheck++;
		return slhc_toss( comp );
	}
	if(index > comp->rslot_limit) {
		comp->sls_i_error++;
		return slhc_toss(comp);
	}

	/* Update local state */
	cs = &comp->rstate[comp->recv_current = index];
	comp->flags &=~ SLF_TOSS;
	memcpy(&cs->cs_ip,icp,20);
	memcpy(&cs->cs_tcp,icp + ihl*4,20);
	if (ihl > 5)
	  memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
	if (cs->cs_tcp.doff > 5)
	  memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
	/* NOTE(review): stored as (ihl + doff) * 2 — appears to be half the
	 * header byte count; confirm against users of cs_hsize. */
	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
	/* Put headers back on packet
	 * Neither header checksum is recalculated
	 */
	comp->sls_i_uncompressed++;
	return isize;
}

/* Mark the receive state invalid: subsequent implicit-index compressed
 * packets will be discarded until an explicit index is seen again.
 */
int
slhc_toss(struct slcompress *comp)
{
	if ( comp == NULLSLCOMPR )
		return 0;

	comp->flags |= SLF_TOSS;
	return 0;
}

#else /* CONFIG_INET */

/* Stubs used when the kernel is built without IP support. */

int
slhc_toss(struct slcompress *comp)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_toss");
  return -EINVAL;
}
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_uncompress");
  return -EINVAL;
}
int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
	unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_compress");
  return -EINVAL;
}

int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_remember");
  return -EINVAL;
}

void
slhc_free(struct slcompress *comp)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
}
struct slcompress *
slhc_init(int rslots, int tslots)
{
  printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
  return NULL;
}

#endif /* CONFIG_INET */

/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
theophile/SM-N920R7_MM_Kernel
drivers/rtc/rtc-pl030.c
7387
3980
/* * linux/drivers/rtc/rtc-pl030.c * * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/slab.h> #define RTC_DR (0) #define RTC_MR (4) #define RTC_STAT (8) #define RTC_EOI (8) #define RTC_LR (12) #define RTC_CR (16) #define RTC_CR_MIE (1 << 0) struct pl030_rtc { struct rtc_device *rtc; void __iomem *base; }; static irqreturn_t pl030_interrupt(int irq, void *dev_id) { struct pl030_rtc *rtc = dev_id; writel(0, rtc->base + RTC_EOI); return IRQ_HANDLED; } static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time); return 0; } static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); unsigned long time; int ret; /* * At the moment, we can only deal with non-wildcarded alarm times. */ ret = rtc_valid_tm(&alrm->time); if (ret == 0) ret = rtc_tm_to_time(&alrm->time, &time); if (ret == 0) writel(time, rtc->base + RTC_MR); return ret; } static int pl030_read_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); rtc_time_to_tm(readl(rtc->base + RTC_DR), tm); return 0; } /* * Set the RTC time. Unfortunately, we can't accurately set * the point at which the counter updates. * * Also, since RTC_LR is transferred to RTC_CR on next rising * edge of the 1Hz clock, we must write the time one second * in advance. 
*/ static int pl030_set_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); unsigned long time; int ret; ret = rtc_tm_to_time(tm, &time); if (ret == 0) writel(time + 1, rtc->base + RTC_LR); return ret; } static const struct rtc_class_ops pl030_ops = { .read_time = pl030_read_time, .set_time = pl030_set_time, .read_alarm = pl030_read_alarm, .set_alarm = pl030_set_alarm, }; static int pl030_probe(struct amba_device *dev, const struct amba_id *id) { struct pl030_rtc *rtc; int ret; ret = amba_request_regions(dev, NULL); if (ret) goto err_req; rtc = kmalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) { ret = -ENOMEM; goto err_rtc; } rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!rtc->base) { ret = -ENOMEM; goto err_map; } __raw_writel(0, rtc->base + RTC_CR); __raw_writel(0, rtc->base + RTC_EOI); amba_set_drvdata(dev, rtc); ret = request_irq(dev->irq[0], pl030_interrupt, 0, "rtc-pl030", rtc); if (ret) goto err_irq; rtc->rtc = rtc_device_register("pl030", &dev->dev, &pl030_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); goto err_reg; } return 0; err_reg: free_irq(dev->irq[0], rtc); err_irq: iounmap(rtc->base); err_map: kfree(rtc); err_rtc: amba_release_regions(dev); err_req: return ret; } static int pl030_remove(struct amba_device *dev) { struct pl030_rtc *rtc = amba_get_drvdata(dev); amba_set_drvdata(dev, NULL); writel(0, rtc->base + RTC_CR); free_irq(dev->irq[0], rtc); rtc_device_unregister(rtc->rtc); iounmap(rtc->base); kfree(rtc); amba_release_regions(dev); return 0; } static struct amba_id pl030_ids[] = { { .id = 0x00041030, .mask = 0x000fffff, }, { 0, 0 }, }; MODULE_DEVICE_TABLE(amba, pl030_ids); static struct amba_driver pl030_driver = { .drv = { .name = "rtc-pl030", }, .probe = pl030_probe, .remove = pl030_remove, .id_table = pl030_ids, }; module_amba_driver(pl030_driver); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("ARM AMBA PL030 RTC Driver"); 
MODULE_LICENSE("GPL");
gpl-2.0
showp1984/bricked-mako
arch/arm/mach-omap2/clkt2xxx_sys.c
8155
1053
/* * OMAP2xxx sys_clk-specific clock code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include "clock.h" #include "clock2xxx.h" #include "prm2xxx_3xxx.h" #include "prm-regbits-24xx.h" void __iomem *prcm_clksrc_ctrl; u32 omap2xxx_get_sysclkdiv(void) { u32 div; div = __raw_readl(prcm_clksrc_ctrl); div &= OMAP_SYSCLKDIV_MASK; div >>= OMAP_SYSCLKDIV_SHIFT; return div; } unsigned long omap2xxx_sys_clk_recalc(struct clk *clk) { return clk->parent->rate / omap2xxx_get_sysclkdiv(); }
gpl-2.0
fefifofum/android_kernel_bq_maxwell2qc
arch/m68k/amiga/platform.c
8411
4538
/* * Copyright (C) 2007-2009 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/zorro.h> #include <asm/amigahw.h> #include <asm/amigayle.h> #ifdef CONFIG_ZORRO static const struct resource zorro_resources[] __initconst = { /* Zorro II regions (on Zorro II/III) */ { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff, .flags = IORESOURCE_MEM, }, { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff, .flags = IORESOURCE_MEM, }, /* Zorro III regions (on Zorro III only) */ { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff, .flags = IORESOURCE_MEM, }, { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff, .flags = IORESOURCE_MEM, } }; static int __init amiga_init_bus(void) { if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) return -ENODEV; platform_device_register_simple("amiga-zorro", -1, zorro_resources, AMIGAHW_PRESENT(ZORRO3) ? 
4 : 2); return 0; } subsys_initcall(amiga_init_bus); static int z_dev_present(zorro_id id) { unsigned int i; for (i = 0; i < zorro_num_autocon; i++) if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) return 1; return 0; } #else /* !CONFIG_ZORRO */ static inline int z_dev_present(zorro_id id) { return 0; } #endif /* !CONFIG_ZORRO */ static const struct resource a3000_scsi_resource __initconst = { .start = 0xdd0000, .end = 0xdd00ff, .flags = IORESOURCE_MEM, }; static const struct resource a4000t_scsi_resource __initconst = { .start = 0xdd0000, .end = 0xdd0fff, .flags = IORESOURCE_MEM, }; static const struct resource a1200_ide_resource __initconst = { .start = 0xda0000, .end = 0xda1fff, .flags = IORESOURCE_MEM, }; static const struct gayle_ide_platform_data a1200_ide_pdata __initconst = { .base = 0xda0000, .irqport = 0xda9000, .explicit_ack = 1, }; static const struct resource a4000_ide_resource __initconst = { .start = 0xdd2000, .end = 0xdd3fff, .flags = IORESOURCE_MEM, }; static const struct gayle_ide_platform_data a4000_ide_pdata __initconst = { .base = 0xdd2020, .irqport = 0xdd3020, .explicit_ack = 0, }; static const struct resource amiga_rtc_resource __initconst = { .start = 0x00dc0000, .end = 0x00dcffff, .flags = IORESOURCE_MEM, }; static int __init amiga_init_devices(void) { struct platform_device *pdev; if (!MACH_IS_AMIGA) return -ENODEV; /* video hardware */ if (AMIGAHW_PRESENT(AMI_VIDEO)) platform_device_register_simple("amiga-video", -1, NULL, 0); /* sound hardware */ if (AMIGAHW_PRESENT(AMI_AUDIO)) platform_device_register_simple("amiga-audio", -1, NULL, 0); /* storage interfaces */ if (AMIGAHW_PRESENT(AMI_FLOPPY)) platform_device_register_simple("amiga-floppy", -1, NULL, 0); if (AMIGAHW_PRESENT(A3000_SCSI)) platform_device_register_simple("amiga-a3000-scsi", -1, &a3000_scsi_resource, 1); if (AMIGAHW_PRESENT(A4000_SCSI)) platform_device_register_simple("amiga-a4000t-scsi", -1, 
&a4000t_scsi_resource, 1); if (AMIGAHW_PRESENT(A1200_IDE) || z_dev_present(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE)) { pdev = platform_device_register_simple("amiga-gayle-ide", -1, &a1200_ide_resource, 1); platform_device_add_data(pdev, &a1200_ide_pdata, sizeof(a1200_ide_pdata)); } if (AMIGAHW_PRESENT(A4000_IDE)) { pdev = platform_device_register_simple("amiga-gayle-ide", -1, &a4000_ide_resource, 1); platform_device_add_data(pdev, &a4000_ide_pdata, sizeof(a4000_ide_pdata)); } /* other I/O hardware */ if (AMIGAHW_PRESENT(AMI_KEYBOARD)) platform_device_register_simple("amiga-keyboard", -1, NULL, 0); if (AMIGAHW_PRESENT(AMI_MOUSE)) platform_device_register_simple("amiga-mouse", -1, NULL, 0); if (AMIGAHW_PRESENT(AMI_SERIAL)) platform_device_register_simple("amiga-serial", -1, NULL, 0); if (AMIGAHW_PRESENT(AMI_PARALLEL)) platform_device_register_simple("amiga-parallel", -1, NULL, 0); /* real time clocks */ if (AMIGAHW_PRESENT(A2000_CLK)) platform_device_register_simple("rtc-msm6242", -1, &amiga_rtc_resource, 1); if (AMIGAHW_PRESENT(A3000_CLK)) platform_device_register_simple("rtc-rp5c01", -1, &amiga_rtc_resource, 1); return 0; } device_initcall(amiga_init_devices);
gpl-2.0
gundal/nobleltehk
tools/power/cpupower/utils/cpufreq-info.c
8411
15679
/* * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de> * * Licensed under the terms of the GNU GPL License version 2. */ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include "cpufreq.h" #include "helpers/helpers.h" #include "helpers/bitmask.h" #define LINE_LEN 10 static unsigned int count_cpus(void) { FILE *fp; char value[LINE_LEN]; unsigned int ret = 0; unsigned int cpunr = 0; fp = fopen("/proc/stat", "r"); if (!fp) { printf(_("Couldn't count the number of CPUs (%s: %s), assuming 1\n"), "/proc/stat", strerror(errno)); return 1; } while (!feof(fp)) { if (!fgets(value, LINE_LEN, fp)) continue; value[LINE_LEN - 1] = '\0'; if (strlen(value) < (LINE_LEN - 2)) continue; if (strstr(value, "cpu ")) continue; if (sscanf(value, "cpu%d ", &cpunr) != 1) continue; if (cpunr > ret) ret = cpunr; } fclose(fp); /* cpu count starts from 0, on error return 1 (UP) */ return ret + 1; } static void proc_cpufreq_output(void) { unsigned int cpu, nr_cpus; struct cpufreq_policy *policy; unsigned int min_pctg = 0; unsigned int max_pctg = 0; unsigned long min, max; printf(_(" minimum CPU frequency - maximum CPU frequency - governor\n")); nr_cpus = count_cpus(); for (cpu = 0; cpu < nr_cpus; cpu++) { policy = cpufreq_get_policy(cpu); if (!policy) continue; if (cpufreq_get_hardware_limits(cpu, &min, &max)) { max = 0; } else { min_pctg = (policy->min * 100) / max; max_pctg = (policy->max * 100) / max; } printf("CPU%3d %9lu kHz (%3d %%) - %9lu kHz (%3d %%) - %s\n", cpu , policy->min, max ? min_pctg : 0, policy->max, max ? 
max_pctg : 0, policy->governor); cpufreq_put_policy(policy); } } static void print_speed(unsigned long speed) { unsigned long tmp; if (speed > 1000000) { tmp = speed % 10000; if (tmp >= 5000) speed += 10000; printf("%u.%02u GHz", ((unsigned int) speed/1000000), ((unsigned int) (speed%1000000)/10000)); } else if (speed > 100000) { tmp = speed % 1000; if (tmp >= 500) speed += 1000; printf("%u MHz", ((unsigned int) speed / 1000)); } else if (speed > 1000) { tmp = speed % 100; if (tmp >= 50) speed += 100; printf("%u.%01u MHz", ((unsigned int) speed/1000), ((unsigned int) (speed%1000)/100)); } else printf("%lu kHz", speed); return; } static void print_duration(unsigned long duration) { unsigned long tmp; if (duration > 1000000) { tmp = duration % 10000; if (tmp >= 5000) duration += 10000; printf("%u.%02u ms", ((unsigned int) duration/1000000), ((unsigned int) (duration%1000000)/10000)); } else if (duration > 100000) { tmp = duration % 1000; if (tmp >= 500) duration += 1000; printf("%u us", ((unsigned int) duration / 1000)); } else if (duration > 1000) { tmp = duration % 100; if (tmp >= 50) duration += 100; printf("%u.%01u us", ((unsigned int) duration/1000), ((unsigned int) (duration%1000)/100)); } else printf("%lu ns", duration); return; } /* --boost / -b */ static int get_boost_mode(unsigned int cpu) { int support, active, b_states = 0, ret, pstate_no, i; /* ToDo: Make this more global */ unsigned long pstates[MAX_HW_PSTATES] = {0,}; if (cpupower_cpu_info.vendor != X86_VENDOR_AMD && cpupower_cpu_info.vendor != X86_VENDOR_INTEL) return 0; ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states); if (ret) { printf(_("Error while evaluating Boost Capabilities" " on CPU %d -- are you root?\n"), cpu); return ret; } /* P state changes via MSR are identified via cpuid 80000007 on Intel and AMD, but we assume boost capable machines can do that if (cpuid_eax(0x80000000) >= 0x80000007 && (cpuid_edx(0x80000007) & (1 << 7))) */ printf(_(" boost state support:\n")); 
printf(_(" Supported: %s\n"), support ? _("yes") : _("no")); printf(_(" Active: %s\n"), active ? _("yes") : _("no")); if (cpupower_cpu_info.vendor == X86_VENDOR_AMD && cpupower_cpu_info.family >= 0x10) { ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states, pstates, &pstate_no); if (ret) return ret; printf(_(" Boost States: %d\n"), b_states); printf(_(" Total States: %d\n"), pstate_no); for (i = 0; i < pstate_no; i++) { if (i < b_states) printf(_(" Pstate-Pb%d: %luMHz (boost state)" "\n"), i, pstates[i]); else printf(_(" Pstate-P%d: %luMHz\n"), i - b_states, pstates[i]); } } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO) { double bclk; unsigned long long intel_turbo_ratio = 0; unsigned int ratio; /* Any way to autodetect this ? */ if (cpupower_cpu_info.caps & CPUPOWER_CAP_IS_SNB) bclk = 100.00; else bclk = 133.33; intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu); dprint (" Ratio: 0x%llx - bclk: %f\n", intel_turbo_ratio, bclk); ratio = (intel_turbo_ratio >> 24) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 4 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 16) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 3 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 8) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 2 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 0) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 1 active cores\n"), ratio * bclk); } return 0; } static void debug_output_one(unsigned int cpu) { char *driver; struct cpufreq_affected_cpus *cpus; struct cpufreq_available_frequencies *freqs; unsigned long min, max, freq_kernel, freq_hardware; unsigned long total_trans, latency; unsigned long long total_time; struct cpufreq_policy *policy; struct cpufreq_available_governors *governors; struct cpufreq_stats *stats; if (cpufreq_cpu_exists(cpu)) return; freq_kernel = cpufreq_get_freq_kernel(cpu); freq_hardware = cpufreq_get_freq_hardware(cpu); driver = cpufreq_get_driver(cpu); if (!driver) { 
printf(_(" no or unknown cpufreq driver is active on this CPU\n")); } else { printf(_(" driver: %s\n"), driver); cpufreq_put_driver(driver); } cpus = cpufreq_get_related_cpus(cpu); if (cpus) { printf(_(" CPUs which run at the same hardware frequency: ")); while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_related_cpus(cpus); } cpus = cpufreq_get_affected_cpus(cpu); if (cpus) { printf(_(" CPUs which need to have their frequency coordinated by software: ")); while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_affected_cpus(cpus); } latency = cpufreq_get_transition_latency(cpu); if (latency) { printf(_(" maximum transition latency: ")); print_duration(latency); printf(".\n"); } if (!(cpufreq_get_hardware_limits(cpu, &min, &max))) { printf(_(" hardware limits: ")); print_speed(min); printf(" - "); print_speed(max); printf("\n"); } freqs = cpufreq_get_available_frequencies(cpu); if (freqs) { printf(_(" available frequency steps: ")); while (freqs->next) { print_speed(freqs->frequency); printf(", "); freqs = freqs->next; } print_speed(freqs->frequency); printf("\n"); cpufreq_put_available_frequencies(freqs); } governors = cpufreq_get_available_governors(cpu); if (governors) { printf(_(" available cpufreq governors: ")); while (governors->next) { printf("%s, ", governors->governor); governors = governors->next; } printf("%s\n", governors->governor); cpufreq_put_available_governors(governors); } policy = cpufreq_get_policy(cpu); if (policy) { printf(_(" current policy: frequency should be within ")); print_speed(policy->min); printf(_(" and ")); print_speed(policy->max); printf(".\n "); printf(_("The governor \"%s\" may" " decide which speed to use\n within this range.\n"), policy->governor); cpufreq_put_policy(policy); } if (freq_kernel || freq_hardware) { printf(_(" current CPU frequency is ")); if (freq_hardware) { print_speed(freq_hardware); printf(_(" 
(asserted by call to hardware)")); } else print_speed(freq_kernel); printf(".\n"); } stats = cpufreq_get_stats(cpu, &total_time); if (stats) { printf(_(" cpufreq stats: ")); while (stats) { print_speed(stats->frequency); printf(":%.2f%%", (100.0 * stats->time_in_state) / total_time); stats = stats->next; if (stats) printf(", "); } cpufreq_put_stats(stats); total_trans = cpufreq_get_transitions(cpu); if (total_trans) printf(" (%lu)\n", total_trans); else printf("\n"); } get_boost_mode(cpu); } /* --freq / -f */ static int get_freq_kernel(unsigned int cpu, unsigned int human) { unsigned long freq = cpufreq_get_freq_kernel(cpu); if (!freq) return -EINVAL; if (human) { print_speed(freq); printf("\n"); } else printf("%lu\n", freq); return 0; } /* --hwfreq / -w */ static int get_freq_hardware(unsigned int cpu, unsigned int human) { unsigned long freq = cpufreq_get_freq_hardware(cpu); if (!freq) return -EINVAL; if (human) { print_speed(freq); printf("\n"); } else printf("%lu\n", freq); return 0; } /* --hwlimits / -l */ static int get_hardware_limits(unsigned int cpu) { unsigned long min, max; if (cpufreq_get_hardware_limits(cpu, &min, &max)) return -EINVAL; printf("%lu %lu\n", min, max); return 0; } /* --driver / -d */ static int get_driver(unsigned int cpu) { char *driver = cpufreq_get_driver(cpu); if (!driver) return -EINVAL; printf("%s\n", driver); cpufreq_put_driver(driver); return 0; } /* --policy / -p */ static int get_policy(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_get_policy(cpu); if (!policy) return -EINVAL; printf("%lu %lu %s\n", policy->min, policy->max, policy->governor); cpufreq_put_policy(policy); return 0; } /* --governors / -g */ static int get_available_governors(unsigned int cpu) { struct cpufreq_available_governors *governors = cpufreq_get_available_governors(cpu); if (!governors) return -EINVAL; while (governors->next) { printf("%s ", governors->governor); governors = governors->next; } printf("%s\n", governors->governor); 
cpufreq_put_available_governors(governors); return 0; } /* --affected-cpus / -a */ static int get_affected_cpus(unsigned int cpu) { struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu); if (!cpus) return -EINVAL; while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_affected_cpus(cpus); return 0; } /* --related-cpus / -r */ static int get_related_cpus(unsigned int cpu) { struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu); if (!cpus) return -EINVAL; while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_related_cpus(cpus); return 0; } /* --stats / -s */ static int get_freq_stats(unsigned int cpu, unsigned int human) { unsigned long total_trans = cpufreq_get_transitions(cpu); unsigned long long total_time; struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time); while (stats) { if (human) { print_speed(stats->frequency); printf(":%.2f%%", (100.0 * stats->time_in_state) / total_time); } else printf("%lu:%llu", stats->frequency, stats->time_in_state); stats = stats->next; if (stats) printf(", "); } cpufreq_put_stats(stats); if (total_trans) printf(" (%lu)\n", total_trans); return 0; } /* --latency / -y */ static int get_latency(unsigned int cpu, unsigned int human) { unsigned long latency = cpufreq_get_transition_latency(cpu); if (!latency) return -EINVAL; if (human) { print_duration(latency); printf("\n"); } else printf("%lu\n", latency); return 0; } static struct option info_opts[] = { { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, { .name = "freq", .has_arg = no_argument, .flag = NULL, .val = 'f'}, { .name = "hwfreq", .has_arg = no_argument, .flag = NULL, .val = 'w'}, { .name = "hwlimits", .has_arg = no_argument, .flag = NULL, .val = 'l'}, { .name = "driver", .has_arg = no_argument, .flag = NULL, .val = 'd'}, { .name = "policy", 
.has_arg = no_argument, .flag = NULL, .val = 'p'}, { .name = "governors", .has_arg = no_argument, .flag = NULL, .val = 'g'}, { .name = "related-cpus", .has_arg = no_argument, .flag = NULL, .val = 'r'}, { .name = "affected-cpus",.has_arg = no_argument, .flag = NULL, .val = 'a'}, { .name = "stats", .has_arg = no_argument, .flag = NULL, .val = 's'}, { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, { }, }; int cmd_freq_info(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; int ret = 0, cont = 1; unsigned int cpu = 0; unsigned int human = 0; int output_param = 0; do { ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL); switch (ret) { case '?': output_param = '?'; cont = 0; break; case -1: cont = 0; break; case 'b': case 'o': case 'a': case 'r': case 'g': case 'p': case 'd': case 'l': case 'w': case 'f': case 'e': case 's': case 'y': if (output_param) { output_param = -1; cont = 0; break; } output_param = ret; break; case 'm': if (human) { output_param = -1; cont = 0; break; } human = 1; break; default: fprintf(stderr, "invalid or unknown argument\n"); return EXIT_FAILURE; } } while (cont); switch (output_param) { case 'o': if (!bitmask_isallclear(cpus_chosen)) { printf(_("The argument passed to this tool can't be " "combined with passing a --cpu argument\n")); return -EINVAL; } break; case 0: output_param = 'e'; } ret = 0; /* Default is: show output of CPU 0 only */ if (bitmask_isallclear(cpus_chosen)) bitmask_setbit(cpus_chosen, 0); switch (output_param) { case -1: printf(_("You can't specify more than one --cpu parameter and/or\n" "more than one output-specific argument\n")); return -EINVAL; case '?': printf(_("invalid or unknown argument\n")); return -EINVAL; case 'o': proc_cpufreq_output(); return EXIT_SUCCESS; } for (cpu = bitmask_first(cpus_chosen); cpu <= 
bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; if (cpufreq_cpu_exists(cpu)) { printf(_("couldn't analyze CPU %d as it doesn't seem to be present\n"), cpu); continue; } printf(_("analyzing CPU %d:\n"), cpu); switch (output_param) { case 'b': get_boost_mode(cpu); break; case 'e': debug_output_one(cpu); break; case 'a': ret = get_affected_cpus(cpu); break; case 'r': ret = get_related_cpus(cpu); break; case 'g': ret = get_available_governors(cpu); break; case 'p': ret = get_policy(cpu); break; case 'd': ret = get_driver(cpu); break; case 'l': ret = get_hardware_limits(cpu); break; case 'w': ret = get_freq_hardware(cpu, human); break; case 'f': ret = get_freq_kernel(cpu, human); break; case 's': ret = get_freq_stats(cpu, human); break; case 'y': ret = get_latency(cpu, human); break; } if (ret) return ret; } return ret; }
gpl-2.0
12019/android_kernel_samsung_lt02wifi
drivers/tty/hvc/hvc_beat.c
8667
3211
/* * Beat hypervisor console driver * * (C) Copyright 2006 TOSHIBA CORPORATION * * This code is based on drivers/char/hvc_rtas.c: * (C) Copyright IBM Corporation 2001-2005 * (C) Copyright Red Hat, Inc. 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/string.h> #include <linux/console.h> #include <asm/prom.h> #include <asm/hvconsole.h> #include <asm/firmware.h> #include "hvc_console.h" extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *); extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t); struct hvc_struct *hvc_beat_dev = NULL; /* bug: only one queue is available regardless of vtermno */ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt) { static unsigned char q[sizeof(unsigned long) * 2] __attribute__((aligned(sizeof(unsigned long)))); static int qlen = 0; u64 got; again: if (qlen) { if (qlen > cnt) { memcpy(buf, q, cnt); qlen -= cnt; memmove(q + cnt, q, qlen); return cnt; } else { /* qlen <= cnt */ int r; memcpy(buf, q, qlen); r = qlen; qlen = 0; return r; } } if (beat_get_term_char(vtermno, &got, ((u64 *)q), ((u64 *)q) + 1) == 0) { qlen = got; goto again; } return 0; } static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) { unsigned long 
kb[2]; int rest, nlen; for (rest = cnt; rest > 0; rest -= nlen) { nlen = (rest > 16) ? 16 : rest; memcpy(kb, buf, nlen); beat_put_term_char(vtermno, nlen, kb[0], kb[1]); buf += nlen; } return cnt; } static const struct hv_ops hvc_beat_get_put_ops = { .get_chars = hvc_beat_get_chars, .put_chars = hvc_beat_put_chars, }; static int hvc_beat_useit = 1; static int hvc_beat_config(char *p) { hvc_beat_useit = simple_strtoul(p, NULL, 0); return 0; } static int __init hvc_beat_console_init(void) { if (hvc_beat_useit && of_machine_is_compatible("Beat")) { hvc_instantiate(0, 0, &hvc_beat_get_put_ops); } return 0; } /* temp */ static int __init hvc_beat_init(void) { struct hvc_struct *hp; if (!firmware_has_feature(FW_FEATURE_BEAT)) return -ENODEV; hp = hvc_alloc(0, 0, &hvc_beat_get_put_ops, 16); if (IS_ERR(hp)) return PTR_ERR(hp); hvc_beat_dev = hp; return 0; } static void __exit hvc_beat_exit(void) { if (hvc_beat_dev) hvc_remove(hvc_beat_dev); } module_init(hvc_beat_init); module_exit(hvc_beat_exit); __setup("hvc_beat=", hvc_beat_config); console_initcall(hvc_beat_console_init);
gpl-2.0
darshan1205/kernel_cyanogen_msm8916_64
arch/score/kernel/ptrace.c
11995
9980
/* * arch/score/kernel/ptrace.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/elf.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <asm/uaccess.h> /* * retrieve the contents of SCORE userspace general registers */ static int genregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct pt_regs *regs = task_pt_regs(target); int ret; /* skip 9 * sizeof(unsigned long) not use for pt_regs */ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, offsetof(struct pt_regs, regs)); /* r0 - r31, cel, ceh, sr0, sr1, sr2, epc, ema, psr, ecr, condition */ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs->regs, offsetof(struct pt_regs, regs), offsetof(struct pt_regs, cp0_condition)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, sizeof(struct pt_regs), -1); return ret; } /* * update the contents of the SCORE userspace general registers */ static int genregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int 
pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); int ret; /* skip 9 * sizeof(unsigned long) */ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offsetof(struct pt_regs, regs)); /* r0 - r31, cel, ceh, sr0, sr1, sr2, epc, ema, psr, ecr, condition */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->regs, offsetof(struct pt_regs, regs), offsetof(struct pt_regs, cp0_condition)); if (!ret) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, sizeof(struct pt_regs), -1); return ret; } /* * Define the register sets available on the score7 under Linux */ enum score7_regset { REGSET_GENERAL, }; static const struct user_regset score7_regsets[] = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .get = genregs_get, .set = genregs_set, }, }; static const struct user_regset_view user_score_native_view = { .name = "score7", .e_machine = EM_SCORE7, .regsets = score7_regsets, .n = ARRAY_SIZE(score7_regsets), }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_score_native_view; } static int is_16bitinsn(unsigned long insn) { if ((insn & INSN32_MASK) == INSN32_MASK) return 0; else return 1; } int read_tsk_long(struct task_struct *child, unsigned long addr, unsigned long *res) { int copied; copied = access_process_vm(child, addr, res, sizeof(*res), 0); return copied != sizeof(*res) ? -EIO : 0; } int read_tsk_short(struct task_struct *child, unsigned long addr, unsigned short *res) { int copied; copied = access_process_vm(child, addr, res, sizeof(*res), 0); return copied != sizeof(*res) ? -EIO : 0; } static int write_tsk_short(struct task_struct *child, unsigned long addr, unsigned short val) { int copied; copied = access_process_vm(child, addr, &val, sizeof(val), 1); return copied != sizeof(val) ? 
-EIO : 0; } static int write_tsk_long(struct task_struct *child, unsigned long addr, unsigned long val) { int copied; copied = access_process_vm(child, addr, &val, sizeof(val), 1); return copied != sizeof(val) ? -EIO : 0; } void user_enable_single_step(struct task_struct *child) { /* far_epc is the target of branch */ unsigned int epc, far_epc = 0; unsigned long epc_insn, far_epc_insn; int ninsn_type; /* next insn type 0=16b, 1=32b */ unsigned int tmp, tmp2; struct pt_regs *regs = task_pt_regs(child); child->thread.single_step = 1; child->thread.ss_nextcnt = 1; epc = regs->cp0_epc; read_tsk_long(child, epc, &epc_insn); if (is_16bitinsn(epc_insn)) { if ((epc_insn & J16M) == J16) { tmp = epc_insn & 0xFFE; epc = (epc & 0xFFFFF000) | tmp; } else if ((epc_insn & B16M) == B16) { child->thread.ss_nextcnt = 2; tmp = (epc_insn & 0xFF) << 1; tmp = tmp << 23; tmp = (unsigned int)((int) tmp >> 23); far_epc = epc + tmp; epc += 2; } else if ((epc_insn & BR16M) == BR16) { child->thread.ss_nextcnt = 2; tmp = (epc_insn >> 4) & 0xF; far_epc = regs->regs[tmp]; epc += 2; } else epc += 2; } else { if ((epc_insn & J32M) == J32) { tmp = epc_insn & 0x03FFFFFE; tmp2 = tmp & 0x7FFF; tmp = (((tmp >> 16) & 0x3FF) << 15) | tmp2; epc = (epc & 0xFFC00000) | tmp; } else if ((epc_insn & B32M) == B32) { child->thread.ss_nextcnt = 2; tmp = epc_insn & 0x03FFFFFE; /* discard LK bit */ tmp2 = tmp & 0x3FF; tmp = (((tmp >> 16) & 0x3FF) << 10) | tmp2; /* 20bit */ tmp = tmp << 12; tmp = (unsigned int)((int) tmp >> 12); far_epc = epc + tmp; epc += 4; } else if ((epc_insn & BR32M) == BR32) { child->thread.ss_nextcnt = 2; tmp = (epc_insn >> 16) & 0x1F; far_epc = regs->regs[tmp]; epc += 4; } else epc += 4; } if (child->thread.ss_nextcnt == 1) { read_tsk_long(child, epc, &epc_insn); if (is_16bitinsn(epc_insn)) { write_tsk_short(child, epc, SINGLESTEP16_INSN); ninsn_type = 0; } else { write_tsk_long(child, epc, SINGLESTEP32_INSN); ninsn_type = 1; } if (ninsn_type == 0) { /* 16bits */ child->thread.insn1_type = 
0; child->thread.addr1 = epc; /* the insn may have 32bit data */ child->thread.insn1 = (short)epc_insn; } else { child->thread.insn1_type = 1; child->thread.addr1 = epc; child->thread.insn1 = epc_insn; } } else { /* branch! have two target child->thread.ss_nextcnt=2 */ read_tsk_long(child, epc, &epc_insn); read_tsk_long(child, far_epc, &far_epc_insn); if (is_16bitinsn(epc_insn)) { write_tsk_short(child, epc, SINGLESTEP16_INSN); ninsn_type = 0; } else { write_tsk_long(child, epc, SINGLESTEP32_INSN); ninsn_type = 1; } if (ninsn_type == 0) { /* 16bits */ child->thread.insn1_type = 0; child->thread.addr1 = epc; /* the insn may have 32bit data */ child->thread.insn1 = (short)epc_insn; } else { child->thread.insn1_type = 1; child->thread.addr1 = epc; child->thread.insn1 = epc_insn; } if (is_16bitinsn(far_epc_insn)) { write_tsk_short(child, far_epc, SINGLESTEP16_INSN); ninsn_type = 0; } else { write_tsk_long(child, far_epc, SINGLESTEP32_INSN); ninsn_type = 1; } if (ninsn_type == 0) { /* 16bits */ child->thread.insn2_type = 0; child->thread.addr2 = far_epc; /* the insn may have 32bit data */ child->thread.insn2 = (short)far_epc_insn; } else { child->thread.insn2_type = 1; child->thread.addr2 = far_epc; child->thread.insn2 = far_epc_insn; } } } void user_disable_single_step(struct task_struct *child) { if (child->thread.insn1_type == 0) write_tsk_short(child, child->thread.addr1, child->thread.insn1); if (child->thread.insn1_type == 1) write_tsk_long(child, child->thread.addr1, child->thread.insn1); if (child->thread.ss_nextcnt == 2) { /* branch */ if (child->thread.insn1_type == 0) write_tsk_short(child, child->thread.addr1, child->thread.insn1); if (child->thread.insn1_type == 1) write_tsk_long(child, child->thread.addr1, child->thread.insn1); if (child->thread.insn2_type == 0) write_tsk_short(child, child->thread.addr2, child->thread.insn2); if (child->thread.insn2_type == 1) write_tsk_long(child, child->thread.addr2, child->thread.insn2); } child->thread.single_step = 
0; child->thread.ss_nextcnt = 0; } void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (void __user *)data; switch (request) { case PTRACE_GETREGS: ret = copy_regset_to_user(child, &user_score_native_view, REGSET_GENERAL, 0, sizeof(struct pt_regs), datap); break; case PTRACE_SETREGS: ret = copy_regset_from_user(child, &user_score_native_view, REGSET_GENERAL, 0, sizeof(struct pt_regs), datap); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } /* * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) { if (!(current->ptrace & PT_PTRACED)) return; if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; /* The 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery. */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } }
gpl-2.0
pchri03/net-next
arch/x86/crypto/poly1305_glue.c
220
5692
/* * Poly1305 authenticator algorithm, RFC7539, SIMD glue code * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <crypto/algapi.h> #include <crypto/internal/hash.h> #include <crypto/poly1305.h> #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/fpu/api.h> #include <asm/simd.h> struct poly1305_simd_desc_ctx { struct poly1305_desc_ctx base; /* derived key u set? */ bool uset; #ifdef CONFIG_AS_AVX2 /* derived keys r^3, r^4 set? */ bool wset; #endif /* derived Poly1305 key r^2 */ u32 u[5]; /* ... silently appended r^3 and r^4 when using AVX2 */ }; asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, const u32 *r, unsigned int blocks); asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, unsigned int blocks, const u32 *u); #ifdef CONFIG_AS_AVX2 asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, unsigned int blocks, const u32 *u); static bool poly1305_use_avx2; #endif static int poly1305_simd_init(struct shash_desc *desc) { struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc); sctx->uset = false; #ifdef CONFIG_AS_AVX2 sctx->wset = false; #endif return crypto_poly1305_init(desc); } static void poly1305_simd_mult(u32 *a, const u32 *b) { u8 m[POLY1305_BLOCK_SIZE]; memset(m, 0, sizeof(m)); /* The poly1305 block function adds a hi-bit to the accumulator which * we don't need for key multiplication; compensate for it. 
*/ a[4] -= 1 << 24; poly1305_block_sse2(a, m, b, 1); } static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { struct poly1305_simd_desc_ctx *sctx; unsigned int blocks, datalen; BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); if (unlikely(!dctx->sset)) { datalen = crypto_poly1305_setdesckey(dctx, src, srclen); src += srclen - datalen; srclen = datalen; } #ifdef CONFIG_AS_AVX2 if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { if (unlikely(!sctx->wset)) { if (!sctx->uset) { memcpy(sctx->u, dctx->r, sizeof(sctx->u)); poly1305_simd_mult(sctx->u, dctx->r); sctx->uset = true; } memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); poly1305_simd_mult(sctx->u + 5, dctx->r); memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); poly1305_simd_mult(sctx->u + 10, dctx->r); sctx->wset = true; } blocks = srclen / (POLY1305_BLOCK_SIZE * 4); poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u); src += POLY1305_BLOCK_SIZE * 4 * blocks; srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; } #endif if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { if (unlikely(!sctx->uset)) { memcpy(sctx->u, dctx->r, sizeof(sctx->u)); poly1305_simd_mult(sctx->u, dctx->r); sctx->uset = true; } blocks = srclen / (POLY1305_BLOCK_SIZE * 2); poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u); src += POLY1305_BLOCK_SIZE * 2 * blocks; srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; } if (srclen >= POLY1305_BLOCK_SIZE) { poly1305_block_sse2(dctx->h, src, dctx->r, 1); srclen -= POLY1305_BLOCK_SIZE; } return srclen; } static int poly1305_simd_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); unsigned int bytes; /* kernel_fpu_begin/end is costly, use fallback for small updates */ if (srclen <= 288 || !may_use_simd()) return crypto_poly1305_update(desc, src, srclen); kernel_fpu_begin(); if 
(unlikely(dctx->buflen)) { bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); memcpy(dctx->buf + dctx->buflen, src, bytes); src += bytes; srclen -= bytes; dctx->buflen += bytes; if (dctx->buflen == POLY1305_BLOCK_SIZE) { poly1305_simd_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE); dctx->buflen = 0; } } if (likely(srclen >= POLY1305_BLOCK_SIZE)) { bytes = poly1305_simd_blocks(dctx, src, srclen); src += srclen - bytes; srclen = bytes; } kernel_fpu_end(); if (unlikely(srclen)) { dctx->buflen = srclen; memcpy(dctx->buf, src, srclen); } return 0; } static struct shash_alg alg = { .digestsize = POLY1305_DIGEST_SIZE, .init = poly1305_simd_init, .update = poly1305_simd_update, .final = crypto_poly1305_final, .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_simd_desc_ctx), .base = { .cra_name = "poly1305", .cra_driver_name = "poly1305-simd", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_alignmask = sizeof(u32) - 1, .cra_blocksize = POLY1305_BLOCK_SIZE, .cra_module = THIS_MODULE, }, }; static int __init poly1305_simd_mod_init(void) { if (!cpu_has_xmm2) return -ENODEV; #ifdef CONFIG_AS_AVX2 poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); alg.descsize = sizeof(struct poly1305_simd_desc_ctx); if (poly1305_use_avx2) alg.descsize += 10 * sizeof(u32); #endif return crypto_register_shash(&alg); } static void __exit poly1305_simd_mod_exit(void) { crypto_unregister_shash(&alg); } module_init(poly1305_simd_mod_init); module_exit(poly1305_simd_mod_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); MODULE_DESCRIPTION("Poly1305 authenticator"); MODULE_ALIAS_CRYPTO("poly1305"); MODULE_ALIAS_CRYPTO("poly1305-simd");
gpl-2.0
linuzo/stock_kernel_lge_d852
drivers/net/wireless/bcmdhd/src/wl/sys/wl_iw.c
220
96650
/* * Linux Wireless Extensions support * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $ */ #if defined(USE_IW) #define LINUX_PORT #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <bcmutils.h> #include <bcmendian.h> #include <proto/ethernet.h> #include <linux/if_arp.h> #include <asm/uaccess.h> typedef const struct si_pub si_t; #include <wlioctl.h> #include <wl_dbg.h> #include <wl_iw.h> #ifdef BCMWAPI_WPI /* these items should evetually go into wireless.h of the linux system headfile dir */ #ifndef IW_ENCODE_ALG_SM4 #define IW_ENCODE_ALG_SM4 0x20 #endif #ifndef IW_AUTH_WAPI_ENABLED #define IW_AUTH_WAPI_ENABLED 0x20 #endif #ifndef IW_AUTH_WAPI_VERSION_1 #define IW_AUTH_WAPI_VERSION_1 0x00000008 #endif #ifndef IW_AUTH_CIPHER_SMS4 #define IW_AUTH_CIPHER_SMS4 0x00000020 #endif #ifndef IW_AUTH_KEY_MGMT_WAPI_PSK #define IW_AUTH_KEY_MGMT_WAPI_PSK 4 #endif #ifndef IW_AUTH_KEY_MGMT_WAPI_CERT #define IW_AUTH_KEY_MGMT_WAPI_CERT 8 #endif #endif /* BCMWAPI_WPI */ /* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */ #ifndef IW_AUTH_KEY_MGMT_FT_802_1X #define IW_AUTH_KEY_MGMT_FT_802_1X 0x04 #endif #ifndef IW_AUTH_KEY_MGMT_FT_PSK #define IW_AUTH_KEY_MGMT_FT_PSK 0x08 #endif #ifndef IW_ENC_CAPA_FW_ROAM_ENABLE #define IW_ENC_CAPA_FW_ROAM_ENABLE 0x00000020 #endif /* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest * version 22. */ #ifndef IW_ENCODE_ALG_PMK #define IW_ENCODE_ALG_PMK 4 #endif #ifndef IW_ENC_CAPA_4WAY_HANDSHAKE #define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 #endif /* End FC9. 
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) #include <linux/rtnetlink.h> #endif #if defined(SOFTAP) struct net_device *ap_net_dev = NULL; tsk_ctl_t ap_eth_ctl; /* apsta AP netdev waiter thread */ #endif /* SOFTAP */ extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, char* stringBuf, uint buflen); uint wl_msg_level = WL_ERROR_VAL; #define MAX_WLIW_IOCTL_LEN 1024 /* IOCTL swapping mode for Big Endian host with Little Endian dongle. Default to off */ #define htod32(i) (i) #define htod16(i) (i) #define dtoh32(i) (i) #define dtoh16(i) (i) #define htodchanspec(i) (i) #define dtohchanspec(i) (i) extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); extern int dhd_wait_pend8021x(struct net_device *dev); #if WIRELESS_EXT < 19 #define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) #define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) #endif /* WIRELESS_EXT < 19 */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) #define DAEMONIZE(a) do { \ allow_signal(SIGKILL); \ allow_signal(SIGTERM); \ } while (0) #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) #define DAEMONIZE(a) daemonize(a); \ allow_signal(SIGKILL); \ allow_signal(SIGTERM); #else /* Linux 2.4 (w/o preemption patch) */ #define RAISE_RX_SOFTIRQ() \ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) #define DAEMONIZE(a) daemonize(); \ do { if (a) \ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ } while (0); #endif /* LINUX_VERSION_CODE */ #define ISCAN_STATE_IDLE 0 #define ISCAN_STATE_SCANING 1 /* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */ #define WLC_IW_ISCAN_MAXLEN 2048 typedef struct iscan_buf { struct iscan_buf * next; char iscan_buf[WLC_IW_ISCAN_MAXLEN]; } iscan_buf_t; typedef struct iscan_info { struct net_device *dev; struct timer_list timer; uint32 timer_ms; uint32 timer_on; int iscan_state; iscan_buf_t * list_hdr; iscan_buf_t * list_cur; /* Thread 
to work on iscan */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) struct task_struct *kthread; #endif long sysioc_pid; struct semaphore sysioc_sem; struct completion sysioc_exited; char ioctlbuf[WLC_IOCTL_SMLEN]; } iscan_info_t; iscan_info_t *g_iscan = NULL; static void wl_iw_timerfunc(ulong data); static void wl_iw_set_event_mask(struct net_device *dev); static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); /* priv_link becomes netdev->priv and is the link between netdev and wlif struct */ typedef struct priv_link { wl_iw_t *wliw; } priv_link_t; /* dev to priv_link */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) #define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv) #else #define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev) #endif /* dev to wl_iw_t */ #define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw) static void swap_key_from_BE( wl_wsec_key_t *key ) { key->index = htod32(key->index); key->len = htod32(key->len); key->algo = htod32(key->algo); key->flags = htod32(key->flags); key->rxiv.hi = htod32(key->rxiv.hi); key->rxiv.lo = htod16(key->rxiv.lo); key->iv_initialized = htod32(key->iv_initialized); } static void swap_key_to_BE( wl_wsec_key_t *key ) { key->index = dtoh32(key->index); key->len = dtoh32(key->len); key->algo = dtoh32(key->algo); key->flags = dtoh32(key->flags); key->rxiv.hi = dtoh32(key->rxiv.hi); key->rxiv.lo = dtoh16(key->rxiv.lo); key->iv_initialized = dtoh32(key->iv_initialized); } static int dev_wlc_ioctl( struct net_device *dev, int cmd, void *arg, int len ) { struct ifreq ifr; wl_ioctl_t ioc; mm_segment_t fs; int ret; memset(&ioc, 0, sizeof(ioc)); ioc.cmd = cmd; ioc.buf = arg; ioc.len = len; strcpy(ifr.ifr_name, dev->name); ifr.ifr_data = (caddr_t) &ioc; fs = get_fs(); set_fs(get_ds()); #if defined(WL_USE_NETDEV_OPS) ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); #else ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); #endif set_fs(fs); return ret; } /* set named driver variable to 
int value and return error indication calling example: dev_wlc_intvar_set(dev, "arate", rate) */ static int dev_wlc_intvar_set( struct net_device *dev, char *name, int val) { char buf[WLC_IOCTL_SMLEN]; uint len; val = htod32(val); len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); ASSERT(len); return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); } static int dev_iw_iovar_setbuf( struct net_device *dev, char *iovar, void *param, int paramlen, void *bufptr, int buflen) { int iolen; iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); ASSERT(iolen); BCM_REFERENCE(iolen); return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); } static int dev_iw_iovar_getbuf( struct net_device *dev, char *iovar, void *param, int paramlen, void *bufptr, int buflen) { int iolen; iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); ASSERT(iolen); BCM_REFERENCE(iolen); return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); } #if WIRELESS_EXT > 17 static int dev_wlc_bufvar_set( struct net_device *dev, char *name, char *buf, int len) { char *ioctlbuf; uint buflen; int error; ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); if (!ioctlbuf) return -ENOMEM; buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN); ASSERT(buflen); error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen); kfree(ioctlbuf); return error; } #endif /* WIRELESS_EXT > 17 */ /* get named driver variable to int value and return error indication calling example: dev_wlc_bufvar_get(dev, "arate", &rate) */ static int dev_wlc_bufvar_get( struct net_device *dev, char *name, char *buf, int buflen) { char *ioctlbuf; int error; uint len; ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); if (!ioctlbuf) return -ENOMEM; len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN); ASSERT(len); BCM_REFERENCE(len); error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); if (!error) bcopy(ioctlbuf, buf, buflen); kfree(ioctlbuf); return (error); } /* get 
named driver variable to int value and return error indication calling example: dev_wlc_intvar_get(dev, "arate", &rate) */ static int dev_wlc_intvar_get( struct net_device *dev, char *name, int *retval) { union { char buf[WLC_IOCTL_SMLEN]; int val; } var; int error; uint len; uint data_null; len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); ASSERT(len); error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); *retval = dtoh32(var.val); return (error); } /* Maintain backward compatibility */ #if WIRELESS_EXT < 13 struct iw_request_info { __u16 cmd; /* Wireless Extension command */ __u16 flags; /* More to come ;-) */ }; typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra); #endif /* WIRELESS_EXT < 13 */ #if WIRELESS_EXT > 12 static int wl_iw_set_leddc( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int dc = *(int *)extra; int error; error = dev_wlc_intvar_set(dev, "leddc", dc); return error; } static int wl_iw_set_vlanmode( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int mode = *(int *)extra; int error; mode = htod32(mode); error = dev_wlc_intvar_set(dev, "vlan_mode", mode); return error; } static int wl_iw_set_pm( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int pm = *(int *)extra; int error; pm = htod32(pm); error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); return error; } #if WIRELESS_EXT > 17 #endif /* WIRELESS_EXT > 17 */ #endif /* WIRELESS_EXT > 12 */ int wl_iw_send_priv_event( struct net_device *dev, char *flag ) { union iwreq_data wrqu; char extra[IW_CUSTOM_MAX + 1]; int cmd; cmd = IWEVCUSTOM; memset(&wrqu, 0, sizeof(wrqu)); if (strlen(flag) > sizeof(extra)) return -1; strcpy(extra, flag); wrqu.data.length = strlen(extra); wireless_send_event(dev, cmd, &wrqu, extra); WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); 
return 0; } static int wl_iw_config_commit( struct net_device *dev, struct iw_request_info *info, void *zwrq, char *extra ) { wlc_ssid_t ssid; int error; struct sockaddr bssid; WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) return error; ssid.SSID_len = dtoh32(ssid.SSID_len); if (!ssid.SSID_len) return 0; bzero(&bssid, sizeof(struct sockaddr)); if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error)); return error; } return 0; } static int wl_iw_get_name( struct net_device *dev, struct iw_request_info *info, union iwreq_data *cwrq, char *extra ) { int phytype, err; uint band[3]; char cap[5]; WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); cap[0] = 0; if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0) goto done; if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0) goto done; band[0] = dtoh32(band[0]); switch (phytype) { case WLC_PHY_TYPE_A: strcpy(cap, "a"); break; case WLC_PHY_TYPE_B: strcpy(cap, "b"); break; case WLC_PHY_TYPE_LP: case WLC_PHY_TYPE_G: if (band[0] >= 2) strcpy(cap, "abg"); else strcpy(cap, "bg"); break; case WLC_PHY_TYPE_N: if (band[0] >= 2) strcpy(cap, "abgn"); else strcpy(cap, "bgn"); break; } done: snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); return 0; } static int wl_iw_set_freq( struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra ) { int error, chan; uint sf = 0; WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name)); /* Setting by channel number */ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { chan = fwrq->m; } /* Setting by frequency */ else { /* Convert to MHz as best we can */ if (fwrq->e >= 6) { fwrq->e -= 6; while (fwrq->e--) fwrq->m *= 10; } else if (fwrq->e < 6) { while (fwrq->e++ < 6) fwrq->m /= 10; } /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */ if (fwrq->m > 4000 && fwrq->m < 5000) sf = 
WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */ chan = wf_mhz2channel(fwrq->m, sf); } chan = htod32(chan); if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) return error; /* -EINPROGRESS: Call commit handler */ return -EINPROGRESS; } static int wl_iw_get_freq( struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra ) { channel_info_t ci; int error; WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) return error; /* Return radio channel in channel form */ fwrq->m = dtoh32(ci.hw_channel); fwrq->e = dtoh32(0); return 0; } static int wl_iw_set_mode( struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra ) { int infra = 0, ap = 0, error = 0; WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); switch (*uwrq) { case IW_MODE_MASTER: infra = ap = 1; break; case IW_MODE_ADHOC: case IW_MODE_AUTO: break; case IW_MODE_INFRA: infra = 1; break; default: return -EINVAL; } infra = htod32(infra); ap = htod32(ap); if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) return error; /* -EINPROGRESS: Call commit handler */ return -EINPROGRESS; } static int wl_iw_get_mode( struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra ) { int error, infra = 0, ap = 0; WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) return error; infra = dtoh32(infra); ap = dtoh32(ap); *uwrq = infra ? ap ? 
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; return 0; } static int wl_iw_get_range( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { struct iw_range *range = (struct iw_range *) extra; static int channels[MAXCHANNEL+1]; wl_uint32_list_t *list = (wl_uint32_list_t *) channels; wl_rateset_t rateset; int error, i, k; uint sf, ch; int phytype; int bw_cap = 0, sgi_tx = 0, nmode = 0; channel_info_t ci; uint8 nrate_list2copy = 0; uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, {14, 29, 43, 58, 87, 116, 130, 144}, {27, 54, 81, 108, 162, 216, 243, 270}, {30, 60, 90, 120, 180, 240, 270, 300}}; int fbt_cap = 0; WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); if (!extra) return -EINVAL; dwrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(*range)); /* We don't use nwids */ range->min_nwid = range->max_nwid = 0; /* Set available channels/frequencies */ list->count = htod32(MAXCHANNEL); if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels)))) return error; for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { range->freq[i].i = dtoh32(list->element[i]); ch = dtoh32(list->element[i]); if (ch <= CH_MAX_2G_CHANNEL) sf = WF_CHAN_FACTOR_2_4_G; else sf = WF_CHAN_FACTOR_5_G; range->freq[i].m = wf_channel2mhz(ch, sf); range->freq[i].e = 6; } range->num_frequency = range->num_channels = i; /* Link quality (use NDIS cutoffs) */ range->max_qual.qual = 5; /* Signal level (use RSSI) */ range->max_qual.level = 0x100 - 200; /* -200 dBm */ /* Noise level (use noise) */ range->max_qual.noise = 0x100 - 200; /* -200 dBm */ /* Signal level threshold range (?) 
*/ range->sensitivity = 65535; #if WIRELESS_EXT > 11 /* Link quality (use NDIS cutoffs) */ range->avg_qual.qual = 3; /* Signal level (use RSSI) */ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; /* Noise level (use noise) */ range->avg_qual.noise = 0x100 - 75; /* -75 dBm */ #endif /* WIRELESS_EXT > 11 */ /* Set available bitrates */ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) return error; rateset.count = dtoh32(rateset.count); range->num_bitrates = rateset.count; for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */ if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode))) return error; if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)))) return error; if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) || (phytype == WLC_PHY_TYPE_LCN40))) { if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap))) return error; if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx))) return error; if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)))) return error; ci.hw_channel = dtoh32(ci.hw_channel); if (bw_cap == 0 || (bw_cap == 2 && ci.hw_channel <= 14)) { if (sgi_tx == 0) nrate_list2copy = 0; else nrate_list2copy = 1; } if (bw_cap == 1 || (bw_cap == 2 && ci.hw_channel >= 36)) { if (sgi_tx == 0) nrate_list2copy = 2; else nrate_list2copy = 3; } range->num_bitrates += 8; ASSERT(range->num_bitrates < IW_MAX_BITRATES); for (k = 0; i < range->num_bitrates; k++, i++) { /* convert to bps */ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; } } /* Set an indication of the max TCP throughput * in bit/s that we can expect using this interface. * May be use for QoS stuff... 
Jean II */ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) return error; i = dtoh32(i); if (i == WLC_PHY_TYPE_A) range->throughput = 24000000; /* 24 Mbits/s */ else range->throughput = 1500000; /* 1.5 Mbits/s */ /* RTS and fragmentation thresholds */ range->min_rts = 0; range->max_rts = 2347; range->min_frag = 256; range->max_frag = 2346; range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; range->num_encoding_sizes = 4; range->encoding_size[0] = WEP1_KEY_SIZE; range->encoding_size[1] = WEP128_KEY_SIZE; #if WIRELESS_EXT > 17 range->encoding_size[2] = TKIP_KEY_SIZE; #else range->encoding_size[2] = 0; #endif range->encoding_size[3] = AES_KEY_SIZE; /* Do not support power micro-management */ range->min_pmp = 0; range->max_pmp = 0; range->min_pmt = 0; range->max_pmt = 0; range->pmp_flags = 0; range->pm_capa = 0; /* Transmit Power - values are in mW */ range->num_txpower = 2; range->txpower[0] = 1; range->txpower[1] = 255; range->txpower_capa = IW_TXPOW_MWATT; #if WIRELESS_EXT > 10 range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 19; /* Only support retry limits */ range->retry_capa = IW_RETRY_LIMIT; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = 0; /* SRL and LRL limits */ range->min_retry = 1; range->max_retry = 255; /* Retry lifetime limits unsupported */ range->min_r_time = 0; range->max_r_time = 0; #endif /* WIRELESS_EXT > 10 */ #if WIRELESS_EXT > 17 range->enc_capa = IW_ENC_CAPA_WPA; range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; range->enc_capa |= IW_ENC_CAPA_WPA2; /* Determine driver FBT capability. */ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { /* Tell the host (e.g. 
wpa_supplicant) to let driver do the handshake */ range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE; } } #ifdef BCMFW_ROAM_ENABLE_WEXT /* Advertise firmware roam capability to the external supplicant */ range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE; #endif /* BCMFW_ROAM_ENABLE_WEXT */ /* Event capability (kernel) */ IW_EVENT_CAPA_SET_KERNEL(range->event_capa); /* Event capability (driver) */ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE); IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE); IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); #if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID) /* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */ range->scan_capa = IW_SCAN_CAPA_ESSID; #endif #endif /* WIRELESS_EXT > 17 */ return 0; } static int rssi_to_qual(int rssi) { if (rssi <= WL_IW_RSSI_NO_SIGNAL) return 0; else if (rssi <= WL_IW_RSSI_VERY_LOW) return 1; else if (rssi <= WL_IW_RSSI_LOW) return 2; else if (rssi <= WL_IW_RSSI_GOOD) return 3; else if (rssi <= WL_IW_RSSI_VERY_GOOD) return 4; else return 5; } static int wl_iw_set_spy( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); struct sockaddr *addr = (struct sockaddr *) extra; int i; WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); if (!extra) return -EINVAL; iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); for (i = 0; i < iw->spy_num; i++) memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); return 0; } static int wl_iw_get_spy( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); struct sockaddr *addr = (struct sockaddr *) extra; struct iw_quality *qual = (struct iw_quality *) 
&addr[iw->spy_num]; int i; WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); if (!extra) return -EINVAL; dwrq->length = iw->spy_num; for (i = 0; i < iw->spy_num; i++) { memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); addr[i].sa_family = AF_UNIX; memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); iw->spy_qual[i].updated = 0; } return 0; } static int wl_iw_set_wap( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { int error = -EINVAL; WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); if (awrq->sa_family != ARPHRD_ETHER) { WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__)); return -EINVAL; } /* Ignore "auto" or "off" */ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { scb_val_t scbval; bzero(&scbval, sizeof(scb_val_t)); if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) { WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error)); } return 0; } /* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data), * eabuf))); */ /* Reassociate to the specified AP */ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) { WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error)); return error; } return 0; } static int wl_iw_get_wap( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); awrq->sa_family = ARPHRD_ETHER; memset(awrq->sa_data, 0, ETHER_ADDR_LEN); /* Ignore error (may be down or disassociated) */ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); return 0; } #if WIRELESS_EXT > 17 static int wl_iw_mlme( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { struct iw_mlme *mlme; scb_val_t scbval; int error = -EINVAL; WL_TRACE(("%s: SIOCSIWMLME\n", dev->name)); mlme = (struct iw_mlme *)extra; if (mlme == NULL) { WL_ERROR(("Invalid ioctl data.\n")); return error; } 
scbval.val = mlme->reason_code; bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN); if (mlme->cmd == IW_MLME_DISASSOC) { scbval.val = htod32(scbval.val); error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); } else if (mlme->cmd == IW_MLME_DEAUTH) { scbval.val = htod32(scbval.val); error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, sizeof(scb_val_t)); } else { WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__)); return error; } return error; } #endif /* WIRELESS_EXT > 17 */ static int wl_iw_get_aplist( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_scan_results_t *list; struct sockaddr *addr = (struct sockaddr *) extra; struct iw_quality qual[IW_MAX_AP]; wl_bss_info_t *bi = NULL; int error, i; uint buflen = dwrq->length; WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); if (!extra) return -EINVAL; /* Get scan results (too large to put on the stack) */ list = kmalloc(buflen, GFP_KERNEL); if (!list) return -ENOMEM; memset(list, 0, buflen); list->buflen = htod32(buflen); if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { WL_ERROR(("%d: Scan results error %d\n", __LINE__, error)); kfree(list); return error; } list->buflen = dtoh32(list->buflen); list->version = dtoh32(list->version); list->count = dtoh32(list->count); ASSERT(list->version == WL_BSS_INFO_VERSION); for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + buflen)); /* Infrastructure only */ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) continue; /* BSSID */ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); addr[dwrq->length].sa_family = ARPHRD_ETHER; qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); qual[dwrq->length].noise = 0x100 + bi->phy_noise; /* Updated qual, level, and noise */ #if WIRELESS_EXT > 18 qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; #else qual[dwrq->length].updated = 7; #endif /* WIRELESS_EXT > 18 */ dwrq->length++; } kfree(list); if (dwrq->length) { memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); /* Provided qual */ dwrq->flags = 1; } return 0; } static int wl_iw_iscan_get_aplist( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_scan_results_t *list; iscan_buf_t * buf; iscan_info_t *iscan = g_iscan; struct sockaddr *addr = (struct sockaddr *) extra; struct iw_quality qual[IW_MAX_AP]; wl_bss_info_t *bi = NULL; int i; WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); if (!extra) return -EINVAL; if ((!iscan) || (iscan->sysioc_pid < 0)) { return wl_iw_get_aplist(dev, info, dwrq, extra); } buf = iscan->list_hdr; /* Get scan results (too large to put on the stack) */ while (buf) { list = &((wl_iscan_results_t*)buf->iscan_buf)->results; ASSERT(list->version == WL_BSS_INFO_VERSION); bi = NULL; for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + WLC_IW_ISCAN_MAXLEN)); /* Infrastructure only */ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) continue; /* BSSID */ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); addr[dwrq->length].sa_family = ARPHRD_ETHER; qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); qual[dwrq->length].noise = 0x100 + bi->phy_noise; /* Updated qual, level, and noise */ #if WIRELESS_EXT > 18 qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; #else qual[dwrq->length].updated = 7; #endif /* WIRELESS_EXT > 18 */ dwrq->length++; } buf = buf->next; } if (dwrq->length) { memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); /* Provided qual */ dwrq->flags = 1; } return 0; } #if WIRELESS_EXT > 13 static int wl_iw_set_scan( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { wlc_ssid_t ssid; WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); /* default Broadcast scan */ memset(&ssid, 0, sizeof(ssid)); #if WIRELESS_EXT > 17 /* check for given essid */ if (wrqu->data.length == sizeof(struct iw_scan_req)) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { struct iw_scan_req *req = (struct iw_scan_req *)extra; ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); memcpy(ssid.SSID, req->essid, ssid.SSID_len); ssid.SSID_len = htod32(ssid.SSID_len); } } #endif /* Ignore error (most likely scan in progress) */ (void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid)); return 0; } static int wl_iw_iscan_set_scan( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { wlc_ssid_t ssid; iscan_info_t *iscan = g_iscan; WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); /* use backup if our thread is not successful */ if ((!iscan) || (iscan->sysioc_pid < 0)) { return wl_iw_set_scan(dev, info, wrqu, extra); } 
if (iscan->iscan_state == ISCAN_STATE_SCANING) {
		/* An incremental scan is already in flight; do not start another. */
		return 0;
	}

	/* default Broadcast scan */
	memset(&ssid, 0, sizeof(ssid));

#if WIRELESS_EXT > 17
	/* check for given essid */
	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
			struct iw_scan_req *req = (struct iw_scan_req *)extra;
			/* Clamp the caller's ESSID length to the driver buffer,
			 * then convert to device byte order for the firmware.
			 */
			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
			ssid.SSID_len = htod32(ssid.SSID_len);
		}
	}
#endif

	/* Rewind the result list, mark the scan in progress, then kick off the
	 * first iscan request; the timer polls for partial results until done.
	 */
	iscan->list_cur = iscan->list_hdr;
	iscan->iscan_state = ISCAN_STATE_SCANING;

	wl_iw_set_event_mask(dev);
	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);

	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
	add_timer(&iscan->timer);
	iscan->timer_on = 1;

	return 0;
}

#if WIRELESS_EXT > 17
/*
 * ie_is_wpa_ie - test whether the vendor-specific IE at *wpaie is a WPA IE
 * (payload starts with WPA_OUI followed by subtype 0x01).
 *
 * Returns TRUE when it matches.  Otherwise advances *tlvs / *tlvs_len past
 * this IE so the caller's bcm_parse_tlvs() loop resumes at the next element,
 * and returns FALSE.
 */
static bool
ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
{
	uint8 *ie = *wpaie;

	/* If the contents match the WPA_OUI and type=1 */
	if ((ie[1] >= 6) &&
		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
		return TRUE;
	}

	/* point to the next ie: skip id byte, length byte, and ie[1] data bytes */
	ie += ie[1] + 2;
	/* calculate the length of the rest of the buffer */
	*tlvs_len -= (int)(ie - *tlvs);
	/* update the pointer to the start of the buffer */
	*tlvs = ie;
	return FALSE;
}

/*
 * ie_is_wps_ie - same contract as ie_is_wpa_ie, but matches WPA_OUI with
 * subtype 0x04 (WPS).  On mismatch, advances *tlvs / *tlvs_len past the IE.
 */
static bool
ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
{
	/* Is this body of this tlvs entry a WPS entry?
If */ /* not update the tlvs buffer pointer/length */ uint8 *ie = *wpsie; /* If the contents match the WPA_OUI and type=4 */ if ((ie[1] >= 4) && !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) { return TRUE; } /* point to the next ie */ ie += ie[1] + 2; /* calculate the length of the rest of the buffer */ *tlvs_len -= (int)(ie - *tlvs); /* update the pointer to the start of the buffer */ *tlvs = ie; return FALSE; } #endif /* WIRELESS_EXT > 17 */ #ifdef BCMWAPI_WPI static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len, int uppercase) { size_t i; char *pos = buf, *end = buf + buf_size; int ret; if (buf_size == 0) return 0; for (i = 0; i < len; i++) { ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x", data[i]); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return pos - buf; } pos += ret; } end[-1] = '\0'; return pos - buf; } /** * wpa_snprintf_hex - Print data as a hex string into a buffer * @buf: Memory area to use as the output buffer * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1) * @data: Data to be printed * @len: Length of data in bytes * Returns: Number of bytes written */ static int wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len) { return _wpa_snprintf_hex(buf, buf_size, data, len, 0); } #endif /* BCMWAPI_WPI */ static int wl_iw_handle_scanresults_ies(char **event_p, char *end, struct iw_request_info *info, wl_bss_info_t *bi) { #if WIRELESS_EXT > 17 struct iw_event iwe; char *event; #ifdef BCMWAPI_WPI char *buf; int custom_event_len; #endif event = *event_p; if (bi->ie_length) { /* look for wpa/rsn ies in the ie list... 
*/ bcm_tlv_t *ie; uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); int ptr_len = bi->ie_length; /* OSEN IE */ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) && ie->len > WFA_OUI_LEN + 1 && !bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) && ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { /* look for WPS IE */ if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); break; } } ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); ptr_len = bi->ie_length; while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); break; } } #ifdef BCMWAPI_WPI ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); ptr_len = bi->ie_length; while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) { WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__)); #ifdef WAPI_IE_USE_GENIE iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); #else /* using CUSTOM event */ iwe.cmd = IWEVCUSTOM; custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2); 
iwe.u.data.length = custom_event_len; buf = kmalloc(custom_event_len+1, GFP_KERNEL); if (buf == NULL) { WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len)); break; } memcpy(buf, "wapi_ie=", 8); wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1); wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1); wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len); event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf); kfree(buf); #endif /* WAPI_IE_USE_GENIE */ break; } #endif /* BCMWAPI_WPI */ *event_p = event; } #endif /* WIRELESS_EXT > 17 */ return 0; } static int wl_iw_get_scan( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { channel_info_t ci; wl_scan_results_t *list; struct iw_event iwe; wl_bss_info_t *bi = NULL; int error, i, j; char *event = extra, *end = extra + dwrq->length, *value; uint buflen = dwrq->length; WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); if (!extra) return -EINVAL; /* Check for scan in progress */ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) return error; ci.scan_channel = dtoh32(ci.scan_channel); if (ci.scan_channel) return -EAGAIN; /* Get scan results (too large to put on the stack) */ list = kmalloc(buflen, GFP_KERNEL); if (!list) return -ENOMEM; memset(list, 0, buflen); list->buflen = htod32(buflen); if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { kfree(list); return error; } list->buflen = dtoh32(list->buflen); list->version = dtoh32(list->version); list->count = dtoh32(list->count); ASSERT(list->version == WL_BSS_INFO_VERSION); for (i = 0; i < list->count && i < IW_MAX_AP; i++) { bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + buflen)); /* First entry must be the BSSID */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); /* SSID */ iwe.u.data.length = dtoh32(bi->SSID_len); iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); /* Mode */ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { iwe.cmd = SIOCGIWMODE; if (dtoh16(bi->capability) & DOT11_CAP_ESS) iwe.u.mode = IW_MODE_INFRA; else iwe.u.mode = IW_MODE_ADHOC; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); } /* Channel */ iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), (CHSPEC_IS2G(bi->chanspec)) ? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); iwe.u.freq.e = 6; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); /* Channel quality */ iwe.cmd = IWEVQUAL; iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); iwe.u.qual.noise = 0x100 + bi->phy_noise; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); /* WPA, WPA2, WPS, WAPI IEs */ wl_iw_handle_scanresults_ies(&event, end, info, bi); /* Encryption */ iwe.cmd = SIOCGIWENCODE; if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); /* Rates */ if (bi->rateset.count) { value = event + IW_EV_LCP_LEN; iwe.cmd = SIOCGIWRATE; /* Those two flags are ignored... 
*/ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, IW_EV_PARAM_LEN); } event = value; } } kfree(list); dwrq->length = event - extra; dwrq->flags = 0; /* todo */ return 0; } static int wl_iw_iscan_get_scan( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_scan_results_t *list; struct iw_event iwe; wl_bss_info_t *bi = NULL; int ii, j; int apcnt; char *event = extra, *end = extra + dwrq->length, *value; iscan_info_t *iscan = g_iscan; iscan_buf_t * p_buf; WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); if (!extra) return -EINVAL; /* use backup if our thread is not successful */ if ((!iscan) || (iscan->sysioc_pid < 0)) { return wl_iw_get_scan(dev, info, dwrq, extra); } /* Check for scan in progress */ if (iscan->iscan_state == ISCAN_STATE_SCANING) return -EAGAIN; apcnt = 0; p_buf = iscan->list_hdr; /* Get scan results */ while (p_buf != iscan->list_cur) { list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; if (list->version != WL_BSS_INFO_VERSION) { WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version)); } bi = NULL; for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) { bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + WLC_IW_ISCAN_MAXLEN)); /* overflow check cover fields before wpa IEs */ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >= end) return -E2BIG; /* First entry must be the BSSID */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); /* SSID */ iwe.u.data.length = dtoh32(bi->SSID_len); iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); /* Mode */ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { iwe.cmd = SIOCGIWMODE; if (dtoh16(bi->capability) & DOT11_CAP_ESS) iwe.u.mode = IW_MODE_INFRA; else iwe.u.mode = IW_MODE_ADHOC; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); } /* Channel */ iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), (CHSPEC_IS2G(bi->chanspec)) ? 
WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); iwe.u.freq.e = 6; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); /* Channel quality */ iwe.cmd = IWEVQUAL; iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); iwe.u.qual.noise = 0x100 + bi->phy_noise; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); /* WPA, WPA2, WPS, WAPI IEs */ wl_iw_handle_scanresults_ies(&event, end, info, bi); /* Encryption */ iwe.cmd = SIOCGIWENCODE; if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); /* Rates */ if (bi->rateset.count <= sizeof(bi->rateset.rates)) { if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) return -E2BIG; value = event + IW_EV_LCP_LEN; iwe.cmd = SIOCGIWRATE; /* Those two flags are ignored... */ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, IW_EV_PARAM_LEN); } event = value; } } p_buf = p_buf->next; } /* while (p_buf) */ dwrq->length = event - extra; dwrq->flags = 0; /* todo */ return 0; } #endif /* WIRELESS_EXT > 13 */ static int wl_iw_set_essid( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wlc_ssid_t ssid; int error; WL_TRACE(("%s: SIOCSIWESSID\n", dev->name)); /* default Broadcast SSID */ memset(&ssid, 0, sizeof(ssid)); if (dwrq->length && extra) { #if WIRELESS_EXT > 20 ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length); #else ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1); #endif memcpy(ssid.SSID, extra, ssid.SSID_len); ssid.SSID_len = htod32(ssid.SSID_len); if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) return error; } /* If essid 
null then it is "iwconfig <interface> essid off" command */ else { scb_val_t scbval; bzero(&scbval, sizeof(scb_val_t)); if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) return error; } return 0; } static int wl_iw_get_essid( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wlc_ssid_t ssid; int error; WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); if (!extra) return -EINVAL; if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { WL_ERROR(("Error getting the SSID\n")); return error; } ssid.SSID_len = dtoh32(ssid.SSID_len); /* Get the current SSID */ memcpy(extra, ssid.SSID, ssid.SSID_len); dwrq->length = ssid.SSID_len; dwrq->flags = 1; /* active */ return 0; } static int wl_iw_set_nick( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); if (!extra) return -EINVAL; /* Check the size of the string */ if (dwrq->length > sizeof(iw->nickname)) return -E2BIG; memcpy(iw->nickname, extra, dwrq->length); iw->nickname[dwrq->length - 1] = '\0'; return 0; } static int wl_iw_get_nick( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); if (!extra) return -EINVAL; strcpy(extra, iw->nickname); dwrq->length = strlen(extra) + 1; return 0; } static int wl_iw_set_rate( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { wl_rateset_t rateset; int error, rate, i, error_bg, error_a; WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); /* Get current rateset */ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) return error; rateset.count = dtoh32(rateset.count); if (vwrq->value < 0) { /* Select maximum rate */ rate = rateset.rates[rateset.count - 1] & 0x7f; } else if (vwrq->value < rateset.count) { /* Select rate 
by rateset index */
		rate = rateset.rates[vwrq->value] & 0x7f;
	} else {
		/* Specified rate in bps */
		rate = vwrq->value / 500000;
	}

	if (vwrq->fixed) {
		/* Set rate override.  Since the chip is a/b/g-blind, both
		 * a_rate and bg_rate are enforced together.
		 */
		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);

		/* Succeed if at least one band accepted the override. */
		if (error_bg && error_a)
			return (error_bg | error_a);
	} else {
		/* Clear rate override.  Since the chip is a/b/g-blind, both
		 * a_rate and bg_rate are cleared together.
		 */
		/* 0 is for clearing rate override */
		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
		/* 0 is for clearing rate override */
		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);

		if (error_bg && error_a)
			return (error_bg | error_a);

		/* Remove rates above selected rate */
		for (i = 0; i < rateset.count; i++)
			if ((rateset.rates[i] & 0x7f) > rate)
				break;
		rateset.count = htod32(i);

		/* Set current rateset */
		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
			return error;
	}

	return 0;
}

/* wl_iw_get_rate - SIOCGIWRATE handler: report the current tx rate in bps. */
static int wl_iw_get_rate(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rate;

	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));

	/* Report the current tx rate; the device returns 500 kbps units. */
	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
		return error;
	rate = dtoh32(rate);
	vwrq->value = rate * 500000;

	return 0;
}

/* wl_iw_set_rts - SIOCSIWRTS handler: set the "rtsthresh" iovar. */
static int wl_iw_set_rts(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rts;

	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));

	/* "disabled" maps to the maximum threshold, i.e. RTS never used. */
	if (vwrq->disabled)
		rts = DOT11_DEFAULT_RTS_LEN;
	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
		return -EINVAL;
	else
		rts = vwrq->value;

	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
		return error;

	return 0;
}

/* wl_iw_get_rts - SIOCGIWRTS handler: report the current RTS threshold. */
static int wl_iw_get_rts(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rts;

	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));

	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
return error; vwrq->value = rts; vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN); vwrq->fixed = 1; return 0; } static int wl_iw_set_frag( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, frag; WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name)); if (vwrq->disabled) frag = DOT11_DEFAULT_FRAG_LEN; else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN) return -EINVAL; else frag = vwrq->value; if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag))) return error; return 0; } static int wl_iw_get_frag( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, fragthreshold; WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name)); if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold))) return error; vwrq->value = fragthreshold; vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN); vwrq->fixed = 1; return 0; } static int wl_iw_set_txpow( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, disable; uint16 txpwrmw; WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name)); /* Make sure radio is off or on as far as software is concerned */ disable = vwrq->disabled ? 
WL_RADIO_SW_DISABLE : 0; disable += WL_RADIO_SW_DISABLE << 16; disable = htod32(disable); if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable)))) return error; /* If Radio is off, nothing more to do */ if (disable & WL_RADIO_SW_DISABLE) return 0; /* Only handle mW */ if (!(vwrq->flags & IW_TXPOW_MWATT)) return -EINVAL; /* Value < 0 means just "on" or "off" */ if (vwrq->value < 0) return 0; if (vwrq->value > 0xffff) txpwrmw = 0xffff; else txpwrmw = (uint16)vwrq->value; error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw))); return error; } static int wl_iw_get_txpow( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, disable, txpwrdbm; uint8 result; WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) || (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm))) return error; disable = dtoh32(disable); result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE); vwrq->value = (int32)bcm_qdbm_to_mw(result); vwrq->fixed = 0; vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 
1 : 0; vwrq->flags = IW_TXPOW_MWATT; return 0; } #if WIRELESS_EXT > 10 static int wl_iw_set_retry( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, lrl, srl; WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name)); /* Do not handle "off" or "lifetime" */ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME)) return -EINVAL; /* Handle "[min|max] limit" */ if (vwrq->flags & IW_RETRY_LIMIT) { /* "max limit" or just "limit" */ #if WIRELESS_EXT > 20 if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) || !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) { #else if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) { #endif /* WIRELESS_EXT > 20 */ lrl = htod32(vwrq->value); if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl)))) return error; } /* "min limit" or just "limit" */ #if WIRELESS_EXT > 20 if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) || !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) { #else if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) { #endif /* WIRELESS_EXT > 20 */ srl = htod32(vwrq->value); if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl)))) return error; } } return 0; } static int wl_iw_get_retry( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, lrl, srl; WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name)); vwrq->disabled = 0; /* Can't be disabled */ /* Do not handle lifetime queries */ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) return -EINVAL; /* Get retry limits */ if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) || (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl)))) return error; lrl = dtoh32(lrl); srl = dtoh32(srl); /* Note : by default, display the min retry number */ if (vwrq->flags & IW_RETRY_MAX) { vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; vwrq->value = lrl; } else { vwrq->flags = 
IW_RETRY_LIMIT; vwrq->value = srl; if (srl != lrl) vwrq->flags |= IW_RETRY_MIN; } return 0; } #endif /* WIRELESS_EXT > 10 */ static int wl_iw_set_encode( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_wsec_key_t key; int error, val, wsec; WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name)); memset(&key, 0, sizeof(key)); if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { /* Find the current key */ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { val = htod32(key.index); if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) return error; val = dtoh32(val); if (val) break; } /* Default to 0 */ if (key.index == DOT11_MAX_DEFAULT_KEYS) key.index = 0; } else { key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; if (key.index >= DOT11_MAX_DEFAULT_KEYS) return -EINVAL; } /* Interpret "off" to mean no encryption */ wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED; if ((error = dev_wlc_intvar_set(dev, "wsec", wsec))) return error; /* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) { /* Just select a new current key */ val = htod32(key.index); if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val)))) return error; } else { key.len = dwrq->length; if (dwrq->length > sizeof(key.data)) return -EINVAL; memcpy(key.data, extra, dwrq->length); key.flags = WL_PRIMARY_KEY; switch (key.len) { case WEP1_KEY_SIZE: key.algo = CRYPTO_ALGO_WEP1; break; case WEP128_KEY_SIZE: key.algo = CRYPTO_ALGO_WEP128; break; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) case TKIP_KEY_SIZE: key.algo = CRYPTO_ALGO_TKIP; break; #endif case AES_KEY_SIZE: key.algo = CRYPTO_ALGO_AES_CCM; break; default: return -EINVAL; } /* Set the new key/index */ swap_key_from_BE(&key); if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)))) return error; } /* Interpret "restricted" to mean shared key authentication */ val = 
(dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
	val = htod32(val);
	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
		return error;

	return 0;
}

/* SIOCGIWENCODE: report the current key index and encryption/auth flags.
 * NOTE(review): 'key' is only zero-initialized and never fetched from the
 * firmware, so key.len stays 0 and no key material is ever returned to
 * userspace — presumably intentional (keys are write-only); confirm.
 */
static int
wl_iw_get_encode(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_wsec_key_t key;
	int error, val, wsec, auth;

	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));

	/* assure default values of zero for things we don't touch */
	bzero(&key, sizeof(wl_wsec_key_t));

	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
		/* Find the current key */
		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
			val = key.index;
			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
				return error;
			val = dtoh32(val);
			if (val)
				break;
		}
	} else
		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;

	/* Out-of-range (including "no primary found") silently maps to slot 0. */
	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
		key.index = 0;

	/* Get info */
	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
		return error;

	swap_key_to_BE(&key);

	wsec = dtoh32(wsec);
	auth = dtoh32(auth);
	/* Get key length */
	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);

	/* Get flags */
	dwrq->flags = key.index + 1;
	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
		/* Interpret "off" to mean no encryption */
		dwrq->flags |= IW_ENCODE_DISABLED;
	}
	if (auth) {
		/* Interpret "restricted" to mean shared key authentication */
		dwrq->flags |= IW_ENCODE_RESTRICTED;
	}

	/* Get key */
	if (dwrq->length && extra)
		memcpy(extra, key.data, dwrq->length);

	return 0;
}

/* SIOCSIWPOWER: map WEXT power management on/off to WLC_SET_PM. */
static int
wl_iw_set_power(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, pm;

	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));

	pm = vwrq->disabled ?
PM_OFF : PM_MAX; pm = htod32(pm); if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) return error; return 0; } static int wl_iw_get_power( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error, pm; WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)))) return error; pm = dtoh32(pm); vwrq->disabled = pm ? 0 : 1; vwrq->flags = IW_POWER_ALL_R; return 0; } #if WIRELESS_EXT > 17 static int wl_iw_set_wpaie( struct net_device *dev, struct iw_request_info *info, struct iw_point *iwp, char *extra ) { #if defined(BCMWAPI_WPI) uchar buf[WLC_IOCTL_SMLEN] = {0}; uchar *p = buf; int wapi_ie_size; WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name)); if (extra[0] == DOT11_MNG_WAPI_ID) { wapi_ie_size = iwp->length; memcpy(p, extra, iwp->length); dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size); } else #endif dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length); return 0; } static int wl_iw_get_wpaie( struct net_device *dev, struct iw_request_info *info, struct iw_point *iwp, char *extra ) { WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name)); iwp->length = 64; dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length); return 0; } static int wl_iw_set_encodeext( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_wsec_key_t key; int error; struct iw_encode_ext *iwe; WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name)); memset(&key, 0, sizeof(key)); iwe = (struct iw_encode_ext *)extra; /* disable encryption completely */ if (dwrq->flags & IW_ENCODE_DISABLED) { } /* get the key index */ key.index = 0; if (dwrq->flags & IW_ENCODE_INDEX) key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; key.len = iwe->key_len; /* Instead of bcast for ea address for default wep keys, driver needs it to be Null */ if (!ETHER_ISMULTI(iwe->addr.sa_data)) bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN); /* check for key index change */ if (key.len == 0) { 
		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
			WL_WSEC(("Changing the the primary Key to %d\n", key.index));
			/* change the key index .... */
			key.index = htod32(key.index);
			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
				&key.index, sizeof(key.index));
			if (error)
				return error;
		}
		/* key delete */
		else {
			/* zero-length key with no TX-key flag == delete this slot */
			swap_key_from_BE(&key);
			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
			if (error)
				return error;
		}
	}
	/* This case is used to allow an external 802.1x supplicant
	 * to pass the PMK to the in-driver supplicant for use in
	 * the 4-way handshake.
	 */
	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
		int j;
		wsec_pmk_t pmk;
		char keystring[WSEC_MAX_PSK_LEN + 1];
		char* charptr = keystring;
		uint len;

		/* copy the raw hex key to the appropriate format */
		/* NOTE(review): always reads WSEC_MAX_PSK_LEN/2 bytes from
		 * iwe->key regardless of iwe->key_len — assumes the caller
		 * supplies a full-length PMK; confirm against supplicant.
		 */
		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
			sprintf(charptr, "%02x", iwe->key[j]);
			charptr += 2;
		}
		len = strlen(keystring);
		pmk.key_len = htod16(len);
		bcopy(keystring, pmk.key, len);
		pmk.flags = htod16(WSEC_PASSPHRASE);

		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
		if (error)
			return error;
	}
	else {
		if (iwe->key_len > sizeof(key.data))
			return -EINVAL;

		WL_WSEC(("Setting the key index %d\n", key.index));
		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
			WL_WSEC(("key is a Primary Key\n"));
			key.flags = WL_PRIMARY_KEY;
		}

		bcopy((void *)iwe->key, key.data, iwe->key_len);

		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
			uint8 keybuf[8];
			/* Swap the TX-MIC and RX-MIC halves: WEXT and the
			 * firmware disagree on their order within the key.
			 */
			bcopy(&key.data[24], keybuf, sizeof(keybuf));
			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
			bcopy(keybuf, &key.data[16], sizeof(keybuf));
		}

		/* rx iv */
		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
			uchar *ivptr;
			/* Rebuild the 48-bit replay counter from the
			 * little-endian rx_seq bytes.
			 */
			ivptr = (uchar *)iwe->rx_seq;
			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
				(ivptr[3] << 8) | ivptr[2];
			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
			key.iv_initialized = TRUE;
		}

		switch (iwe->alg) {
			case IW_ENCODE_ALG_NONE:
				key.algo = CRYPTO_ALGO_OFF;
				break;
			case IW_ENCODE_ALG_WEP:
				if (iwe->key_len == WEP1_KEY_SIZE)
					key.algo = CRYPTO_ALGO_WEP1;
				else
					key.algo =
CRYPTO_ALGO_WEP128; break; case IW_ENCODE_ALG_TKIP: key.algo = CRYPTO_ALGO_TKIP; break; case IW_ENCODE_ALG_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; break; #ifdef BCMWAPI_WPI case IW_ENCODE_ALG_SM4: key.algo = CRYPTO_ALGO_SMS4; if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { key.flags &= ~WL_PRIMARY_KEY; } break; #endif default: break; } swap_key_from_BE(&key); dhd_wait_pend8021x(dev); error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); if (error) return error; } return 0; } #if WIRELESS_EXT > 17 struct { pmkid_list_t pmkids; pmkid_t foo[MAXPMKID-1]; } pmkid_list; static int wl_iw_set_pmksa( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { struct iw_pmksa *iwpmksa; uint i; char eabuf[ETHER_ADDR_STR_LEN]; pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid; WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name)); iwpmksa = (struct iw_pmksa *)extra; bzero((char *)eabuf, ETHER_ADDR_STR_LEN); if (iwpmksa->cmd == IW_PMKSA_FLUSH) { WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); bzero((char *)&pmkid_list, sizeof(pmkid_list)); } if (iwpmksa->cmd == IW_PMKSA_REMOVE) { pmkid_list_t pmkid, *pmkidptr; pmkidptr = &pmkid; bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN); bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); { uint j; WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j])); WL_TRACE(("\n")); } for (i = 0; i < pmkid_list.pmkids.npmkid; i++) if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, ETHER_ADDR_LEN)) break; for (; i < pmkid_list.pmkids.npmkid; i++) { bcopy(&pmkid_array[i+1].BSSID, &pmkid_array[i].BSSID, ETHER_ADDR_LEN); bcopy(&pmkid_array[i+1].PMKID, &pmkid_array[i].PMKID, WPA2_PMKID_LEN); } pmkid_list.pmkids.npmkid--; } if (iwpmksa->cmd == IW_PMKSA_ADD) { bcopy(&iwpmksa->bssid.sa_data[0], 
&pmkid_array[pmkid_list.pmkids.npmkid].BSSID, ETHER_ADDR_LEN); bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID, WPA2_PMKID_LEN); { uint j; uint k; k = pmkid_list.pmkids.npmkid; BCM_REFERENCE(k); WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ", bcm_ether_ntoa(&pmkid_array[k].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkid_array[k].PMKID[j])); WL_TRACE(("\n")); } pmkid_list.pmkids.npmkid++; } WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid)); for (i = 0; i < pmkid_list.pmkids.npmkid; i++) { uint j; WL_TRACE(("PMKID[%d]: %s = ", i, bcm_ether_ntoa(&pmkid_array[i].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkid_array[i].PMKID[j])); printf("\n"); } WL_TRACE(("\n")); dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list)); return 0; } #endif /* WIRELESS_EXT > 17 */ static int wl_iw_get_encodeext( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); return 0; } static int wl_iw_set_wpaauth( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error = 0; int paramid; int paramval; uint32 cipher_combined; int val = 0; wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name)); paramid = vwrq->flags & IW_AUTH_INDEX; paramval = vwrq->value; WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", dev->name, paramid, paramval)); switch (paramid) { case IW_AUTH_WPA_VERSION: /* supported wpa version disabled or wpa or wpa2 */ if (paramval & IW_AUTH_WPA_VERSION_DISABLED) val = WPA_AUTH_DISABLED; else if (paramval & (IW_AUTH_WPA_VERSION_WPA)) val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; else if (paramval & IW_AUTH_WPA_VERSION_WPA2) val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; #ifdef BCMWAPI_WPI else if (paramval & IW_AUTH_WAPI_VERSION_1) val = WAPI_AUTH_UNSPECIFIED; #endif 
		WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
			return error;
		break;

	case IW_AUTH_CIPHER_PAIRWISE:
	case IW_AUTH_CIPHER_GROUP: {
		int fbt_cap = 0;

		/* Remember the requested cipher; wsec is recomputed from the
		 * combination of the pairwise and group settings.
		 */
		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
			iw->pwsec = paramval;
		}
		else {
			iw->gwsec = paramval;
		}

		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
			return error;

		cipher_combined = iw->gwsec | iw->pwsec;
		val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
		if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
			val |= WEP_ENABLED;
		if (cipher_combined & IW_AUTH_CIPHER_TKIP)
			val |= TKIP_ENABLED;
		if (cipher_combined & IW_AUTH_CIPHER_CCMP)
			val |= AES_ENABLED;
#ifdef BCMWAPI_WPI
		val &= ~SMS4_ENABLED;
		if (cipher_combined & IW_AUTH_CIPHER_SMS4)
			val |= SMS4_ENABLED;
#endif

		if (iw->privacy_invoked && !val) {
			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
				"we're a WPS enrollee\n", dev->name, __FUNCTION__));
			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
				WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
				return error;
			}
		} else if (val) {
			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
				return error;
			}
		}

		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
			return error;

		/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
		 * handshake.
		 */
		if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
			if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
				if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
						return error;
				}
				else if (val == 0) {
					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
						return error;
				}
			}
		}
		break;
	}

	case IW_AUTH_KEY_MGMT:
		/* Refine the wpa_auth value already set via IW_AUTH_WPA_VERSION
		 * down to PSK vs 802.1X, optionally OR-ing in fast-transition.
		 */
		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
			return error;

		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
				val = WPA_AUTH_PSK;
			else
				val = WPA_AUTH_UNSPECIFIED;
			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
				val |= WPA2_AUTH_FT;
		}
		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
				val = WPA2_AUTH_PSK;
			else
				val = WPA2_AUTH_UNSPECIFIED;
			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
				val |= WPA2_AUTH_FT;
		}
#ifdef BCMWAPI_WPI
		if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
			val = WAPI_AUTH_UNSPECIFIED;
#endif
		WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
			return error;
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
		break;

	case IW_AUTH_80211_AUTH_ALG:
		/* open shared */
		WL_ERROR(("Setting the D11auth %d\n", paramval));
		if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
			val = 0;
		else if (paramval & IW_AUTH_ALG_SHARED_KEY)
			val = 1;
		else
			error = 1;
		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
			return error;
		break;

	case IW_AUTH_WPA_ENABLED:
		if (paramval == 0) {
			val = 0;
			WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
			error = dev_wlc_intvar_set(dev, "wpa_auth", val);
			return error;
		}
		else {
			/* If WPA is enabled, wpa_auth is set elsewhere */
		}
		break;

	case IW_AUTH_DROP_UNENCRYPTED:
		dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
		break;

#if WIRELESS_EXT > 17

	case IW_AUTH_ROAMING_CONTROL:
		WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
		/* driver control or user space app control */
		break;

	case IW_AUTH_PRIVACY_INVOKED: {
		int wsec;

		if (paramval == 0) {
			iw->privacy_invoked = FALSE;
			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
				return error;
			}
		} else {
			iw->privacy_invoked = TRUE;
			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
				return error;

			if (!WSEC_ENABLED(wsec)) {
				/* if privacy is true, but wsec is false, we are a WPS enrollee */
				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
					return error;
				}
			} else {
				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
					return error;
				}
			}
		}
		break;
	}

#endif /* WIRELESS_EXT > 17 */

#ifdef BCMWAPI_WPI

	case IW_AUTH_WAPI_ENABLED:
		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
			return error;
		if (paramval) {
			val |= SMS4_ENABLED;
			if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
				WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
					__FUNCTION__, val, error));
				return error;
			}
			if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
				WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
					__FUNCTION__, WAPI_AUTH_UNSPECIFIED, error));
				return error;
			}
		}
		break;

#endif /* BCMWAPI_WPI */

	default:
		break;
	}
	return 0;
}

#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))

/* SIOCGIWAUTH: report current authentication parameters back to WEXT. */
static int
wl_iw_get_wpaauth(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error;
	int paramid;
	int paramval = 0;
	int val;
	wl_iw_t *iw = IW_DEV_IF(dev);

	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));

	paramid = vwrq->flags &
IW_AUTH_INDEX;

	switch (paramid) {
	case IW_AUTH_WPA_VERSION:
		/* supported wpa version disabled or wpa or wpa2 */
		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
			return error;
		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
			paramval = IW_AUTH_WPA_VERSION_DISABLED;
		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
			paramval = IW_AUTH_WPA_VERSION_WPA;
		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
			paramval = IW_AUTH_WPA_VERSION_WPA2;
		break;

	/* Ciphers are echoed from the cached set-side values, not re-read
	 * from the firmware.
	 */
	case IW_AUTH_CIPHER_PAIRWISE:
		paramval = iw->pwsec;
		break;

	case IW_AUTH_CIPHER_GROUP:
		paramval = iw->gwsec;
		break;

	case IW_AUTH_KEY_MGMT:
		/* psk, 1x */
		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
			return error;
		if (VAL_PSK(val))
			paramval = IW_AUTH_KEY_MGMT_PSK;
		else
			paramval = IW_AUTH_KEY_MGMT_802_1X;
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
		break;

	case IW_AUTH_DROP_UNENCRYPTED:
		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
		break;

	case IW_AUTH_80211_AUTH_ALG:
		/* open, shared, leap */
		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
			return error;
		if (!val)
			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
		else
			paramval = IW_AUTH_ALG_SHARED_KEY;
		break;

	case IW_AUTH_WPA_ENABLED:
		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
			return error;
		if (val)
			paramval = TRUE;
		else
			paramval = FALSE;
		break;

#if WIRELESS_EXT > 17

	case IW_AUTH_ROAMING_CONTROL:
		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
		/* driver control or user space app control */
		break;

	case IW_AUTH_PRIVACY_INVOKED:
		paramval = iw->privacy_invoked;
		break;

#endif /* WIRELESS_EXT > 17 */
	}
	vwrq->value = paramval;
	return 0;
}
#endif /* WIRELESS_EXT > 17 */

/* Standard WEXT handler dispatch table; slot order is fixed by the
 * SIOCIWFIRST-relative ioctl numbers in <linux/wireless.h>.
 */
static const iw_handler wl_iw_handler[] =
{
	(iw_handler) wl_iw_config_commit,	/* SIOCSIWCOMMIT */
	(iw_handler) wl_iw_get_name,		/* SIOCGIWNAME */
	(iw_handler) NULL,			/* SIOCSIWNWID */
	(iw_handler) NULL,			/* SIOCGIWNWID */
	(iw_handler) wl_iw_set_freq,		/* SIOCSIWFREQ */
	(iw_handler) wl_iw_get_freq,		/* SIOCGIWFREQ */
	(iw_handler) wl_iw_set_mode,		/* SIOCSIWMODE */
	(iw_handler) wl_iw_get_mode,		/* SIOCGIWMODE */
	(iw_handler) NULL,			/* SIOCSIWSENS */
	(iw_handler) NULL,			/* SIOCGIWSENS */
	(iw_handler) NULL,			/* SIOCSIWRANGE */
	(iw_handler) wl_iw_get_range,		/* SIOCGIWRANGE */
	(iw_handler) NULL,			/* SIOCSIWPRIV */
	(iw_handler) NULL,			/* SIOCGIWPRIV */
	(iw_handler) NULL,			/* SIOCSIWSTATS */
	(iw_handler) NULL,			/* SIOCGIWSTATS */
	(iw_handler) wl_iw_set_spy,		/* SIOCSIWSPY */
	(iw_handler) wl_iw_get_spy,		/* SIOCGIWSPY */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) wl_iw_set_wap,		/* SIOCSIWAP */
	(iw_handler) wl_iw_get_wap,		/* SIOCGIWAP */
#if WIRELESS_EXT > 17
	(iw_handler) wl_iw_mlme,		/* SIOCSIWMLME */
#else
	(iw_handler) NULL,			/* -- hole -- */
#endif
	(iw_handler) wl_iw_iscan_get_aplist,	/* SIOCGIWAPLIST */
#if WIRELESS_EXT > 13
	(iw_handler) wl_iw_iscan_set_scan,	/* SIOCSIWSCAN */
	(iw_handler) wl_iw_iscan_get_scan,	/* SIOCGIWSCAN */
#else	/* WIRELESS_EXT > 13 */
	(iw_handler) NULL,			/* SIOCSIWSCAN */
	(iw_handler) NULL,			/* SIOCGIWSCAN */
#endif	/* WIRELESS_EXT > 13 */
	(iw_handler) wl_iw_set_essid,		/* SIOCSIWESSID */
	(iw_handler) wl_iw_get_essid,		/* SIOCGIWESSID */
	(iw_handler) wl_iw_set_nick,		/* SIOCSIWNICKN */
	(iw_handler) wl_iw_get_nick,		/* SIOCGIWNICKN */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) wl_iw_set_rate,		/* SIOCSIWRATE */
	(iw_handler) wl_iw_get_rate,		/* SIOCGIWRATE */
	(iw_handler) wl_iw_set_rts,		/* SIOCSIWRTS */
	(iw_handler) wl_iw_get_rts,		/* SIOCGIWRTS */
	(iw_handler) wl_iw_set_frag,		/* SIOCSIWFRAG */
	(iw_handler) wl_iw_get_frag,		/* SIOCGIWFRAG */
	(iw_handler) wl_iw_set_txpow,		/* SIOCSIWTXPOW */
	(iw_handler) wl_iw_get_txpow,		/* SIOCGIWTXPOW */
#if WIRELESS_EXT > 10
	(iw_handler) wl_iw_set_retry,		/* SIOCSIWRETRY */
	(iw_handler) wl_iw_get_retry,		/* SIOCGIWRETRY */
#endif /* WIRELESS_EXT > 10 */
	(iw_handler) wl_iw_set_encode,		/* SIOCSIWENCODE */
	(iw_handler) wl_iw_get_encode,		/* SIOCGIWENCODE */
	(iw_handler) wl_iw_set_power,		/* SIOCSIWPOWER */
	(iw_handler) wl_iw_get_power,		/* SIOCGIWPOWER */
#if WIRELESS_EXT > 17
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) wl_iw_set_wpaie,		/* SIOCSIWGENIE */
	(iw_handler) wl_iw_get_wpaie,		/* SIOCGIWGENIE */
	(iw_handler) wl_iw_set_wpaauth,		/* SIOCSIWAUTH */
	(iw_handler) wl_iw_get_wpaauth,		/* SIOCGIWAUTH */
	(iw_handler) wl_iw_set_encodeext,	/* SIOCSIWENCODEEXT */
	(iw_handler) wl_iw_get_encodeext,	/* SIOCGIWENCODEEXT */
	(iw_handler) wl_iw_set_pmksa,		/* SIOCSIWPMKSA */
#endif /* WIRELESS_EXT > 17 */
};

#if WIRELESS_EXT > 12
/* Private (driver-specific) ioctl numbers, starting at SIOCIWFIRSTPRIV;
 * must stay in the same order as wl_iw_priv_handler/wl_iw_priv_args.
 */
enum {
	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
	WL_IW_SET_VLANMODE,
	WL_IW_SET_PM,
#if WIRELESS_EXT > 17
#endif /* WIRELESS_EXT > 17 */
	WL_IW_SET_LAST
};

static iw_handler wl_iw_priv_handler[] = {
	wl_iw_set_leddc,
	wl_iw_set_vlanmode,
	wl_iw_set_pm,
#if WIRELESS_EXT > 17
#endif /* WIRELESS_EXT > 17 */
	NULL
};

static struct iw_priv_args wl_iw_priv_args[] = {
	{
		WL_IW_SET_LEDDC,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		0,
		"set_leddc"
	},
	{
		WL_IW_SET_VLANMODE,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		0,
		"set_vlanmode"
	},
	{
		WL_IW_SET_PM,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		0,
		"set_pm"
	},
#if WIRELESS_EXT > 17
#endif /* WIRELESS_EXT > 17 */
	{ 0, 0, 0, { 0 } }
};

const struct iw_handler_def wl_iw_handler_def =
{
	.num_standard = ARRAYSIZE(wl_iw_handler),
	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
	.standard = (iw_handler *) wl_iw_handler,
	.private = wl_iw_priv_handler,
	.private_args = wl_iw_priv_args,
#if WIRELESS_EXT >= 19
	get_wireless_stats: dhd_get_wireless_stats,
#endif /* WIRELESS_EXT >= 19 */
	};

#endif /* WIRELESS_EXT > 12 */

/* Top-level WEXT ioctl entry point: looks up the handler for 'cmd',
 * marshals the user buffer in/out of a kernel bounce buffer, and
 * dispatches.
 */
int
wl_iw_ioctl(
	struct net_device *dev,
	struct ifreq *rq,
	int cmd
)
{
	struct iwreq *wrq = (struct iwreq *) rq;
	struct iw_request_info info;
	iw_handler handler;
	char *extra =
NULL;
	size_t token_size = 1;
	int max_tokens = 0, ret = 0;

	if (cmd < SIOCIWFIRST ||
		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
		return -EOPNOTSUPP;

	/* Per-command bounce-buffer sizing: token_size * max_tokens bytes. */
	switch (cmd) {

	case SIOCSIWESSID:
	case SIOCGIWESSID:
	case SIOCSIWNICKN:
	case SIOCGIWNICKN:
		max_tokens = IW_ESSID_MAX_SIZE + 1;
		break;

	case SIOCSIWENCODE:
	case SIOCGIWENCODE:
#if WIRELESS_EXT > 17
	case SIOCSIWENCODEEXT:
	case SIOCGIWENCODEEXT:
#endif
		max_tokens = IW_ENCODING_TOKEN_MAX;
		break;

	case SIOCGIWRANGE:
		max_tokens = sizeof(struct iw_range);
		break;

	case SIOCGIWAPLIST:
		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
		max_tokens = IW_MAX_AP;
		break;

#if WIRELESS_EXT > 13
	case SIOCGIWSCAN:
		if (g_iscan)
			max_tokens = wrq->u.data.length;
		else
			max_tokens = IW_SCAN_MAX_DATA;
		break;
#endif /* WIRELESS_EXT > 13 */

	case SIOCSIWSPY:
		token_size = sizeof(struct sockaddr);
		max_tokens = IW_MAX_SPY;
		break;

	case SIOCGIWSPY:
		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
		max_tokens = IW_MAX_SPY;
		break;

	default:
		break;
	}

	if (max_tokens && wrq->u.data.pointer) {
		if (wrq->u.data.length > max_tokens)
			return -E2BIG;

		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
			return -ENOMEM;

		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
			kfree(extra);
			return -EFAULT;
		}
	}

	info.cmd = cmd;
	info.flags = 0;

	ret = handler(dev, &info, &wrq->u, extra);

	if (extra) {
		/* NOTE(review): copies wrq->u.data.length tokens back — the
		 * handler may have grown data.length past max_tokens; confirm
		 * handlers never exceed the allocated buffer.
		 */
		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
			kfree(extra);
			return -EFAULT;
		}

		kfree(extra);
	}

	return ret;
}

/* Convert a connection status event into a connection status string.
 * Returns TRUE if a matching connection status string was found.
*/ bool wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, char* stringBuf, uint buflen) { typedef struct conn_fail_event_map_t { uint32 inEvent; /* input: event type to match */ uint32 inStatus; /* input: event status code to match */ uint32 inReason; /* input: event reason code to match */ const char* outName; /* output: failure type */ const char* outCause; /* output: failure cause */ } conn_fail_event_map_t; /* Map of WLC_E events to connection failure strings */ # define WL_IW_DONT_CARE 9999 const conn_fail_event_map_t event_map [] = { /* inEvent inStatus inReason */ /* outName outCause */ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, "Conn", "Success"}, {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, "Conn", "NoNetworks"}, {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "ConfigMismatch"}, {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, "Conn", "EncrypMismatch"}, {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, "Conn", "RsnMismatch"}, {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, "Conn", "AuthTimeout"}, {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "AuthFail"}, {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, "Conn", "AuthNoAck"}, {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "ReassocFail"}, {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, "Conn", "ReassocTimeout"}, {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, "Conn", "ReassocAbort"}, {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, "Sup", "ConnSuccess"}, {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Sup", "WpaHandshakeFail"}, {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "Deauth"}, {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "DisassocInd"}, {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "Disassoc"} }; const char* name = ""; const char* cause = NULL; int i; /* Search the event map table for a matching event */ for (i = 0; i < 
sizeof(event_map)/sizeof(event_map[0]); i++) { const conn_fail_event_map_t* row = &event_map[i]; if (row->inEvent == event_type && (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) { name = row->outName; cause = row->outCause; break; } } /* If found, generate a connection failure string and return TRUE */ if (cause) { memset(stringBuf, 0, buflen); snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason); WL_TRACE(("Connection status: %s\n", stringBuf)); return TRUE; } else { return FALSE; } } #if (WIRELESS_EXT > 14) /* Check if we have received an event that indicates connection failure * If so, generate a connection failure report string. * The caller supplies a buffer to hold the generated string. */ static bool wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) { uint32 event = ntoh32(e->event_type); uint32 status = ntoh32(e->status); uint32 reason = ntoh32(e->reason); if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { return TRUE; } else { return FALSE; } } #endif /* WIRELESS_EXT > 14 */ #ifndef IW_CUSTOM_MAX #define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */ #endif /* IW_CUSTOM_MAX */ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) { #if WIRELESS_EXT > 13 union iwreq_data wrqu; char extra[IW_CUSTOM_MAX + 1]; int cmd = 0; uint32 event_type = ntoh32(e->event_type); uint16 flags = ntoh16(e->flags); uint32 datalen = ntoh32(e->datalen); uint32 status = ntoh32(e->status); memset(&wrqu, 0, sizeof(wrqu)); memset(extra, 0, sizeof(extra)); memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); wrqu.addr.sa_family = ARPHRD_ETHER; switch (event_type) { case WLC_E_TXFAIL: cmd = IWEVTXDROP; break; #if WIRELESS_EXT > 14 case WLC_E_JOIN: case WLC_E_ASSOC_IND: case WLC_E_REASSOC_IND: cmd = IWEVREGISTERED; break; case WLC_E_DEAUTH_IND: case WLC_E_DISASSOC_IND: cmd = SIOCGIWAP; 
wrqu.data.length = strlen(extra); bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); bzero(&extra, ETHER_ADDR_LEN); break; case WLC_E_LINK: case WLC_E_NDIS_LINK: cmd = SIOCGIWAP; wrqu.data.length = strlen(extra); if (!(flags & WLC_EVENT_MSG_LINK)) { bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); bzero(&extra, ETHER_ADDR_LEN); } break; case WLC_E_ACTION_FRAME: cmd = IWEVCUSTOM; if (datalen + 1 <= sizeof(extra)) { wrqu.data.length = datalen + 1; extra[0] = WLC_E_ACTION_FRAME; memcpy(&extra[1], data, datalen); WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); } break; case WLC_E_ACTION_FRAME_COMPLETE: cmd = IWEVCUSTOM; if (sizeof(status) + 1 <= sizeof(extra)) { wrqu.data.length = sizeof(status) + 1; extra[0] = WLC_E_ACTION_FRAME_COMPLETE; memcpy(&extra[1], &status, sizeof(status)); WL_TRACE(("wl_iw_event status %d \n", status)); } break; #endif /* WIRELESS_EXT > 14 */ #if WIRELESS_EXT > 17 case WLC_E_MIC_ERROR: { struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; cmd = IWEVMICHAELMICFAILURE; wrqu.data.length = sizeof(struct iw_michaelmicfailure); if (flags & WLC_EVENT_MSG_GROUP) micerrevt->flags |= IW_MICFAILURE_GROUP; else micerrevt->flags |= IW_MICFAILURE_PAIRWISE; memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); micerrevt->src_addr.sa_family = ARPHRD_ETHER; break; } case WLC_E_ASSOC_REQ_IE: cmd = IWEVASSOCREQIE; wrqu.data.length = datalen; if (datalen < sizeof(extra)) memcpy(extra, data, datalen); break; case WLC_E_ASSOC_RESP_IE: cmd = IWEVASSOCRESPIE; wrqu.data.length = datalen; if (datalen < sizeof(extra)) memcpy(extra, data, datalen); break; case WLC_E_PMKID_CACHE: { struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; pmkid_cand_list_t *pmkcandlist; pmkid_cand_t *pmkidcand; int count; if (data == NULL) break; cmd = IWEVPMKIDCAND; pmkcandlist = data; count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); wrqu.data.length = sizeof(struct iw_pmkid_cand); pmkidcand = pmkcandlist->pmkid_cand; while (count) { 
			/* One IWEVPMKIDCAND event per candidate in the list. */
			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
			if (pmkidcand->preauth)
				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
				ETHER_ADDR_LEN);
			wireless_send_event(dev, cmd, &wrqu, extra);
			pmkidcand++;
			count--;
		}
		break;
	}
#endif /* WIRELESS_EXT > 17 */

	case WLC_E_SCAN_COMPLETE:
#if WIRELESS_EXT > 14
		cmd = SIOCGIWSCAN;
#endif
		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
		/* Wake the iscan worker thread so it collects the results. */
		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
			up(&g_iscan->sysioc_sem);
		break;

	default:
		/* Cannot translate event */
		break;
	}

	if (cmd) {
		if (cmd == SIOCGIWSCAN)
			wireless_send_event(dev, cmd, &wrqu, NULL);
		else
			wireless_send_event(dev, cmd, &wrqu, extra);
	}

#if WIRELESS_EXT > 14
	/* Look for WLC events that indicate a connection failure.
	 * If found, generate an IWEVCUSTOM event.
	 */
	memset(extra, 0, sizeof(extra));
	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
		cmd = IWEVCUSTOM;
		wrqu.data.length = strlen(extra);
		wireless_send_event(dev, cmd, &wrqu, extra);
	}
#endif /* WIRELESS_EXT > 14 */
#endif /* WIRELESS_EXT > 13 */
}

/* Fill 'wstats' (quality/noise/discard counters) from firmware RSSI, PHY
 * noise, and the "counters" iovar.
 */
int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
{
	int res = 0;
	wl_cnt_t cnt;
	int phy_noise;
	int rssi;
	scb_val_t scb_val;

	phy_noise = 0;
	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
		goto done;

	phy_noise = dtoh32(phy_noise);
	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));

	scb_val.val = 0;
	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
		goto done;

	rssi = dtoh32(scb_val.val);
	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
	/* Bucket the dBm RSSI into the 0..5 WEXT quality scale. */
	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
		wstats->qual.qual = 0;
	else if (rssi <= WL_IW_RSSI_VERY_LOW)
		wstats->qual.qual = 1;
	else if (rssi <= WL_IW_RSSI_LOW)
		wstats->qual.qual = 2;
	else if (rssi <= WL_IW_RSSI_GOOD)
		wstats->qual.qual = 3;
	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
		wstats->qual.qual = 4;
	else
		wstats->qual.qual = 5;

	/* Wraps to 0 if RSSI
 is 0 */
	wstats->qual.level = 0x100 + rssi;
	wstats->qual.noise = 0x100 + phy_noise;
#if WIRELESS_EXT > 18
	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
#else
	wstats->qual.updated |= 7;
#endif /* WIRELESS_EXT > 18 */

#if WIRELESS_EXT > 11
	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));

	memset(&cnt, 0, sizeof(wl_cnt_t));
	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
	if (res)
	{
		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
		goto done;
	}

	cnt.version = dtoh16(cnt.version);
	if (cnt.version != WL_CNT_T_VERSION) {
		/* Struct layout mismatch with firmware — don't interpret it. */
		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
			WL_CNT_T_VERSION, cnt.version));
		goto done;
	}

	wstats->discard.nwid = 0;
	wstats->discard.code = dtoh32(cnt.rxundec);
	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
	wstats->discard.retries = dtoh32(cnt.txfail);
	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
	wstats->miss.beacon = 0;

	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));

#endif /* WIRELESS_EXT > 11 */

done:
	return res;
}

/* iscan watchdog: when the scan isn't idle, poke the worker thread to poll
 * for partial results again.
 */
static void
wl_iw_timerfunc(ulong data)
{
	iscan_info_t *iscan = (iscan_info_t *)data;
	iscan->timer_on = 0;
	if (iscan->iscan_state != ISCAN_STATE_IDLE) {
		WL_TRACE(("timer trigger\n"));
		up(&iscan->sysioc_sem);
	}
}

static void
wl_iw_set_event_mask(struct net_device *dev) { char eventmask[WL_EVENTING_MASK_LEN]; char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf)); bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); setbit(eventmask, WLC_E_SCAN_COMPLETE); dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); } static int wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) { int err = 0; memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN); params->bss_type = DOT11_BSSTYPE_ANY; params->scan_type = 0; params->nprobes = -1; params->active_time = -1; params->passive_time = -1; params->home_time = -1; params->channel_num = 0; params->nprobes = htod32(params->nprobes); params->active_time = htod32(params->active_time); params->passive_time = htod32(params->passive_time); params->home_time = htod32(params->home_time); if (ssid && ssid->SSID_len) memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t)); return err; } static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action) { int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); wl_iscan_params_t *params; int err = 0; if (ssid && ssid->SSID_len) { params_size += sizeof(wlc_ssid_t); } params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); if (params == NULL) { return -ENOMEM; } memset(params, 0, params_size); ASSERT(params_size < WLC_IOCTL_SMLEN); err = wl_iw_iscan_prep(&params->params, ssid); if (!err) { params->version = htod32(ISCAN_REQ_VERSION); params->action = htod16(action); params->scan_duration = htod16(0); /* params_size += OFFSETOF(wl_iscan_params_t, params); */ (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size, iscan->ioctlbuf, WLC_IOCTL_SMLEN); } kfree(params); return err; } static uint32 wl_iw_iscan_get(iscan_info_t *iscan) { iscan_buf_t * buf; iscan_buf_t * ptr; wl_iscan_results_t * list_buf; wl_iscan_results_t list; 
wl_scan_results_t *results; uint32 status; /* buffers are allocated on demand */ if (iscan->list_cur) { buf = iscan->list_cur; iscan->list_cur = buf->next; } else { buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL); if (!buf) return WL_SCAN_RESULTS_ABORTED; buf->next = NULL; if (!iscan->list_hdr) iscan->list_hdr = buf; else { ptr = iscan->list_hdr; while (ptr->next) { ptr = ptr->next; } ptr->next = buf; } } memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); list_buf = (wl_iscan_results_t*)buf->iscan_buf; results = &list_buf->results; results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; results->version = 0; results->count = 0; memset(&list, 0, sizeof(list)); list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); (void) dev_iw_iovar_getbuf( iscan->dev, "iscanresults", &list, WL_ISCAN_RESULTS_FIXED_SIZE, buf->iscan_buf, WLC_IW_ISCAN_MAXLEN); results->buflen = dtoh32(results->buflen); results->version = dtoh32(results->version); results->count = dtoh32(results->count); WL_TRACE(("results->count = %d\n", results->count)); WL_TRACE(("results->buflen = %d\n", results->buflen)); status = dtoh32(list_buf->status); return status; } static void wl_iw_send_scan_complete(iscan_info_t *iscan) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); /* wext expects to get no data for SIOCGIWSCAN Event */ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); } static int _iscan_sysioc_thread(void *data) { uint32 status; iscan_info_t *iscan = (iscan_info_t *)data; DAEMONIZE("iscan_sysioc"); status = WL_SCAN_RESULTS_PARTIAL; while (down_interruptible(&iscan->sysioc_sem) == 0) { if (iscan->timer_on) { del_timer(&iscan->timer); iscan->timer_on = 0; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_lock(); #endif status = wl_iw_iscan_get(iscan); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_unlock(); #endif switch (status) { case WL_SCAN_RESULTS_PARTIAL: WL_TRACE(("iscanresults incomplete\n")); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_lock(); #endif /* make 
sure our buffer size is enough before going next round */ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_unlock(); #endif /* Reschedule the timer */ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); add_timer(&iscan->timer); iscan->timer_on = 1; break; case WL_SCAN_RESULTS_SUCCESS: WL_TRACE(("iscanresults complete\n")); iscan->iscan_state = ISCAN_STATE_IDLE; wl_iw_send_scan_complete(iscan); break; case WL_SCAN_RESULTS_PENDING: WL_TRACE(("iscanresults pending\n")); /* Reschedule the timer */ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); add_timer(&iscan->timer); iscan->timer_on = 1; break; case WL_SCAN_RESULTS_ABORTED: WL_TRACE(("iscanresults aborted\n")); iscan->iscan_state = ISCAN_STATE_IDLE; wl_iw_send_scan_complete(iscan); break; default: WL_TRACE(("iscanresults returned unknown status %d\n", status)); break; } } complete_and_exit(&iscan->sysioc_exited, 0); } int wl_iw_attach(struct net_device *dev, void * dhdp) { iscan_info_t *iscan = NULL; if (!dev) return 0; iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); if (!iscan) return -ENOMEM; memset(iscan, 0, sizeof(iscan_info_t)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) iscan->kthread = NULL; #endif iscan->sysioc_pid = -1; /* we only care about main interface so save a global here */ g_iscan = iscan; iscan->dev = dev; iscan->iscan_state = ISCAN_STATE_IDLE; /* Set up the timer */ iscan->timer_ms = 2000; init_timer(&iscan->timer); iscan->timer.data = (ulong)iscan; iscan->timer.function = wl_iw_timerfunc; sema_init(&iscan->sysioc_sem, 0); init_completion(&iscan->sysioc_exited); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc"); iscan->sysioc_pid = iscan->kthread->pid; #else iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0); #endif if (iscan->sysioc_pid < 0) return -ENOMEM; return 0; } void wl_iw_detach(void) { 
iscan_buf_t *buf; iscan_info_t *iscan = g_iscan; if (!iscan) return; if (iscan->sysioc_pid >= 0) { KILL_PROC(iscan->sysioc_pid, SIGTERM); wait_for_completion(&iscan->sysioc_exited); } while (iscan->list_hdr) { buf = iscan->list_hdr->next; kfree(iscan->list_hdr); iscan->list_hdr = buf; } kfree(iscan); g_iscan = NULL; } #endif /* USE_IW */
gpl-2.0
lx324310/linux
drivers/gpu/drm/gma500/mdfld_intel_display.c
732
29145
/* * Copyright © 2006-2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/pm_runtime.h> #include <drm/drmP.h> #include "psb_intel_reg.h" #include "gma_display.h" #include "framebuffer.h" #include "mdfld_output.h" #include "mdfld_dsi_output.h" /* Hardcoded currently */ static int ksel = KSEL_CRYSTAL_19; struct psb_intel_range_t { int min, max; }; struct mrst_limit_t { struct psb_intel_range_t dot, m, p1; }; struct mrst_clock_t { /* derived values */ int dot; int m; int p1; }; #define COUNT_MAX 0x10000000 void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; const struct psb_offset *map = &dev_priv->regmap[pipe]; int count, temp; switch (pipe) { case 0: case 1: case 2: break; default: DRM_ERROR("Illegal Pipe Number.\n"); return; } /* FIXME JLIU7_PO */ gma_wait_for_vblank(dev); return; /* Wait for for the pipe disable to take effect. 
*/ for (count = 0; count < COUNT_MAX; count++) { temp = REG_READ(map->conf); if ((temp & PIPEACONF_PIPE_STATE) == 0) break; } } void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; const struct psb_offset *map = &dev_priv->regmap[pipe]; int count, temp; switch (pipe) { case 0: case 1: case 2: break; default: DRM_ERROR("Illegal Pipe Number.\n"); return; } /* FIXME JLIU7_PO */ gma_wait_for_vblank(dev); return; /* Wait for for the pipe enable to take effect. */ for (count = 0; count < COUNT_MAX; count++) { temp = REG_READ(map->conf); if ((temp & PIPEACONF_PIPE_STATE) == 1) break; } } /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ static int psb_intel_panel_fitter_pipe(struct drm_device *dev) { u32 pfit_control; pfit_control = REG_READ(PFIT_CONTROL); /* See if the panel fitter is in use */ if ((pfit_control & PFIT_ENABLE) == 0) return -1; /* 965 can place panel fitter on either pipe */ return (pfit_control >> 29) & 0x3; } static struct drm_device globle_dev; void mdfld__intel_plane_set_alpha(int enable) { struct drm_device *dev = &globle_dev; int dspcntr_reg = DSPACNTR; u32 dspcntr; dspcntr = REG_READ(dspcntr_reg); if (enable) { dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA; dspcntr |= DISPPLANE_32BPP; } else { dspcntr &= ~DISPPLANE_32BPP; dspcntr |= DISPPLANE_32BPP_NO_ALPHA; } REG_WRITE(dspcntr_reg, dspcntr); } static int check_fb(struct drm_framebuffer *fb) { if (!fb) return 0; switch (fb->bits_per_pixel) { case 8: case 16: case 24: case 32: return 0; default: DRM_ERROR("Unknown color depth\n"); return -EINVAL; } } static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb); int pipe = 
gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; u32 dspcntr; int ret; memcpy(&globle_dev, dev, sizeof(struct drm_device)); dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe); /* no fb bound */ if (!crtc->primary->fb) { dev_dbg(dev->dev, "No FB bound\n"); return 0; } ret = check_fb(crtc->primary->fb); if (ret) return ret; if (pipe > 2) { DRM_ERROR("Illegal Pipe Number.\n"); return -EINVAL; } if (!gma_power_begin(dev, true)) return 0; start = psbfb->gtt->offset; offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8); REG_WRITE(map->stride, crtc->primary->fb->pitches[0]); dspcntr = REG_READ(map->cntr); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (crtc->primary->fb->bits_per_pixel) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: if (crtc->primary->fb->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; break; case 24: case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; } REG_WRITE(map->cntr, dspcntr); dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", start, offset, x, y); REG_WRITE(map->linoff, offset); REG_READ(map->linoff); REG_WRITE(map->surf, start); REG_READ(map->surf); gma_power_end(dev); return 0; } /* * Disable the pipe, plane and pll. 
* */ void mdfld_disable_crtc(struct drm_device *dev, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 temp; dev_dbg(dev->dev, "pipe = %d\n", pipe); if (pipe != 1) mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe), HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); /* Disable display plane */ temp = REG_READ(map->cntr); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { REG_WRITE(map->cntr, temp & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(map->base, REG_READ(map->base)); REG_READ(map->base); } /* FIXME_JLIU7 MDFLD_PO revisit */ /* Next, disable display pipes */ temp = REG_READ(map->conf); if ((temp & PIPEACONF_ENABLE) != 0) { temp &= ~PIPEACONF_ENABLE; temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; REG_WRITE(map->conf, temp); REG_READ(map->conf); /* Wait for for the pipe disable to take effect. */ mdfldWaitForPipeDisable(dev, pipe); } temp = REG_READ(map->dpll); if (temp & DPLL_VCO_ENABLE) { if ((pipe != 1 && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) || pipe == 1) { temp &= ~(DPLL_VCO_ENABLE); REG_WRITE(map->dpll, temp); REG_READ(map->dpll); /* Wait for the clocks to turn off. */ /* FIXME_MDFLD PO may need more delay */ udelay(500); if (!(temp & MDFLD_PWR_GATE_EN)) { /* gating power of DPLL */ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(5000); } } } } /** * Sets the power management mode of the pipe and plane. * * This code should probably grow support for turning the cursor off and back * on appropriately at the same time as we're turning the pipe off/on. 
*/ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 pipeconf = dev_priv->pipeconf[pipe]; u32 temp; int timeout = 0; dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe); /* Note: Old code uses pipe a stat for pipe b but that appears to be a bug */ if (!gma_power_begin(dev, true)) return; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: /* Enable the DPLL */ temp = REG_READ(map->dpll); if ((temp & DPLL_VCO_ENABLE) == 0) { /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */ if (temp & MDFLD_PWR_GATE_EN) { temp &= ~MDFLD_PWR_GATE_EN; REG_WRITE(map->dpll, temp); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); } REG_WRITE(map->dpll, temp); REG_READ(map->dpll); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); REG_READ(map->dpll); /** * wait for DSI PLL to lock * NOTE: only need to poll status of pipe 0 and pipe 1, * since both MIPI pipes share the same PLL. */ while ((pipe != 2) && (timeout < 20000) && !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { udelay(150); timeout++; } } /* Enable the plane */ temp = REG_READ(map->cntr); if ((temp & DISPLAY_PLANE_ENABLE) == 0) { REG_WRITE(map->cntr, temp | DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(map->base, REG_READ(map->base)); } /* Enable the pipe */ temp = REG_READ(map->conf); if ((temp & PIPEACONF_ENABLE) == 0) { REG_WRITE(map->conf, pipeconf); /* Wait for for the pipe enable to take effect. 
*/ mdfldWaitForPipeEnable(dev, pipe); } /*workaround for sighting 3741701 Random X blank display*/ /*perform w/a in video mode only on pipe A or C*/ if (pipe == 0 || pipe == 2) { REG_WRITE(map->status, REG_READ(map->status)); msleep(100); if (PIPE_VBLANK_STATUS & REG_READ(map->status)) dev_dbg(dev->dev, "OK"); else { dev_dbg(dev->dev, "STUCK!!!!"); /*shutdown controller*/ temp = REG_READ(map->cntr); REG_WRITE(map->cntr, temp & ~DISPLAY_PLANE_ENABLE); REG_WRITE(map->base, REG_READ(map->base)); /*mdfld_dsi_dpi_shut_down(dev, pipe);*/ REG_WRITE(0xb048, 1); msleep(100); temp = REG_READ(map->conf); temp &= ~PIPEACONF_ENABLE; REG_WRITE(map->conf, temp); msleep(100); /*wait for pipe disable*/ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0); msleep(100); REG_WRITE(0xb004, REG_READ(0xb004)); /* try to bring the controller back up again*/ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1); temp = REG_READ(map->cntr); REG_WRITE(map->cntr, temp | DISPLAY_PLANE_ENABLE); REG_WRITE(map->base, REG_READ(map->base)); /*mdfld_dsi_dpi_turn_on(dev, pipe);*/ REG_WRITE(0xb048, 2); msleep(100); temp = REG_READ(map->conf); temp |= PIPEACONF_ENABLE; REG_WRITE(map->conf, temp); } } gma_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ break; case DRM_MODE_DPMS_OFF: /* Give the overlay scaler a chance to disable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ if (pipe != 1) mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe), HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); /* Disable the VGA plane that we never use */ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Disable display plane */ temp = REG_READ(map->cntr); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { REG_WRITE(map->cntr, temp & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(map->base, REG_READ(map->base)); REG_READ(map->base); } /* Next, disable display pipes */ temp = REG_READ(map->conf); if ((temp & PIPEACONF_ENABLE) 
!= 0) { temp &= ~PIPEACONF_ENABLE; temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; REG_WRITE(map->conf, temp); REG_READ(map->conf); /* Wait for for the pipe disable to take effect. */ mdfldWaitForPipeDisable(dev, pipe); } temp = REG_READ(map->dpll); if (temp & DPLL_VCO_ENABLE) { if ((pipe != 1 && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) || pipe == 1) { temp &= ~(DPLL_VCO_ENABLE); REG_WRITE(map->dpll, temp); REG_READ(map->dpll); /* Wait for the clocks to turn off. */ /* FIXME_MDFLD PO may need more delay */ udelay(500); } } break; } gma_power_end(dev); } #define MDFLD_LIMT_DPLL_19 0 #define MDFLD_LIMT_DPLL_25 1 #define MDFLD_LIMT_DPLL_83 2 #define MDFLD_LIMT_DPLL_100 3 #define MDFLD_LIMT_DSIPLL_19 4 #define MDFLD_LIMT_DSIPLL_25 5 #define MDFLD_LIMT_DSIPLL_83 6 #define MDFLD_LIMT_DSIPLL_100 7 #define MDFLD_DOT_MIN 19750 #define MDFLD_DOT_MAX 120000 #define MDFLD_DPLL_M_MIN_19 113 #define MDFLD_DPLL_M_MAX_19 155 #define MDFLD_DPLL_P1_MIN_19 2 #define MDFLD_DPLL_P1_MAX_19 10 #define MDFLD_DPLL_M_MIN_25 101 #define MDFLD_DPLL_M_MAX_25 130 #define MDFLD_DPLL_P1_MIN_25 2 #define MDFLD_DPLL_P1_MAX_25 10 #define MDFLD_DPLL_M_MIN_83 64 #define MDFLD_DPLL_M_MAX_83 64 #define MDFLD_DPLL_P1_MIN_83 2 #define MDFLD_DPLL_P1_MAX_83 2 #define MDFLD_DPLL_M_MIN_100 64 #define MDFLD_DPLL_M_MAX_100 64 #define MDFLD_DPLL_P1_MIN_100 2 #define MDFLD_DPLL_P1_MAX_100 2 #define MDFLD_DSIPLL_M_MIN_19 131 #define MDFLD_DSIPLL_M_MAX_19 175 #define MDFLD_DSIPLL_P1_MIN_19 3 #define MDFLD_DSIPLL_P1_MAX_19 8 #define MDFLD_DSIPLL_M_MIN_25 97 #define MDFLD_DSIPLL_M_MAX_25 140 #define MDFLD_DSIPLL_P1_MIN_25 3 #define MDFLD_DSIPLL_P1_MAX_25 9 #define MDFLD_DSIPLL_M_MIN_83 33 #define MDFLD_DSIPLL_M_MAX_83 92 #define MDFLD_DSIPLL_P1_MIN_83 2 #define MDFLD_DSIPLL_P1_MAX_83 3 #define MDFLD_DSIPLL_M_MIN_100 97 #define MDFLD_DSIPLL_M_MAX_100 140 #define MDFLD_DSIPLL_P1_MIN_100 3 #define MDFLD_DSIPLL_P1_MAX_100 9 static const struct mrst_limit_t mdfld_limits[] = { { /* 
MDFLD_LIMT_DPLL_19 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19}, .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19}, }, { /* MDFLD_LIMT_DPLL_25 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25}, .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25}, }, { /* MDFLD_LIMT_DPLL_83 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83}, .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83}, }, { /* MDFLD_LIMT_DPLL_100 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100}, .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100}, }, { /* MDFLD_LIMT_DSIPLL_19 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19}, .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19}, }, { /* MDFLD_LIMT_DSIPLL_25 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25}, .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25}, }, { /* MDFLD_LIMT_DSIPLL_83 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83}, .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83}, }, { /* MDFLD_LIMT_DSIPLL_100 */ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100}, .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100}, }, }; #define MDFLD_M_MIN 21 #define MDFLD_M_MAX 180 static const u32 mdfld_m_converts[] = { /* M configuration table from 9-bit LFSR table */ 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */ 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 
40 */ 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */ 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */ 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */ 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */ 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */ 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */ 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */ 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */ 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */ 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */ 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */ }; static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc) { const struct mrst_limit_t *limit = NULL; struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; else if (ksel == KSEL_BYPASS_25) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25]; else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166)) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83]; else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; else if (ksel == KSEL_BYPASS_25) limit = &mdfld_limits[MDFLD_LIMT_DPLL_25]; else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166)) limit = &mdfld_limits[MDFLD_LIMT_DPLL_83]; else 
if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) limit = &mdfld_limits[MDFLD_LIMT_DPLL_100]; } else { limit = NULL; dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n"); } return limit; } /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ static void mdfld_clock(int refclk, struct mrst_clock_t *clock) { clock->dot = (refclk * clock->m) / clock->p1; } /** * Returns a set of divisors for the desired target clock with the given refclk, * or FALSE. Divisor values are the actual divisors for */ static bool mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk, struct mrst_clock_t *best_clock) { struct mrst_clock_t clock; const struct mrst_limit_t *limit = mdfld_limit(crtc); int err = target; memset(best_clock, 0, sizeof(*best_clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; mdfld_clock(refclk, &clock); this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } return err != target; } static int mdfld_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_psb_private *dev_priv = dev->dev_private; int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; int refclk = 0; int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0; struct mrst_clock_t clock; bool ok; u32 dpll = 0, fp = 0; bool is_mipi = false, is_mipi2 = false, is_hdmi = false; struct drm_mode_config *mode_config = &dev->mode_config; struct gma_encoder *gma_encoder = NULL; uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; struct drm_encoder *encoder; struct drm_connector *connector; int timeout = 0; int ret; dev_dbg(dev->dev, "pipe = 
0x%x\n", pipe); #if 0 if (pipe == 1) { if (!gma_power_begin(dev, true)) return 0; android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); goto mrst_crtc_mode_set_exit; } #endif ret = check_fb(crtc->primary->fb); if (ret) return ret; dev_dbg(dev->dev, "adjusted_hdisplay = %d\n", adjusted_mode->hdisplay); dev_dbg(dev->dev, "adjusted_vdisplay = %d\n", adjusted_mode->vdisplay); dev_dbg(dev->dev, "adjusted_hsync_start = %d\n", adjusted_mode->hsync_start); dev_dbg(dev->dev, "adjusted_hsync_end = %d\n", adjusted_mode->hsync_end); dev_dbg(dev->dev, "adjusted_htotal = %d\n", adjusted_mode->htotal); dev_dbg(dev->dev, "adjusted_vsync_start = %d\n", adjusted_mode->vsync_start); dev_dbg(dev->dev, "adjusted_vsync_end = %d\n", adjusted_mode->vsync_end); dev_dbg(dev->dev, "adjusted_vtotal = %d\n", adjusted_mode->vtotal); dev_dbg(dev->dev, "adjusted_clock = %d\n", adjusted_mode->clock); dev_dbg(dev->dev, "hdisplay = %d\n", mode->hdisplay); dev_dbg(dev->dev, "vdisplay = %d\n", mode->vdisplay); if (!gma_power_begin(dev, true)) return 0; memcpy(&gma_crtc->saved_mode, mode, sizeof(struct drm_display_mode)); memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode)); list_for_each_entry(connector, &mode_config->connector_list, head) { if (!connector) continue; encoder = connector->encoder; if (!encoder) continue; if (encoder->crtc != crtc) continue; gma_encoder = gma_attached_encoder(connector); switch (gma_encoder->type) { case INTEL_OUTPUT_MIPI: is_mipi = true; break; case INTEL_OUTPUT_MIPI2: is_mipi2 = true; break; case INTEL_OUTPUT_HDMI: is_hdmi = true; break; } } /* Disable the VGA plane that we never use */ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Disable the panel fitter if it was on our pipe */ if (psb_intel_panel_fitter_pipe(dev) == pipe) REG_WRITE(PFIT_CONTROL, 0); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. 
*/ if (pipe == 1) { /* FIXME: To make HDMI display with 864x480 (TPO), 480x864 * (PYR) or 480x854 (TMD), set the sprite width/height and * souce image size registers with the adjusted mode for * pipe B. */ /* * The defined sprite rectangle must always be completely * contained within the displayable area of the screen image * (frame buffer). */ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16) | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1)); /* Set the CRTC with encoder mode. */ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); } else { REG_WRITE(map->size, ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1)); REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); } REG_WRITE(map->pos, 0); if (gma_encoder) drm_object_property_get_value(&connector->base, dev->mode_config.scaling_mode_property, &scalingType); if (scalingType == DRM_MODE_SCALE_NO_SCALE) { /* Medfield doesn't have register support for centering so we * need to mess with the h/vblank and h/vsync start and ends * to get centering */ int offsetX = 0, offsetY = 0; offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - offsetX - 1) | ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - offsetX - 1) | ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - offsetY - 1) | ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - offsetY - 1) | ((adjusted_mode->crtc_vsync_end - offsetY - 
1) << 16)); } else { REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); } /* Flush the plane changes */ { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->mode_set_base(crtc, x, y, old_fb); } /* setup pipeconf */ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */ /* Set up the display plane register */ dev_priv->dspcntr[pipe] = REG_READ(map->cntr); dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS; dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE; if (is_mipi2) goto mrst_crtc_mode_set_exit; clk = adjusted_mode->clock; if (is_hdmi) { if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) { refclk = 19200; if (is_mipi || is_mipi2) clk_n = 1, clk_p2 = 8; else if (is_hdmi) clk_n = 1, clk_p2 = 10; } else if (ksel == KSEL_BYPASS_25) { refclk = 25000; if (is_mipi || is_mipi2) clk_n = 1, clk_p2 = 8; else if (is_hdmi) clk_n = 1, clk_p2 = 10; } else if ((ksel == KSEL_BYPASS_83_100) && dev_priv->core_freq == 166) { refclk = 83000; if (is_mipi || is_mipi2) clk_n = 4, clk_p2 = 8; else if (is_hdmi) clk_n = 4, clk_p2 = 10; } else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) { refclk = 100000; if (is_mipi || is_mipi2) clk_n = 4, clk_p2 = 8; else if (is_hdmi) clk_n = 4, clk_p2 = 10; } if (is_mipi) clk_byte = dev_priv->bpp / 8; else if (is_mipi2) clk_byte = dev_priv->bpp2 / 8; 
clk_tmp = clk * clk_n * clk_p2 * clk_byte; dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n", clk, clk_n, clk_p2); dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n", adjusted_mode->clock, clk_tmp); ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock); if (!ok) { DRM_ERROR ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n"); } else { m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)]; dev_dbg(dev->dev, "dot clock = %d," "m = %d, p1 = %d, m_conv = %d.\n", clock.dot, clock.m, clock.p1, m_conv); } dpll = REG_READ(map->dpll); if (dpll & DPLL_VCO_ENABLE) { dpll &= ~DPLL_VCO_ENABLE; REG_WRITE(map->dpll, dpll); REG_READ(map->dpll); /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */ /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); /* reset M1, N1 & P1 */ REG_WRITE(map->fp0, 0); dpll &= ~MDFLD_P1_MASK; REG_WRITE(map->dpll, dpll); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); } /* When ungating power of DPLL, needs to wait 0.5us before * enable the VCO */ if (dpll & MDFLD_PWR_GATE_EN) { dpll &= ~MDFLD_PWR_GATE_EN; REG_WRITE(map->dpll, dpll); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); } dpll = 0; #if 0 /* FIXME revisit later */ if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 || ksel == KSEL_BYPASS_25) dpll &= ~MDFLD_INPUT_REF_SEL; else if (ksel == KSEL_BYPASS_83_100) dpll |= MDFLD_INPUT_REF_SEL; #endif /* FIXME revisit later */ if (is_hdmi) dpll |= MDFLD_VCO_SEL; fp = (clk_n / 2) << 16; fp |= m_conv; /* compute bitmask from p1 value */ dpll |= (1 << (clock.p1 - 2)) << 17; #if 0 /* 1080p30 & 720p */ dpll = 0x00050000; fp = 0x000001be; #endif #if 0 /* 480p */ dpll = 0x02010000; fp = 0x000000d2; #endif } else { #if 0 /*DBI_TPO_480x864*/ dpll = 0x00020000; fp = 0x00000156; #endif /* DBI_TPO_480x864 */ /* get from spec. 
*/ dpll = 0x00800000; fp = 0x000000c1; } REG_WRITE(map->fp0, fp); REG_WRITE(map->dpll, dpll); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); dpll |= DPLL_VCO_ENABLE; REG_WRITE(map->dpll, dpll); REG_READ(map->dpll); /* wait for DSI PLL to lock */ while (timeout < 20000 && !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { udelay(150); timeout++; } if (is_mipi) goto mrst_crtc_mode_set_exit; dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi); REG_WRITE(map->conf, dev_priv->pipeconf[pipe]); REG_READ(map->conf); /* Wait for for the pipe enable to take effect. */ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); gma_wait_for_vblank(dev); mrst_crtc_mode_set_exit: gma_power_end(dev); return 0; } const struct drm_crtc_helper_funcs mdfld_helper_funcs = { .dpms = mdfld_crtc_dpms, .mode_fixup = gma_crtc_mode_fixup, .mode_set = mdfld_crtc_mode_set, .mode_set_base = mdfld__intel_pipe_set_base, .prepare = gma_crtc_prepare, .commit = gma_crtc_commit, };
gpl-2.0
ExpressOS/third_party-l4android
drivers/mmc/host/sdhci-esdhc-imx.c
988
9327
/* * Freescale eSDHC i.MX controller driver for the platform bus. * * derived from the OF-version. * * Copyright (c) 2010 Pengutronix e.K. * Author: Wolfram Sang <w.sang@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/io.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <mach/hardware.h> #include <mach/esdhc.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" /* VENDOR SPEC register */ #define SDHCI_VENDOR_SPEC 0xC0 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) /* * The CMDTYPE of the CMD register (offset 0xE) should be set to * "11" when the STOP CMD12 is issued on imx53 to abort one * open ended multi-blk IO. Otherwise the TC INT wouldn't * be generated. * In exact block transfer, the controller doesn't complete the * operations automatically as required at the end of the * transfer and remains on hold if the abort command is not sent. * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. 
*/ #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) struct pltfm_imx_data { int flags; u32 scratchpad; }; static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) { void __iomem *base = host->ioaddr + (reg & ~0x3); u32 shift = (reg & 0x3) * 8; writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); } static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; /* fake CARD_PRESENT flag on mx25/35 */ u32 val = readl(host->ioaddr + reg); if (unlikely((reg == SDHCI_PRESENT_STATE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->cd_gpio) && gpio_get_value(boarddata->cd_gpio)) /* no card, if a valid gpio says so... */ val &= ~SDHCI_CARD_PRESENT; else /* ... in all other cases assume card is present */ val |= SDHCI_CARD_PRESENT; } return val; } static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) /* * these interrupts won't work with a custom card_detect gpio * (only applied to mx25/35) */ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } writel(val, host->ioaddr + reg); } static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { if (unlikely(reg == SDHCI_HOST_VERSION)) reg ^= 2; return readw(host->ioaddr + reg); } static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct 
sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; switch (reg) { case SDHCI_TRANSFER_MODE: /* * Postpone this write, we must do it together with a * command write that is down below. */ if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } imx_data->scratchpad = val; return; case SDHCI_COMMAND: if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) val |= SDHCI_CMD_ABORTCMD; writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); return; case SDHCI_BLOCK_SIZE: val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); break; } esdhc_clrset_le(host, 0xffff, val, reg); } static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { u32 new_val; switch (reg) { case SDHCI_POWER_CONTROL: /* * FSL put some DMA bits here * If your board has a regulator, code should be here */ return; case SDHCI_HOST_CONTROL: /* FSL messed up here, so we can just keep those two */ new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); /* ensure the endianess */ new_val |= ESDHC_HOST_CONTROL_LE; /* DMA mode bits are shifted */ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; esdhc_clrset_le(host, 0xffff, new_val, reg); return; } esdhc_clrset_le(host, 0xff, val, reg); } static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk); } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk) / 256 / 16; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) { struct esdhc_platform_data *boarddata = 
host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) return gpio_get_value(boarddata->wp_gpio); else return -ENOSYS; } static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, .write_w = esdhc_writew_le, .write_b = esdhc_writeb_le, .set_clock = esdhc_set_clock, .get_max_clock = esdhc_pltfm_get_max_clock, .get_min_clock = esdhc_pltfm_get_min_clock, }; static irqreturn_t cd_irq(int irq, void *data) { struct sdhci_host *sdhost = (struct sdhci_host *)data; tasklet_schedule(&sdhost->card_tasklet); return IRQ_HANDLED; }; static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct clk *clk; int err; struct pltfm_imx_data *imx_data; clk = clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { dev_err(mmc_dev(host->mmc), "clk err\n"); return PTR_ERR(clk); } clk_enable(clk); pltfm_host->clk = clk; imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); if (!imx_data) { clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); return -ENOMEM; } pltfm_host->priv = imx_data; if (!cpu_is_mx25()) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; if (cpu_is_mx25() || cpu_is_mx35()) { /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; /* write_protect can't be routed to controller, use gpio */ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; } if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; if (boarddata) { err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); if (err) { dev_warn(mmc_dev(host->mmc), "no write-protect pin available!\n"); boarddata->wp_gpio = err; } err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); if (err) { dev_warn(mmc_dev(host->mmc), "no card-detect pin available!\n"); goto 
no_card_detect_pin; } /* i.MX5x has issues to be researched */ if (!cpu_is_mx25() && !cpu_is_mx35()) goto not_supported; err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, mmc_hostname(host->mmc), host); if (err) { dev_warn(mmc_dev(host->mmc), "request irq error\n"); goto no_card_detect_irq; } imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; /* Now we have a working card_detect again */ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } return 0; no_card_detect_irq: gpio_free(boarddata->cd_gpio); no_card_detect_pin: boarddata->cd_gpio = err; not_supported: kfree(imx_data); return 0; } static void esdhc_pltfm_exit(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct pltfm_imx_data *imx_data = pltfm_host->priv; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) gpio_free(boarddata->wp_gpio); if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { gpio_free(boarddata->cd_gpio); if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) free_irq(gpio_to_irq(boarddata->cd_gpio), host); } clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); kfree(imx_data); } struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_CARD_DETECTION, /* ADMA has issues. Might be fixable */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, .exit = esdhc_pltfm_exit, };
gpl-2.0
Klagopsalmer/linux
net/core/ptp_classifier.c
1244
7398
/* PTP classifier * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* The below program is the bpf_asm (tools/net/) representation of * the opcode array in the ptp_filter structure. * * For convenience, this can easily be altered and reviewed with * bpf_asm and bpf_dbg, e.g. `./bpf_asm -c prog` where prog is a * simple file containing the below program: * * ldh [12] ; load ethertype * * ; PTP over UDP over IPv4 over Ethernet * test_ipv4: * jneq #0x800, test_ipv6 ; ETH_P_IP ? * ldb [23] ; load proto * jneq #17, drop_ipv4 ; IPPROTO_UDP ? * ldh [20] ; load frag offset field * jset #0x1fff, drop_ipv4 ; don't allow fragments * ldxb 4*([14]&0xf) ; load IP header len * ldh [x + 16] ; load UDP dst port * jneq #319, drop_ipv4 ; is port PTP_EV_PORT ? * ldh [x + 22] ; load payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x10 ; PTP_CLASS_IPV4 * ret a ; return PTP class * drop_ipv4: ret #0x0 ; PTP_CLASS_NONE * * ; PTP over UDP over IPv6 over Ethernet * test_ipv6: * jneq #0x86dd, test_8021q ; ETH_P_IPV6 ? * ldb [20] ; load proto * jneq #17, drop_ipv6 ; IPPROTO_UDP ? * ldh [56] ; load UDP dst port * jneq #319, drop_ipv6 ; is port PTP_EV_PORT ? * ldh [62] ; load payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x20 ; PTP_CLASS_IPV6 * ret a ; return PTP class * drop_ipv6: ret #0x0 ; PTP_CLASS_NONE * * ; PTP over 802.1Q over Ethernet * test_8021q: * jneq #0x8100, test_ieee1588 ; ETH_P_8021Q ? * ldh [16] ; load inner type * jneq #0x88f7, test_8021q_ipv4 ; ETH_P_1588 ? 
* ldb [18] ; load payload * and #0x8 ; as we don't have ports here, test * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these * ldh [18] ; reload payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x70 ; PTP_CLASS_VLAN|PTP_CLASS_L2 * ret a ; return PTP class * * ; PTP over UDP over IPv4 over 802.1Q over Ethernet * test_8021q_ipv4: * jneq #0x800, test_8021q_ipv6 ; ETH_P_IP ? * ldb [27] ; load proto * jneq #17, drop_8021q_ipv4 ; IPPROTO_UDP ? * ldh [24] ; load frag offset field * jset #0x1fff, drop_8021q_ipv4; don't allow fragments * ldxb 4*([18]&0xf) ; load IP header len * ldh [x + 20] ; load UDP dst port * jneq #319, drop_8021q_ipv4 ; is port PTP_EV_PORT ? * ldh [x + 26] ; load payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x50 ; PTP_CLASS_VLAN|PTP_CLASS_IPV4 * ret a ; return PTP class * drop_8021q_ipv4: ret #0x0 ; PTP_CLASS_NONE * * ; PTP over UDP over IPv6 over 802.1Q over Ethernet * test_8021q_ipv6: * jneq #0x86dd, drop_8021q_ipv6 ; ETH_P_IPV6 ? * ldb [24] ; load proto * jneq #17, drop_8021q_ipv6 ; IPPROTO_UDP ? * ldh [60] ; load UDP dst port * jneq #319, drop_8021q_ipv6 ; is port PTP_EV_PORT ? * ldh [66] ; load payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x60 ; PTP_CLASS_VLAN|PTP_CLASS_IPV6 * ret a ; return PTP class * drop_8021q_ipv6: ret #0x0 ; PTP_CLASS_NONE * * ; PTP over Ethernet * test_ieee1588: * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ? 
* ldb [14] ; load payload * and #0x8 ; as we don't have ports here, test * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these * ldh [14] ; reload payload * and #0xf ; mask PTP_CLASS_VMASK * or #0x30 ; PTP_CLASS_L2 * ret a ; return PTP class * drop_ieee1588: ret #0x0 ; PTP_CLASS_NONE */ #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/ptp_classify.h> static struct bpf_prog *ptp_insns __read_mostly; unsigned int ptp_classify_raw(const struct sk_buff *skb) { return BPF_PROG_RUN(ptp_insns, skb); } EXPORT_SYMBOL_GPL(ptp_classify_raw); void __init ptp_classifier_init(void) { static struct sock_filter ptp_filter[] __initdata = { { 0x28, 0, 0, 0x0000000c }, { 0x15, 0, 12, 0x00000800 }, { 0x30, 0, 0, 0x00000017 }, { 0x15, 0, 9, 0x00000011 }, { 0x28, 0, 0, 0x00000014 }, { 0x45, 7, 0, 0x00001fff }, { 0xb1, 0, 0, 0x0000000e }, { 0x48, 0, 0, 0x00000010 }, { 0x15, 0, 4, 0x0000013f }, { 0x48, 0, 0, 0x00000016 }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000010 }, { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, { 0x15, 0, 9, 0x000086dd }, { 0x30, 0, 0, 0x00000014 }, { 0x15, 0, 6, 0x00000011 }, { 0x28, 0, 0, 0x00000038 }, { 0x15, 0, 4, 0x0000013f }, { 0x28, 0, 0, 0x0000003e }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000020 }, { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, { 0x15, 0, 32, 0x00008100 }, { 0x28, 0, 0, 0x00000010 }, { 0x15, 0, 7, 0x000088f7 }, { 0x30, 0, 0, 0x00000012 }, { 0x54, 0, 0, 0x00000008 }, { 0x15, 0, 35, 0x00000000 }, { 0x28, 0, 0, 0x00000012 }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000070 }, { 0x16, 0, 0, 0x00000000 }, { 0x15, 0, 12, 0x00000800 }, { 0x30, 0, 0, 0x0000001b }, { 0x15, 0, 9, 0x00000011 }, { 0x28, 0, 0, 0x00000018 }, { 0x45, 7, 0, 0x00001fff }, { 0xb1, 0, 0, 0x00000012 }, { 0x48, 0, 0, 0x00000014 }, { 0x15, 0, 4, 0x0000013f }, { 0x48, 0, 0, 0x0000001a }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000050 }, { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, { 0x15, 0, 8, 
0x000086dd }, { 0x30, 0, 0, 0x00000018 }, { 0x15, 0, 6, 0x00000011 }, { 0x28, 0, 0, 0x0000003c }, { 0x15, 0, 4, 0x0000013f }, { 0x28, 0, 0, 0x00000042 }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000060 }, { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, { 0x15, 0, 7, 0x000088f7 }, { 0x30, 0, 0, 0x0000000e }, { 0x54, 0, 0, 0x00000008 }, { 0x15, 0, 4, 0x00000000 }, { 0x28, 0, 0, 0x0000000e }, { 0x54, 0, 0, 0x0000000f }, { 0x44, 0, 0, 0x00000030 }, { 0x16, 0, 0, 0x00000000 }, { 0x06, 0, 0, 0x00000000 }, }; struct sock_fprog_kern ptp_prog = { .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, }; BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); }
gpl-2.0
faux123/private-pyramid
fs/btrfs/locking.c
1244
6230
/* * Copyright (C) 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/page-flags.h> #include <asm/bug.h> #include "ctree.h" #include "extent_io.h" #include "locking.h" static inline void spin_nested(struct extent_buffer *eb) { spin_lock(&eb->lock); } /* * Setting a lock to blocking will drop the spinlock and set the * flag that forces other procs who want the lock to wait. After * this you can safely schedule with the lock held. */ void btrfs_set_lock_blocking(struct extent_buffer *eb) { if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags); spin_unlock(&eb->lock); } /* exit with the spin lock released and the bit set */ } /* * clearing the blocking flag will take the spinlock again. * After this you can't safely schedule */ void btrfs_clear_lock_blocking(struct extent_buffer *eb) { if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { spin_nested(eb); clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags); smp_mb__after_clear_bit(); } /* exit with the spin lock held */ } /* * unfortunately, many of the places that currently set a lock to blocking * don't end up blocking for very long, and often they don't block * at all. 
For a dbench 50 run, if we don't spin on the blocking bit * at all, the context switch rate can jump up to 400,000/sec or more. * * So, we're still stuck with this crummy spin on the blocking bit, * at least until the most common causes of the short blocks * can be dealt with. */ static int btrfs_spin_on_block(struct extent_buffer *eb) { int i; for (i = 0; i < 512; i++) { if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) return 1; if (need_resched()) break; cpu_relax(); } return 0; } /* * This is somewhat different from trylock. It will take the * spinlock but if it finds the lock is set to blocking, it will * return without the lock held. * * returns 1 if it was able to take the lock and zero otherwise * * After this call, scheduling is not safe without first calling * btrfs_set_lock_blocking() */ int btrfs_try_spin_lock(struct extent_buffer *eb) { int i; if (btrfs_spin_on_block(eb)) { spin_nested(eb); if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) return 1; spin_unlock(&eb->lock); } /* spin for a bit on the BLOCKING flag */ for (i = 0; i < 2; i++) { cpu_relax(); if (!btrfs_spin_on_block(eb)) break; spin_nested(eb); if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) return 1; spin_unlock(&eb->lock); } return 0; } /* * the autoremove wake function will return 0 if it tried to wake up * a process that was already awake, which means that process won't * count as an exclusive wakeup. The waitq code will continue waking * procs until it finds one that was actually sleeping. * * For btrfs, this isn't quite what we want. We want a single proc * to be notified that the lock is ready for taking. If that proc * already happen to be awake, great, it will loop around and try for * the lock. * * So, btrfs_wake_function always returns 1, even when the proc that we * tried to wake up was already awake. 
*/ static int btrfs_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) { autoremove_wake_function(wait, mode, sync, key); return 1; } /* * returns with the extent buffer spinlocked. * * This will spin and/or wait as required to take the lock, and then * return with the spinlock held. * * After this call, scheduling is not safe without first calling * btrfs_set_lock_blocking() */ int btrfs_tree_lock(struct extent_buffer *eb) { DEFINE_WAIT(wait); wait.func = btrfs_wake_function; if (!btrfs_spin_on_block(eb)) goto sleep; while(1) { spin_nested(eb); /* nobody is blocking, exit with the spinlock held */ if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) return 0; /* * we have the spinlock, but the real owner is blocking. * wait for them */ spin_unlock(&eb->lock); /* * spin for a bit, and if the blocking flag goes away, * loop around */ cpu_relax(); if (btrfs_spin_on_block(eb)) continue; sleep: prepare_to_wait_exclusive(&eb->lock_wq, &wait, TASK_UNINTERRUPTIBLE); if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) schedule(); finish_wait(&eb->lock_wq, &wait); } return 0; } /* * Very quick trylock, this does not spin or schedule. It returns * 1 with the spinlock held if it was able to take the lock, or it * returns zero if it was unable to take the lock. * * After this call, scheduling is not safe without first calling * btrfs_set_lock_blocking() */ int btrfs_try_tree_lock(struct extent_buffer *eb) { if (spin_trylock(&eb->lock)) { if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { /* * we've got the spinlock, but the real owner is * blocking. 
Drop the spinlock and return failure */ spin_unlock(&eb->lock); return 0; } return 1; } /* someone else has the spinlock giveup */ return 0; } int btrfs_tree_unlock(struct extent_buffer *eb) { /* * if we were a blocking owner, we don't have the spinlock held * just clear the bit and look for waiters */ if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) smp_mb__after_clear_bit(); else spin_unlock(&eb->lock); if (waitqueue_active(&eb->lock_wq)) wake_up(&eb->lock_wq); return 0; } void btrfs_assert_tree_locked(struct extent_buffer *eb) { if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) assert_spin_locked(&eb->lock); }
gpl-2.0
lab305itep/linux
drivers/staging/vt6656/power.c
1244
3456
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: power.c * * Purpose: Handles 802.11 power management functions * * Author: Lyndon Chen * * Date: July 17, 2002 * * Functions: * vnt_enable_power_saving - Enable Power Saving Mode * PSvDiasblePowerSaving - Disable Power Saving Mode * vnt_next_tbtt_wakeup - Decide if we need to wake up at next Beacon * * Revision History: * */ #include "mac.h" #include "device.h" #include "power.h" #include "wcmd.h" #include "rxtx.h" #include "card.h" #include "usbpipe.h" /* * * Routine Description: * Enable hw power saving functions * * Return Value: * None. 
* */ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval) { u16 aid = priv->current_aid | BIT(14) | BIT(15); /* set period of power up before TBTT */ vnt_mac_write_word(priv, MAC_REG_PWBT, C_PWBT); if (priv->op_mode != NL80211_IFTYPE_ADHOC) { /* set AID */ vnt_mac_write_word(priv, MAC_REG_AIDATIM, aid); } /* Warren:06-18-2004,the sequence must follow * PSEN->AUTOSLEEP->GO2DOZE */ /* enable power saving hw function */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_PSEN); /* Set AutoSleep */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); /* Warren:MUST turn on this once before turn on AUTOSLEEP ,or the * AUTOSLEEP doesn't work */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_GO2DOZE); if (listen_interval >= 2) { /* clear always listen beacon */ vnt_mac_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_ALBCN); /* first time set listen next beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN); } else { /* always listen beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN); } dev_dbg(&priv->usb->dev, "PS:Power Saving Mode Enable...\n"); } /* * * Routine Description: * Disable hw power saving functions * * Return Value: * None. * */ void vnt_disable_power_saving(struct vnt_private *priv) { /* disable power saving hw function */ vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0, 0, 0, NULL); /* clear AutoSleep */ vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); /* set always listen beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN); } /* * * Routine Description: * Check if Next TBTT must wake up * * Return Value: * None. * */ int vnt_next_tbtt_wakeup(struct vnt_private *priv) { struct ieee80211_hw *hw = priv->hw; struct ieee80211_conf *conf = &hw->conf; int wake_up = false; if (conf->listen_interval == 1) { /* Turn on wake up to listen next beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN); wake_up = true; } return wake_up; }
gpl-2.0
vaginessa/android_kernel_samsung_golden-1
sound/soc/codecs/wm9712.c
2012
23840
/* * wm9712.c -- ALSA Soc WM9712 codec support * * Copyright 2006 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/soc.h> #include "wm9712.h" #define WM9712_VERSION "0.4" static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg); static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val); /* * WM9712 register cache */ static const u16 wm9712_reg[] = { 0x6174, 0x8000, 0x8000, 0x8000, /* 6 */ 0x0f0f, 0xaaa0, 0xc008, 0x6808, /* e */ 0xe808, 0xaaa0, 0xad00, 0x8000, /* 16 */ 0xe808, 0x3000, 0x8000, 0x0000, /* 1e */ 0x0000, 0x0000, 0x0000, 0x000f, /* 26 */ 0x0405, 0x0410, 0xbb80, 0xbb80, /* 2e */ 0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */ 0x0000, 0x2000, 0x0000, 0x0000, /* 3e */ 0x0000, 0x0000, 0x0000, 0x0000, /* 46 */ 0x0000, 0x0000, 0xf83e, 0xffff, /* 4e */ 0x0000, 0x0000, 0x0000, 0xf83e, /* 56 */ 0x0008, 0x0000, 0x0000, 0x0000, /* 5e */ 0xb032, 0x3e00, 0x0000, 0x0000, /* 66 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */ 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */ 0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */ 0x0000, 0x0000 /* virtual hp mixers */ }; /* virtual HP mixers regs */ #define HPL_MIXER 0x80 #define HPR_MIXER 0x82 static const char *wm9712_alc_select[] = {"None", "Left", "Right", "Stereo"}; static const char *wm9712_alc_mux[] = {"Stereo", "Left", "Right", "None"}; static const char *wm9712_out3_src[] = {"Left", "VREF", "Left + Right", "Mono"}; static const char *wm9712_spk_src[] = {"Speaker Mix", "Headphone 
Mix"}; static const char *wm9712_rec_adc[] = {"Stereo", "Left", "Right", "Mute"}; static const char *wm9712_base[] = {"Linear Control", "Adaptive Boost"}; static const char *wm9712_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"}; static const char *wm9712_mic[] = {"Mic 1", "Differential", "Mic 2", "Stereo"}; static const char *wm9712_rec_sel[] = {"Mic", "NC", "NC", "Speaker Mixer", "Line", "Headphone Mixer", "Phone Mixer", "Phone"}; static const char *wm9712_ng_type[] = {"Constant Gain", "Mute"}; static const char *wm9712_diff_sel[] = {"Mic", "Line"}; static const struct soc_enum wm9712_enum[] = { SOC_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9712_alc_select), SOC_ENUM_SINGLE(AC97_VIDEO, 12, 4, wm9712_alc_mux), SOC_ENUM_SINGLE(AC97_AUX, 9, 4, wm9712_out3_src), SOC_ENUM_SINGLE(AC97_AUX, 8, 2, wm9712_spk_src), SOC_ENUM_SINGLE(AC97_REC_SEL, 12, 4, wm9712_rec_adc), SOC_ENUM_SINGLE(AC97_MASTER_TONE, 15, 2, wm9712_base), SOC_ENUM_DOUBLE(AC97_REC_GAIN, 14, 6, 2, wm9712_rec_gain), SOC_ENUM_SINGLE(AC97_MIC, 5, 4, wm9712_mic), SOC_ENUM_SINGLE(AC97_REC_SEL, 8, 8, wm9712_rec_sel), SOC_ENUM_SINGLE(AC97_REC_SEL, 0, 8, wm9712_rec_sel), SOC_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9712_ng_type), SOC_ENUM_SINGLE(0x5c, 8, 2, wm9712_diff_sel), }; static const struct snd_kcontrol_new wm9712_snd_ac97_controls[] = { SOC_DOUBLE("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1), SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1), SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1), SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 1, 1), SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1), SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0), SOC_SINGLE("Speaker Playback Invert Switch", AC97_MASTER, 6, 1, 0), SOC_SINGLE("Headphone Playback ZC Switch", AC97_HEADPHONE, 7, 1, 0), SOC_SINGLE("Mono Playback ZC Switch", AC97_MASTER_MONO, 7, 1, 0), SOC_SINGLE("Mono Playback Volume", AC97_MASTER_MONO, 0, 31, 1), SOC_SINGLE("Mono Playback Switch", 
AC97_MASTER_MONO, 15, 1, 1), SOC_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0), SOC_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0), SOC_SINGLE("ALC Decay Time", AC97_CODEC_CLASS_REV, 4, 15, 0), SOC_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0), SOC_ENUM("ALC Function", wm9712_enum[0]), SOC_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 0), SOC_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 1), SOC_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0), SOC_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0), SOC_ENUM("ALC NG Type", wm9712_enum[10]), SOC_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 1), SOC_SINGLE("Mic Headphone Volume", AC97_VIDEO, 12, 7, 1), SOC_SINGLE("ALC Headphone Volume", AC97_VIDEO, 7, 7, 1), SOC_SINGLE("Out3 Switch", AC97_AUX, 15, 1, 1), SOC_SINGLE("Out3 ZC Switch", AC97_AUX, 7, 1, 1), SOC_SINGLE("Out3 Volume", AC97_AUX, 0, 31, 1), SOC_SINGLE("PCBeep Bypass Headphone Volume", AC97_PC_BEEP, 12, 7, 1), SOC_SINGLE("PCBeep Bypass Speaker Volume", AC97_PC_BEEP, 8, 7, 1), SOC_SINGLE("PCBeep Bypass Phone Volume", AC97_PC_BEEP, 4, 7, 1), SOC_SINGLE("Aux Playback Headphone Volume", AC97_CD, 12, 7, 1), SOC_SINGLE("Aux Playback Speaker Volume", AC97_CD, 8, 7, 1), SOC_SINGLE("Aux Playback Phone Volume", AC97_CD, 4, 7, 1), SOC_SINGLE("Phone Volume", AC97_PHONE, 0, 15, 1), SOC_DOUBLE("Line Capture Volume", AC97_LINE, 8, 0, 31, 1), SOC_SINGLE("Capture 20dB Boost Switch", AC97_REC_SEL, 14, 1, 0), SOC_SINGLE("Capture to Phone 20dB Boost Switch", AC97_REC_SEL, 11, 1, 1), SOC_SINGLE("3D Upper Cut-off Switch", AC97_3D_CONTROL, 5, 1, 1), SOC_SINGLE("3D Lower Cut-off Switch", AC97_3D_CONTROL, 4, 1, 1), SOC_SINGLE("3D Playback Volume", AC97_3D_CONTROL, 0, 15, 0), SOC_ENUM("Bass Control", wm9712_enum[5]), SOC_SINGLE("Bass Cut-off Switch", AC97_MASTER_TONE, 12, 1, 1), SOC_SINGLE("Tone Cut-off Switch", AC97_MASTER_TONE, 4, 1, 1), SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0), SOC_SINGLE("Bass Volume", 
AC97_MASTER_TONE, 8, 15, 1), SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1), SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1), SOC_ENUM("Capture Volume Steps", wm9712_enum[6]), SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1), SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0), SOC_SINGLE("Mic 1 Volume", AC97_MIC, 8, 31, 1), SOC_SINGLE("Mic 2 Volume", AC97_MIC, 0, 31, 1), SOC_SINGLE("Mic 20dB Boost Switch", AC97_MIC, 7, 1, 0), }; /* We have to create a fake left and right HP mixers because * the codec only has a single control that is shared by both channels. * This makes it impossible to determine the audio path. */ static int mixer_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { u16 l, r, beep, line, phone, mic, pcm, aux; l = ac97_read(w->codec, HPL_MIXER); r = ac97_read(w->codec, HPR_MIXER); beep = ac97_read(w->codec, AC97_PC_BEEP); mic = ac97_read(w->codec, AC97_VIDEO); phone = ac97_read(w->codec, AC97_PHONE); line = ac97_read(w->codec, AC97_LINE); pcm = ac97_read(w->codec, AC97_PCM); aux = ac97_read(w->codec, AC97_CD); if (l & 0x1 || r & 0x1) ac97_write(w->codec, AC97_VIDEO, mic & 0x7fff); else ac97_write(w->codec, AC97_VIDEO, mic | 0x8000); if (l & 0x2 || r & 0x2) ac97_write(w->codec, AC97_PCM, pcm & 0x7fff); else ac97_write(w->codec, AC97_PCM, pcm | 0x8000); if (l & 0x4 || r & 0x4) ac97_write(w->codec, AC97_LINE, line & 0x7fff); else ac97_write(w->codec, AC97_LINE, line | 0x8000); if (l & 0x8 || r & 0x8) ac97_write(w->codec, AC97_PHONE, phone & 0x7fff); else ac97_write(w->codec, AC97_PHONE, phone | 0x8000); if (l & 0x10 || r & 0x10) ac97_write(w->codec, AC97_CD, aux & 0x7fff); else ac97_write(w->codec, AC97_CD, aux | 0x8000); if (l & 0x20 || r & 0x20) ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff); else ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000); return 0; } /* Left Headphone Mixers */ static const struct snd_kcontrol_new wm9712_hpl_mixer_controls[] = { SOC_DAPM_SINGLE("PCBeep Bypass Switch", 
HPL_MIXER, 5, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Phone Bypass Switch", HPL_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Line Bypass Switch", HPL_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Mic Sidetone Switch", HPL_MIXER, 0, 1, 0), }; /* Right Headphone Mixers */ static const struct snd_kcontrol_new wm9712_hpr_mixer_controls[] = { SOC_DAPM_SINGLE("PCBeep Bypass Switch", HPR_MIXER, 5, 1, 0), SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("Phone Bypass Switch", HPR_MIXER, 3, 1, 0), SOC_DAPM_SINGLE("Line Bypass Switch", HPR_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 1, 1, 0), SOC_DAPM_SINGLE("Mic Sidetone Switch", HPR_MIXER, 0, 1, 0), }; /* Speaker Mixer */ static const struct snd_kcontrol_new wm9712_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("PCBeep Bypass Switch", AC97_PC_BEEP, 11, 1, 1), SOC_DAPM_SINGLE("Aux Playback Switch", AC97_CD, 11, 1, 1), SOC_DAPM_SINGLE("Phone Bypass Switch", AC97_PHONE, 14, 1, 1), SOC_DAPM_SINGLE("Line Bypass Switch", AC97_LINE, 14, 1, 1), SOC_DAPM_SINGLE("PCM Playback Switch", AC97_PCM, 14, 1, 1), }; /* Phone Mixer */ static const struct snd_kcontrol_new wm9712_phone_mixer_controls[] = { SOC_DAPM_SINGLE("PCBeep Bypass Switch", AC97_PC_BEEP, 7, 1, 1), SOC_DAPM_SINGLE("Aux Playback Switch", AC97_CD, 7, 1, 1), SOC_DAPM_SINGLE("Line Bypass Switch", AC97_LINE, 13, 1, 1), SOC_DAPM_SINGLE("PCM Playback Switch", AC97_PCM, 13, 1, 1), SOC_DAPM_SINGLE("Mic 1 Sidetone Switch", AC97_MIC, 14, 1, 1), SOC_DAPM_SINGLE("Mic 2 Sidetone Switch", AC97_MIC, 13, 1, 1), }; /* ALC headphone mux */ static const struct snd_kcontrol_new wm9712_alc_mux_controls = SOC_DAPM_ENUM("Route", wm9712_enum[1]); /* out 3 mux */ static const struct snd_kcontrol_new wm9712_out3_mux_controls = SOC_DAPM_ENUM("Route", wm9712_enum[2]); /* spk mux */ static const struct snd_kcontrol_new wm9712_spk_mux_controls = SOC_DAPM_ENUM("Route", 
wm9712_enum[3]); /* Capture to Phone mux */ static const struct snd_kcontrol_new wm9712_capture_phone_mux_controls = SOC_DAPM_ENUM("Route", wm9712_enum[4]); /* Capture left select */ static const struct snd_kcontrol_new wm9712_capture_selectl_controls = SOC_DAPM_ENUM("Route", wm9712_enum[8]); /* Capture right select */ static const struct snd_kcontrol_new wm9712_capture_selectr_controls = SOC_DAPM_ENUM("Route", wm9712_enum[9]); /* Mic select */ static const struct snd_kcontrol_new wm9712_mic_src_controls = SOC_DAPM_ENUM("Route", wm9712_enum[7]); /* diff select */ static const struct snd_kcontrol_new wm9712_diff_sel_controls = SOC_DAPM_ENUM("Route", wm9712_enum[11]); static const struct snd_soc_dapm_widget wm9712_dapm_widgets[] = { SND_SOC_DAPM_MUX("ALC Sidetone Mux", SND_SOC_NOPM, 0, 0, &wm9712_alc_mux_controls), SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 0, &wm9712_out3_mux_controls), SND_SOC_DAPM_MUX("Speaker Mux", SND_SOC_NOPM, 0, 0, &wm9712_spk_mux_controls), SND_SOC_DAPM_MUX("Capture Phone Mux", SND_SOC_NOPM, 0, 0, &wm9712_capture_phone_mux_controls), SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectl_controls), SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectr_controls), SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0, &wm9712_mic_src_controls), SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0, &wm9712_diff_sel_controls), SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_INT_PAGING, 9, 1, &wm9712_hpl_mixer_controls[0], ARRAY_SIZE(wm9712_hpl_mixer_controls), mixer_event, SND_SOC_DAPM_POST_REG), SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_INT_PAGING, 8, 1, &wm9712_hpr_mixer_controls[0], ARRAY_SIZE(wm9712_hpr_mixer_controls), mixer_event, SND_SOC_DAPM_POST_REG), SND_SOC_DAPM_MIXER("Phone Mixer", AC97_INT_PAGING, 6, 1, &wm9712_phone_mixer_controls[0], ARRAY_SIZE(wm9712_phone_mixer_controls)), SND_SOC_DAPM_MIXER("Speaker 
Mixer", AC97_INT_PAGING, 7, 1, &wm9712_speaker_mixer_controls[0], ARRAY_SIZE(wm9712_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback", AC97_INT_PAGING, 14, 1), SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", AC97_INT_PAGING, 13, 1), SND_SOC_DAPM_DAC("Aux DAC", "Aux Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture", AC97_INT_PAGING, 12, 1), SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture", AC97_INT_PAGING, 11, 1), SND_SOC_DAPM_PGA("Headphone PGA", AC97_INT_PAGING, 4, 1, NULL, 0), SND_SOC_DAPM_PGA("Speaker PGA", AC97_INT_PAGING, 3, 1, NULL, 0), SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0), SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0), SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0), SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("HPOUTL"), SND_SOC_DAPM_OUTPUT("HPOUTR"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_INPUT("LINEINL"), SND_SOC_DAPM_INPUT("LINEINR"), SND_SOC_DAPM_INPUT("PHONE"), SND_SOC_DAPM_INPUT("PCBEEP"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2"), }; static const struct snd_soc_dapm_route wm9712_audio_map[] = { /* virtual mixer - mixes left & right channels for spk and mono */ {"AC97 Mixer", NULL, "Left DAC"}, {"AC97 Mixer", NULL, "Right DAC"}, /* Left HP mixer */ {"Left HP Mixer", "PCBeep Bypass Switch", "PCBEEP"}, {"Left HP Mixer", "Aux Playback Switch", "Aux DAC"}, {"Left HP Mixer", "Phone Bypass Switch", "Phone PGA"}, {"Left HP Mixer", "Line Bypass Switch", "Line PGA"}, {"Left HP Mixer", "PCM Playback Switch", "Left DAC"}, {"Left HP Mixer", "Mic Sidetone Switch", "Mic PGA"}, {"Left HP Mixer", NULL, "ALC Sidetone Mux"}, /* Right HP mixer */ {"Right HP Mixer", "PCBeep Bypass Switch", 
"PCBEEP"}, {"Right HP Mixer", "Aux Playback Switch", "Aux DAC"}, {"Right HP Mixer", "Phone Bypass Switch", "Phone PGA"}, {"Right HP Mixer", "Line Bypass Switch", "Line PGA"}, {"Right HP Mixer", "PCM Playback Switch", "Right DAC"}, {"Right HP Mixer", "Mic Sidetone Switch", "Mic PGA"}, {"Right HP Mixer", NULL, "ALC Sidetone Mux"}, /* speaker mixer */ {"Speaker Mixer", "PCBeep Bypass Switch", "PCBEEP"}, {"Speaker Mixer", "Line Bypass Switch", "Line PGA"}, {"Speaker Mixer", "PCM Playback Switch", "AC97 Mixer"}, {"Speaker Mixer", "Phone Bypass Switch", "Phone PGA"}, {"Speaker Mixer", "Aux Playback Switch", "Aux DAC"}, /* Phone mixer */ {"Phone Mixer", "PCBeep Bypass Switch", "PCBEEP"}, {"Phone Mixer", "Line Bypass Switch", "Line PGA"}, {"Phone Mixer", "Aux Playback Switch", "Aux DAC"}, {"Phone Mixer", "PCM Playback Switch", "AC97 Mixer"}, {"Phone Mixer", "Mic 1 Sidetone Switch", "Mic PGA"}, {"Phone Mixer", "Mic 2 Sidetone Switch", "Mic PGA"}, /* inputs */ {"Line PGA", NULL, "LINEINL"}, {"Line PGA", NULL, "LINEINR"}, {"Phone PGA", NULL, "PHONE"}, {"Mic PGA", NULL, "MIC1"}, {"Mic PGA", NULL, "MIC2"}, /* left capture selector */ {"Left Capture Select", "Mic", "MIC1"}, {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"}, {"Left Capture Select", "Line", "LINEINL"}, {"Left Capture Select", "Headphone Mixer", "Left HP Mixer"}, {"Left Capture Select", "Phone Mixer", "Phone Mixer"}, {"Left Capture Select", "Phone", "PHONE"}, /* right capture selector */ {"Right Capture Select", "Mic", "MIC2"}, {"Right Capture Select", "Speaker Mixer", "Speaker Mixer"}, {"Right Capture Select", "Line", "LINEINR"}, {"Right Capture Select", "Headphone Mixer", "Right HP Mixer"}, {"Right Capture Select", "Phone Mixer", "Phone Mixer"}, {"Right Capture Select", "Phone", "PHONE"}, /* ALC Sidetone */ {"ALC Sidetone Mux", "Stereo", "Left Capture Select"}, {"ALC Sidetone Mux", "Stereo", "Right Capture Select"}, {"ALC Sidetone Mux", "Left", "Left Capture Select"}, {"ALC Sidetone Mux", "Right", "Right 
Capture Select"}, /* ADC's */ {"Left ADC", NULL, "Left Capture Select"}, {"Right ADC", NULL, "Right Capture Select"}, /* outputs */ {"MONOOUT", NULL, "Phone Mixer"}, {"HPOUTL", NULL, "Headphone PGA"}, {"Headphone PGA", NULL, "Left HP Mixer"}, {"HPOUTR", NULL, "Headphone PGA"}, {"Headphone PGA", NULL, "Right HP Mixer"}, /* mono mixer */ {"Mono Mixer", NULL, "Left HP Mixer"}, {"Mono Mixer", NULL, "Right HP Mixer"}, /* Out3 Mux */ {"Out3 Mux", "Left", "Left HP Mixer"}, {"Out3 Mux", "Mono", "Phone Mixer"}, {"Out3 Mux", "Left + Right", "Mono Mixer"}, {"Out 3 PGA", NULL, "Out3 Mux"}, {"OUT3", NULL, "Out 3 PGA"}, /* speaker Mux */ {"Speaker Mux", "Speaker Mix", "Speaker Mixer"}, {"Speaker Mux", "Headphone Mix", "Mono Mixer"}, {"Speaker PGA", NULL, "Speaker Mux"}, {"LOUT2", NULL, "Speaker PGA"}, {"ROUT2", NULL, "Speaker PGA"}, }; static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; if (reg == AC97_RESET || reg == AC97_GPIO_STATUS || reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 || reg == AC97_REC_GAIN) return soc_ac97_ops.read(codec->ac97, reg); else { reg = reg >> 1; if (reg >= (ARRAY_SIZE(wm9712_reg))) return -EIO; return cache[reg]; } } static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val) { u16 *cache = codec->reg_cache; if (reg < 0x7c) soc_ac97_ops.write(codec->ac97, reg, val); reg = reg >> 1; if (reg < (ARRAY_SIZE(wm9712_reg))) cache[reg] = val; return 0; } static int ac97_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec =rtd->codec; int reg; u16 vra; vra = ac97_read(codec, AC97_EXTENDED_STATUS); ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) reg = AC97_PCM_FRONT_DAC_RATE; else reg = AC97_PCM_LR_ADC_RATE; return ac97_write(codec, reg, runtime->rate); } static 
int ac97_aux_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; u16 vra, xsle; vra = ac97_read(codec, AC97_EXTENDED_STATUS); ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1); xsle = ac97_read(codec, AC97_PCI_SID); ac97_write(codec, AC97_PCI_SID, xsle | 0x8000); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) return -ENODEV; return ac97_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate); } #define WM9712_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000) static struct snd_soc_dai_ops wm9712_dai_ops_hifi = { .prepare = ac97_prepare, }; static struct snd_soc_dai_ops wm9712_dai_ops_aux = { .prepare = ac97_aux_prepare, }; static struct snd_soc_dai_driver wm9712_dai[] = { { .name = "wm9712-hifi", .ac97_control = 1, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM9712_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS,}, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = WM9712_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS,}, .ops = &wm9712_dai_ops_hifi, }, { .name = "wm9712-aux", .playback = { .stream_name = "Aux Playback", .channels_min = 1, .channels_max = 1, .rates = WM9712_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS,}, .ops = &wm9712_dai_ops_aux, } }; static int wm9712_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: ac97_write(codec, AC97_POWERDOWN, 0x0000); break; case SND_SOC_BIAS_OFF: /* disable everything including AC link */ ac97_write(codec, AC97_EXTENDED_MSTATUS, 0xffff); ac97_write(codec, AC97_POWERDOWN, 0xffff); break; } codec->dapm.bias_level = level; return 0; } static int wm9712_reset(struct 
snd_soc_codec *codec, int try_warm) { if (try_warm && soc_ac97_ops.warm_reset) { soc_ac97_ops.warm_reset(codec->ac97); if (ac97_read(codec, 0) == wm9712_reg[0]) return 1; } soc_ac97_ops.reset(codec->ac97); if (soc_ac97_ops.warm_reset) soc_ac97_ops.warm_reset(codec->ac97); if (ac97_read(codec, 0) != wm9712_reg[0]) goto err; return 0; err: printk(KERN_ERR "WM9712 AC97 reset failed\n"); return -EIO; } static int wm9712_soc_suspend(struct snd_soc_codec *codec, pm_message_t state) { wm9712_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm9712_soc_resume(struct snd_soc_codec *codec) { int i, ret; u16 *cache = codec->reg_cache; ret = wm9712_reset(codec, 1); if (ret < 0) { printk(KERN_ERR "could not reset AC97 codec\n"); return ret; } wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY); if (ret == 0) { /* Sync reg_cache with the hardware after cold reset */ for (i = 2; i < ARRAY_SIZE(wm9712_reg) << 1; i += 2) { if (i == AC97_INT_PAGING || i == AC97_POWERDOWN || (i > 0x58 && i != 0x5c)) continue; soc_ac97_ops.write(codec->ac97, i, cache[i>>1]); } } return ret; } static int wm9712_soc_probe(struct snd_soc_codec *codec) { int ret = 0; printk(KERN_INFO "WM9711/WM9712 SoC Audio Codec %s\n", WM9712_VERSION); ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "wm9712: failed to register AC97 codec\n"); return ret; } ret = wm9712_reset(codec, 0); if (ret < 0) { printk(KERN_ERR "Failed to reset WM9712: AC97 link error\n"); goto reset_err; } /* set alc mux to none */ ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY); snd_soc_add_controls(codec, wm9712_snd_ac97_controls, ARRAY_SIZE(wm9712_snd_ac97_controls)); return 0; reset_err: snd_soc_free_ac97_codec(codec); return ret; } static int wm9712_soc_remove(struct snd_soc_codec *codec) { snd_soc_free_ac97_codec(codec); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm9712 = { .probe = wm9712_soc_probe, 
.remove = wm9712_soc_remove, .suspend = wm9712_soc_suspend, .resume = wm9712_soc_resume, .read = ac97_read, .write = ac97_write, .set_bias_level = wm9712_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm9712_reg), .reg_word_size = sizeof(u16), .reg_cache_step = 2, .reg_cache_default = wm9712_reg, .dapm_widgets = wm9712_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets), .dapm_routes = wm9712_audio_map, .num_dapm_routes = ARRAY_SIZE(wm9712_audio_map), }; static __devinit int wm9712_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm9712, wm9712_dai, ARRAY_SIZE(wm9712_dai)); } static int __devexit wm9712_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver wm9712_codec_driver = { .driver = { .name = "wm9712-codec", .owner = THIS_MODULE, }, .probe = wm9712_probe, .remove = __devexit_p(wm9712_remove), }; static int __init wm9712_init(void) { return platform_driver_register(&wm9712_codec_driver); } module_init(wm9712_init); static void __exit wm9712_exit(void) { platform_driver_unregister(&wm9712_codec_driver); } module_exit(wm9712_exit); MODULE_DESCRIPTION("ASoC WM9711/WM9712 driver"); MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL");
gpl-2.0
NAM-IL/ARM_Linux_Kernel_12b
tools/testing/selftests/net/psock_tpacket.c
2268
18158
/* * Copyright 2013 Red Hat, Inc. * Author: Daniel Borkmann <dborkman@redhat.com> * Chetan Loke <loke.chetan@gmail.com> (TPACKET_V3 usage example) * * A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior. * * Control: * Test the setup of the TPACKET socket with different patterns that are * known to fail (TODO) resp. succeed (OK). * * Datapath: * Open a pair of packet sockets and send resp. receive an a priori known * packet pattern accross the sockets and check if it was received resp. * sent correctly. Fanout in combination with RX_RING is currently not * tested here. * * The test currently runs for * - TPACKET_V1: RX_RING, TX_RING * - TPACKET_V2: RX_RING, TX_RING * - TPACKET_V3: RX_RING * * License (GPLv2): * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/socket.h> #include <sys/mman.h> #include <linux/if_packet.h> #include <linux/filter.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <bits/wordsize.h> #include <net/ethernet.h> #include <netinet/ip.h> #include <arpa/inet.h> #include <stdint.h> #include <string.h> #include <assert.h> #include <net/if.h> #include <inttypes.h> #include <poll.h> #include "psock_lib.h" #ifndef bug_on # define bug_on(cond) assert(!(cond)) #endif #ifndef __aligned_tpacket # define __aligned_tpacket __attribute__((aligned(TPACKET_ALIGNMENT))) #endif #ifndef __align_tpacket # define __align_tpacket(x) __attribute__((aligned(TPACKET_ALIGN(x)))) #endif #define NUM_PACKETS 100 #define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1)) struct ring { struct iovec *rd; uint8_t *mm_space; size_t mm_len, rd_len; struct sockaddr_ll ll; void (*walk)(int sock, struct ring *ring); int type, rd_num, flen, version; union { struct tpacket_req req; struct tpacket_req3 req3; }; }; struct block_desc { uint32_t version; uint32_t offset_to_priv; struct tpacket_hdr_v1 h1; }; union frame_map { struct { struct tpacket_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr)); } *v1; struct { struct tpacket2_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr)); } *v2; void *raw; }; static unsigned int total_packets, total_bytes; static int pfsocket(int ver) { int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (sock == -1) { perror("socket"); exit(1); } ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)); if (ret == -1) { perror("setsockopt"); exit(1); } return sock; } static void status_bar_update(void) { if (total_packets % 10 == 0) { fprintf(stderr, "."); fflush(stderr); } } static void test_payload(void *pay, size_t len) { struct ethhdr *eth = pay; if (len < sizeof(struct ethhdr)) { 
fprintf(stderr, "test_payload: packet too " "small: %zu bytes!\n", len); exit(1); } if (eth->h_proto != htons(ETH_P_IP)) { fprintf(stderr, "test_payload: wrong ethernet " "type: 0x%x!\n", ntohs(eth->h_proto)); exit(1); } } static void create_payload(void *pay, size_t *len) { int i; struct ethhdr *eth = pay; struct iphdr *ip = pay + sizeof(*eth); /* Lets create some broken crap, that still passes * our BPF filter. */ *len = DATA_LEN + 42; memset(pay, 0xff, ETH_ALEN * 2); eth->h_proto = htons(ETH_P_IP); for (i = 0; i < sizeof(*ip); ++i) ((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand(); ip->ihl = 5; ip->version = 4; ip->protocol = 0x11; ip->frag_off = 0; ip->ttl = 64; ip->tot_len = htons((uint16_t) *len - sizeof(*eth)); ip->saddr = htonl(INADDR_LOOPBACK); ip->daddr = htonl(INADDR_LOOPBACK); memset(pay + sizeof(*eth) + sizeof(*ip), DATA_CHAR, DATA_LEN); } static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v1_v2_rx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_rx_kernel_ready(base); case TPACKET_V2: return __v2_rx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_rx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_rx_user_ready(base); break; case TPACKET_V2: __v2_rx_user_ready(base); break; } } static void walk_v1_v2_rx(int sock, struct ring *ring) { struct pollfd pfd; int udp_sock[2]; union frame_map ppd; unsigned int frame_num = 0; bug_on(ring->type != PACKET_RX_RING); 
pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, ring->version)) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac, ppd.v1->tp_h.tp_snaplen); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac, ppd.v2->tp_h.tp_snaplen); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets++; __v1_v2_rx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static inline int __v1_v2_tx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_tx_kernel_ready(base); case TPACKET_V2: return __v2_tx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_tx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_tx_user_ready(base); break; case 
TPACKET_V2: __v2_tx_user_ready(base); break; } } static void __v1_v2_set_packet_loss_discard(int sock) { int ret, discard = 1; ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard, sizeof(discard)); if (ret == -1) { perror("setsockopt"); exit(1); } } static void walk_v1_v2_tx(int sock, struct ring *ring) { struct pollfd pfd; int rcv_sock, ret; size_t packet_len; union frame_map ppd; char packet[1024]; unsigned int frame_num = 0, got = 0; struct sockaddr_ll ll = { .sll_family = PF_PACKET, .sll_halen = ETH_ALEN, }; bug_on(ring->type != PACKET_TX_RING); bug_on(ring->rd_num < NUM_PACKETS); rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (rcv_sock == -1) { perror("socket"); exit(1); } pair_udp_setfilter(rcv_sock); ll.sll_ifindex = if_nametoindex("lo"); ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll)); if (ret == -1) { perror("bind"); exit(1); } memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLOUT | POLLERR; pfd.revents = 0; total_packets = NUM_PACKETS; create_payload(packet, &packet_len); while (total_packets > 0) { while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, ring->version) && total_packets > 0) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: ppd.v1->tp_h.tp_snaplen = packet_len; ppd.v1->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: ppd.v2->tp_h.tp_snaplen = packet_len; ppd.v2->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets--; __v1_v2_tx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } bug_on(total_packets != 0); ret = sendto(sock, NULL, 0, 0, NULL, 0); if (ret == -1) { perror("sendto"); exit(1); } while ((ret = 
recvfrom(rcv_sock, packet, sizeof(packet), 0, NULL, NULL)) > 0 && total_packets < NUM_PACKETS) { got += ret; test_payload(packet, ret); status_bar_update(); total_packets++; } close(rcv_sock); if (total_packets != NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got); } static void walk_v1_v2(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v1_v2_rx(sock, ring); else walk_v1_v2_tx(sock, ring); } static uint64_t __v3_prev_block_seq_num = 0; void __v3_test_block_seq_num(struct block_desc *pbd) { if (__v3_prev_block_seq_num + 1 != pbd->h1.seq_num) { fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected " "seq:%"PRIu64" != actual seq:%"PRIu64"\n", __v3_prev_block_seq_num, __v3_prev_block_seq_num + 1, (uint64_t) pbd->h1.seq_num); exit(1); } __v3_prev_block_seq_num = pbd->h1.seq_num; } static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num) { if (pbd->h1.num_pkts && bytes != pbd->h1.blk_len) { fprintf(stderr, "\nblock:%u with %upackets, expected " "len:%u != actual len:%u\n", block_num, pbd->h1.num_pkts, bytes, pbd->h1.blk_len); exit(1); } } static void __v3_test_block_header(struct block_desc *pbd, const int block_num) { if ((pbd->h1.block_status & TP_STATUS_USER) == 0) { fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num); exit(1); } __v3_test_block_seq_num(pbd); } static void __v3_walk_block(struct block_desc *pbd, const int block_num) { int num_pkts = pbd->h1.num_pkts, i; unsigned long bytes = 0, bytes_with_padding = ALIGN_8(sizeof(*pbd)); struct tpacket3_hdr *ppd; __v3_test_block_header(pbd, block_num); ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + pbd->h1.offset_to_first_pkt); for (i = 0; i < num_pkts; ++i) { bytes += ppd->tp_snaplen; if (ppd->tp_next_offset) bytes_with_padding += ppd->tp_next_offset; else bytes_with_padding += ALIGN_8(ppd->tp_snaplen + 
ppd->tp_mac); test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen); status_bar_update(); total_packets++; ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset); __sync_synchronize(); } __v3_test_block_len(pbd, bytes_with_padding, block_num); total_bytes += bytes; } void __v3_flush_block(struct block_desc *pbd) { pbd->h1.block_status = TP_STATUS_KERNEL; __sync_synchronize(); } static void walk_v3_rx(int sock, struct ring *ring) { unsigned int block_num = 0; struct pollfd pfd; struct block_desc *pbd; int udp_sock[2]; bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { pbd = (struct block_desc *) ring->rd[block_num].iov_base; while ((pbd->h1.block_status & TP_STATUS_USER) == 0) poll(&pfd, 1, 1); __v3_walk_block(pbd, block_num); __v3_flush_block(pbd); block_num = (block_num + 1) % ring->rd_num; } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n", total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static void walk_v3(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v3_rx(sock, ring); else bug_on(1); } static void __v1_v2_fill(struct ring *ring, unsigned int blocks) { ring->req.tp_block_size = getpagesize() << 2; ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req.tp_block_nr = blocks; ring->req.tp_frame_nr = ring->req.tp_block_size / ring->req.tp_frame_size * ring->req.tp_block_nr; ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr; ring->walk = walk_v1_v2; ring->rd_num = ring->req.tp_frame_nr; ring->flen = ring->req.tp_frame_size; } static void __v3_fill(struct ring *ring, unsigned int blocks) { ring->req3.tp_retire_blk_tov = 64; 
ring->req3.tp_sizeof_priv = 0; ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; ring->req3.tp_block_size = getpagesize() << 2; ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req3.tp_block_nr = blocks; ring->req3.tp_frame_nr = ring->req3.tp_block_size / ring->req3.tp_frame_size * ring->req3.tp_block_nr; ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr; ring->walk = walk_v3; ring->rd_num = ring->req3.tp_block_nr; ring->flen = ring->req3.tp_block_size; } static void setup_ring(int sock, struct ring *ring, int version, int type) { int ret = 0; unsigned int blocks = 256; ring->type = type; ring->version = version; switch (version) { case TPACKET_V1: case TPACKET_V2: if (type == PACKET_TX_RING) __v1_v2_set_packet_loss_discard(sock); __v1_v2_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req)); break; case TPACKET_V3: __v3_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, sizeof(ring->req3)); break; } if (ret == -1) { perror("setsockopt"); exit(1); } ring->rd_len = ring->rd_num * sizeof(*ring->rd); ring->rd = malloc(ring->rd_len); if (ring->rd == NULL) { perror("malloc"); exit(1); } total_packets = 0; total_bytes = 0; } static void mmap_ring(int sock, struct ring *ring) { int i; ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0); if (ring->mm_space == MAP_FAILED) { perror("mmap"); exit(1); } memset(ring->rd, 0, ring->rd_len); for (i = 0; i < ring->rd_num; ++i) { ring->rd[i].iov_base = ring->mm_space + (i * ring->flen); ring->rd[i].iov_len = ring->flen; } } static void bind_ring(int sock, struct ring *ring) { int ret; ring->ll.sll_family = PF_PACKET; ring->ll.sll_protocol = htons(ETH_P_ALL); ring->ll.sll_ifindex = if_nametoindex("lo"); ring->ll.sll_hatype = 0; ring->ll.sll_pkttype = 0; ring->ll.sll_halen = 0; ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll)); if (ret == -1) { perror("bind"); exit(1); } 
} static void walk_ring(int sock, struct ring *ring) { ring->walk(sock, ring); } static void unmap_ring(int sock, struct ring *ring) { munmap(ring->mm_space, ring->mm_len); free(ring->rd); } static int test_kernel_bit_width(void) { char in[512], *ptr; int num = 0, fd; ssize_t ret; fd = open("/proc/kallsyms", O_RDONLY); if (fd == -1) { perror("open"); exit(1); } ret = read(fd, in, sizeof(in)); if (ret <= 0) { perror("read"); exit(1); } close(fd); ptr = in; while(!isspace(*ptr)) { num++; ptr++; } return num * 4; } static int test_user_bit_width(void) { return __WORDSIZE; } static const char *tpacket_str[] = { [TPACKET_V1] = "TPACKET_V1", [TPACKET_V2] = "TPACKET_V2", [TPACKET_V3] = "TPACKET_V3", }; static const char *type_str[] = { [PACKET_RX_RING] = "PACKET_RX_RING", [PACKET_TX_RING] = "PACKET_TX_RING", }; static int test_tpacket(int version, int type) { int sock; struct ring ring; fprintf(stderr, "test: %s with %s ", tpacket_str[version], type_str[type]); fflush(stderr); if (version == TPACKET_V1 && test_kernel_bit_width() != test_user_bit_width()) { fprintf(stderr, "test: skip %s %s since user and kernel " "space have different bit width\n", tpacket_str[version], type_str[type]); return 0; } sock = pfsocket(version); memset(&ring, 0, sizeof(ring)); setup_ring(sock, &ring, version, type); mmap_ring(sock, &ring); bind_ring(sock, &ring); walk_ring(sock, &ring); unmap_ring(sock, &ring); close(sock); fprintf(stderr, "\n"); return 0; } int main(void) { int ret = 0; ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); if (ret) return 1; printf("OK. All tests passed\n"); return 0; }
gpl-2.0
AndyLavr/Aspire-SW5-012_Kernel_4.8
arch/arm/mach-imx/devices/platform-flexcan.c
2268
1657
/* * Copyright (C) 2010 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include "../hardware.h" #include "devices-common.h" #define imx_flexcan_data_entry_single(soc, _id, _hwid, _size) \ { \ .id = _id, \ .iobase = soc ## _CAN ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_CAN ## _hwid, \ } #define imx_flexcan_data_entry(soc, _id, _hwid, _size) \ [_id] = imx_flexcan_data_entry_single(soc, _id, _hwid, _size) #ifdef CONFIG_SOC_IMX25 const struct imx_flexcan_data imx25_flexcan_data[] __initconst = { #define imx25_flexcan_data_entry(_id, _hwid) \ imx_flexcan_data_entry(MX25, _id, _hwid, SZ_16K) imx25_flexcan_data_entry(0, 1), imx25_flexcan_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX35 const struct imx_flexcan_data imx35_flexcan_data[] __initconst = { #define imx35_flexcan_data_entry(_id, _hwid) \ imx_flexcan_data_entry(MX35, _id, _hwid, SZ_16K) imx35_flexcan_data_entry(0, 1), imx35_flexcan_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_flexcan( const struct imx_flexcan_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("flexcan", data->id, res, ARRAY_SIZE(res), NULL, 0); }
gpl-2.0
KutuSystems/linux
tools/testing/selftests/net/psock_tpacket.c
2268
18158
/* * Copyright 2013 Red Hat, Inc. * Author: Daniel Borkmann <dborkman@redhat.com> * Chetan Loke <loke.chetan@gmail.com> (TPACKET_V3 usage example) * * A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior. * * Control: * Test the setup of the TPACKET socket with different patterns that are * known to fail (TODO) resp. succeed (OK). * * Datapath: * Open a pair of packet sockets and send resp. receive an a priori known * packet pattern accross the sockets and check if it was received resp. * sent correctly. Fanout in combination with RX_RING is currently not * tested here. * * The test currently runs for * - TPACKET_V1: RX_RING, TX_RING * - TPACKET_V2: RX_RING, TX_RING * - TPACKET_V3: RX_RING * * License (GPLv2): * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/socket.h> #include <sys/mman.h> #include <linux/if_packet.h> #include <linux/filter.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <bits/wordsize.h> #include <net/ethernet.h> #include <netinet/ip.h> #include <arpa/inet.h> #include <stdint.h> #include <string.h> #include <assert.h> #include <net/if.h> #include <inttypes.h> #include <poll.h> #include "psock_lib.h" #ifndef bug_on # define bug_on(cond) assert(!(cond)) #endif #ifndef __aligned_tpacket # define __aligned_tpacket __attribute__((aligned(TPACKET_ALIGNMENT))) #endif #ifndef __align_tpacket # define __align_tpacket(x) __attribute__((aligned(TPACKET_ALIGN(x)))) #endif #define NUM_PACKETS 100 #define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1)) struct ring { struct iovec *rd; uint8_t *mm_space; size_t mm_len, rd_len; struct sockaddr_ll ll; void (*walk)(int sock, struct ring *ring); int type, rd_num, flen, version; union { struct tpacket_req req; struct tpacket_req3 req3; }; }; struct block_desc { uint32_t version; uint32_t offset_to_priv; struct tpacket_hdr_v1 h1; }; union frame_map { struct { struct tpacket_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr)); } *v1; struct { struct tpacket2_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr)); } *v2; void *raw; }; static unsigned int total_packets, total_bytes; static int pfsocket(int ver) { int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (sock == -1) { perror("socket"); exit(1); } ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)); if (ret == -1) { perror("setsockopt"); exit(1); } return sock; } static void status_bar_update(void) { if (total_packets % 10 == 0) { fprintf(stderr, "."); fflush(stderr); } } static void test_payload(void *pay, size_t len) { struct ethhdr *eth = pay; if (len < sizeof(struct ethhdr)) { 
fprintf(stderr, "test_payload: packet too " "small: %zu bytes!\n", len); exit(1); } if (eth->h_proto != htons(ETH_P_IP)) { fprintf(stderr, "test_payload: wrong ethernet " "type: 0x%x!\n", ntohs(eth->h_proto)); exit(1); } } static void create_payload(void *pay, size_t *len) { int i; struct ethhdr *eth = pay; struct iphdr *ip = pay + sizeof(*eth); /* Lets create some broken crap, that still passes * our BPF filter. */ *len = DATA_LEN + 42; memset(pay, 0xff, ETH_ALEN * 2); eth->h_proto = htons(ETH_P_IP); for (i = 0; i < sizeof(*ip); ++i) ((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand(); ip->ihl = 5; ip->version = 4; ip->protocol = 0x11; ip->frag_off = 0; ip->ttl = 64; ip->tot_len = htons((uint16_t) *len - sizeof(*eth)); ip->saddr = htonl(INADDR_LOOPBACK); ip->daddr = htonl(INADDR_LOOPBACK); memset(pay + sizeof(*eth) + sizeof(*ip), DATA_CHAR, DATA_LEN); } static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v1_v2_rx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_rx_kernel_ready(base); case TPACKET_V2: return __v2_rx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_rx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_rx_user_ready(base); break; case TPACKET_V2: __v2_rx_user_ready(base); break; } } static void walk_v1_v2_rx(int sock, struct ring *ring) { struct pollfd pfd; int udp_sock[2]; union frame_map ppd; unsigned int frame_num = 0; bug_on(ring->type != PACKET_RX_RING); 
pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, ring->version)) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac, ppd.v1->tp_h.tp_snaplen); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac, ppd.v2->tp_h.tp_snaplen); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets++; __v1_v2_rx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static inline int __v1_v2_tx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_tx_kernel_ready(base); case TPACKET_V2: return __v2_tx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_tx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_tx_user_ready(base); break; case 
TPACKET_V2: __v2_tx_user_ready(base); break; } } static void __v1_v2_set_packet_loss_discard(int sock) { int ret, discard = 1; ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard, sizeof(discard)); if (ret == -1) { perror("setsockopt"); exit(1); } } static void walk_v1_v2_tx(int sock, struct ring *ring) { struct pollfd pfd; int rcv_sock, ret; size_t packet_len; union frame_map ppd; char packet[1024]; unsigned int frame_num = 0, got = 0; struct sockaddr_ll ll = { .sll_family = PF_PACKET, .sll_halen = ETH_ALEN, }; bug_on(ring->type != PACKET_TX_RING); bug_on(ring->rd_num < NUM_PACKETS); rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (rcv_sock == -1) { perror("socket"); exit(1); } pair_udp_setfilter(rcv_sock); ll.sll_ifindex = if_nametoindex("lo"); ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll)); if (ret == -1) { perror("bind"); exit(1); } memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLOUT | POLLERR; pfd.revents = 0; total_packets = NUM_PACKETS; create_payload(packet, &packet_len); while (total_packets > 0) { while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, ring->version) && total_packets > 0) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: ppd.v1->tp_h.tp_snaplen = packet_len; ppd.v1->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: ppd.v2->tp_h.tp_snaplen = packet_len; ppd.v2->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets--; __v1_v2_tx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } bug_on(total_packets != 0); ret = sendto(sock, NULL, 0, 0, NULL, 0); if (ret == -1) { perror("sendto"); exit(1); } while ((ret = 
recvfrom(rcv_sock, packet, sizeof(packet), 0, NULL, NULL)) > 0 && total_packets < NUM_PACKETS) { got += ret; test_payload(packet, ret); status_bar_update(); total_packets++; } close(rcv_sock); if (total_packets != NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got); } static void walk_v1_v2(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v1_v2_rx(sock, ring); else walk_v1_v2_tx(sock, ring); } static uint64_t __v3_prev_block_seq_num = 0; void __v3_test_block_seq_num(struct block_desc *pbd) { if (__v3_prev_block_seq_num + 1 != pbd->h1.seq_num) { fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected " "seq:%"PRIu64" != actual seq:%"PRIu64"\n", __v3_prev_block_seq_num, __v3_prev_block_seq_num + 1, (uint64_t) pbd->h1.seq_num); exit(1); } __v3_prev_block_seq_num = pbd->h1.seq_num; } static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num) { if (pbd->h1.num_pkts && bytes != pbd->h1.blk_len) { fprintf(stderr, "\nblock:%u with %upackets, expected " "len:%u != actual len:%u\n", block_num, pbd->h1.num_pkts, bytes, pbd->h1.blk_len); exit(1); } } static void __v3_test_block_header(struct block_desc *pbd, const int block_num) { if ((pbd->h1.block_status & TP_STATUS_USER) == 0) { fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num); exit(1); } __v3_test_block_seq_num(pbd); } static void __v3_walk_block(struct block_desc *pbd, const int block_num) { int num_pkts = pbd->h1.num_pkts, i; unsigned long bytes = 0, bytes_with_padding = ALIGN_8(sizeof(*pbd)); struct tpacket3_hdr *ppd; __v3_test_block_header(pbd, block_num); ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + pbd->h1.offset_to_first_pkt); for (i = 0; i < num_pkts; ++i) { bytes += ppd->tp_snaplen; if (ppd->tp_next_offset) bytes_with_padding += ppd->tp_next_offset; else bytes_with_padding += ALIGN_8(ppd->tp_snaplen + 
ppd->tp_mac); test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen); status_bar_update(); total_packets++; ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset); __sync_synchronize(); } __v3_test_block_len(pbd, bytes_with_padding, block_num); total_bytes += bytes; } void __v3_flush_block(struct block_desc *pbd) { pbd->h1.block_status = TP_STATUS_KERNEL; __sync_synchronize(); } static void walk_v3_rx(int sock, struct ring *ring) { unsigned int block_num = 0; struct pollfd pfd; struct block_desc *pbd; int udp_sock[2]; bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { pbd = (struct block_desc *) ring->rd[block_num].iov_base; while ((pbd->h1.block_status & TP_STATUS_USER) == 0) poll(&pfd, 1, 1); __v3_walk_block(pbd, block_num); __v3_flush_block(pbd); block_num = (block_num + 1) % ring->rd_num; } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n", total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static void walk_v3(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v3_rx(sock, ring); else bug_on(1); } static void __v1_v2_fill(struct ring *ring, unsigned int blocks) { ring->req.tp_block_size = getpagesize() << 2; ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req.tp_block_nr = blocks; ring->req.tp_frame_nr = ring->req.tp_block_size / ring->req.tp_frame_size * ring->req.tp_block_nr; ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr; ring->walk = walk_v1_v2; ring->rd_num = ring->req.tp_frame_nr; ring->flen = ring->req.tp_frame_size; } static void __v3_fill(struct ring *ring, unsigned int blocks) { ring->req3.tp_retire_blk_tov = 64; 
ring->req3.tp_sizeof_priv = 0; ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; ring->req3.tp_block_size = getpagesize() << 2; ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req3.tp_block_nr = blocks; ring->req3.tp_frame_nr = ring->req3.tp_block_size / ring->req3.tp_frame_size * ring->req3.tp_block_nr; ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr; ring->walk = walk_v3; ring->rd_num = ring->req3.tp_block_nr; ring->flen = ring->req3.tp_block_size; } static void setup_ring(int sock, struct ring *ring, int version, int type) { int ret = 0; unsigned int blocks = 256; ring->type = type; ring->version = version; switch (version) { case TPACKET_V1: case TPACKET_V2: if (type == PACKET_TX_RING) __v1_v2_set_packet_loss_discard(sock); __v1_v2_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req)); break; case TPACKET_V3: __v3_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, sizeof(ring->req3)); break; } if (ret == -1) { perror("setsockopt"); exit(1); } ring->rd_len = ring->rd_num * sizeof(*ring->rd); ring->rd = malloc(ring->rd_len); if (ring->rd == NULL) { perror("malloc"); exit(1); } total_packets = 0; total_bytes = 0; } static void mmap_ring(int sock, struct ring *ring) { int i; ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0); if (ring->mm_space == MAP_FAILED) { perror("mmap"); exit(1); } memset(ring->rd, 0, ring->rd_len); for (i = 0; i < ring->rd_num; ++i) { ring->rd[i].iov_base = ring->mm_space + (i * ring->flen); ring->rd[i].iov_len = ring->flen; } } static void bind_ring(int sock, struct ring *ring) { int ret; ring->ll.sll_family = PF_PACKET; ring->ll.sll_protocol = htons(ETH_P_ALL); ring->ll.sll_ifindex = if_nametoindex("lo"); ring->ll.sll_hatype = 0; ring->ll.sll_pkttype = 0; ring->ll.sll_halen = 0; ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll)); if (ret == -1) { perror("bind"); exit(1); } 
} static void walk_ring(int sock, struct ring *ring) { ring->walk(sock, ring); } static void unmap_ring(int sock, struct ring *ring) { munmap(ring->mm_space, ring->mm_len); free(ring->rd); } static int test_kernel_bit_width(void) { char in[512], *ptr; int num = 0, fd; ssize_t ret; fd = open("/proc/kallsyms", O_RDONLY); if (fd == -1) { perror("open"); exit(1); } ret = read(fd, in, sizeof(in)); if (ret <= 0) { perror("read"); exit(1); } close(fd); ptr = in; while(!isspace(*ptr)) { num++; ptr++; } return num * 4; } static int test_user_bit_width(void) { return __WORDSIZE; } static const char *tpacket_str[] = { [TPACKET_V1] = "TPACKET_V1", [TPACKET_V2] = "TPACKET_V2", [TPACKET_V3] = "TPACKET_V3", }; static const char *type_str[] = { [PACKET_RX_RING] = "PACKET_RX_RING", [PACKET_TX_RING] = "PACKET_TX_RING", }; static int test_tpacket(int version, int type) { int sock; struct ring ring; fprintf(stderr, "test: %s with %s ", tpacket_str[version], type_str[type]); fflush(stderr); if (version == TPACKET_V1 && test_kernel_bit_width() != test_user_bit_width()) { fprintf(stderr, "test: skip %s %s since user and kernel " "space have different bit width\n", tpacket_str[version], type_str[type]); return 0; } sock = pfsocket(version); memset(&ring, 0, sizeof(ring)); setup_ring(sock, &ring, version, type); mmap_ring(sock, &ring); bind_ring(sock, &ring); walk_ring(sock, &ring); unmap_ring(sock, &ring); close(sock); fprintf(stderr, "\n"); return 0; } int main(void) { int ret = 0; ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); if (ret) return 1; printf("OK. All tests passed\n"); return 0; }
gpl-2.0
mmind/linux-es600
drivers/ata/sata_qstor.c
2780
17843
/* * sata_qstor.c - Pacific Digital Corporation QStor SATA * * Maintained by: Mark Lord <mlord@pobox.com> * * Copyright 2005 Pacific Digital Corporation. * (OSL/GPL code release authorized by Jalil Fadavi). * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "sata_qstor" #define DRV_VERSION "0.09" enum { QS_MMIO_BAR = 4, QS_PORTS = 4, QS_MAX_PRD = LIBATA_MAX_PRD, QS_CPB_ORDER = 6, QS_CPB_BYTES = (1 << QS_CPB_ORDER), QS_PRD_BYTES = QS_MAX_PRD * 16, QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES, /* global register offsets */ QS_HCF_CNFG3 = 0x0003, /* host configuration offset */ QS_HID_HPHY = 0x0004, /* host physical interface info */ QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */ QS_HST_SFF = 0x0100, /* host status fifo offset */ QS_HVS_SERD3 = 0x0393, /* PHY enable offset */ /* global control bits */ QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */ QS_CNFG3_GSRST = 0x01, /* global chip reset */ QS_SERD3_PHY_ENA = 0xf0, /* PHY 
detection ENAble*/ /* per-channel register offsets */ QS_CCF_CPBA = 0x0710, /* chan CPB base address */ QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */ QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */ QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */ QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */ QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */ QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */ QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */ QS_CCT_CFF = 0x0a00, /* chan command fifo offset */ /* channel control bits */ QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */ QS_CTR0_CLER = (1 << 2), /* clear channel errors */ QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */ QS_CTR1_RCHN = (1 << 4), /* reset channel logic */ QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */ /* pkt sub-field headers */ QS_HCB_HDR = 0x01, /* Host Control Block header */ QS_DCB_HDR = 0x02, /* Device Control Block header */ /* pkt HCB flag bits */ QS_HF_DIRO = (1 << 0), /* data DIRection Out */ QS_HF_DAT = (1 << 3), /* DATa pkt */ QS_HF_IEN = (1 << 4), /* Interrupt ENable */ QS_HF_VLD = (1 << 5), /* VaLiD pkt */ /* pkt DCB flag bits */ QS_DF_PORD = (1 << 2), /* Pio OR Dma */ QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */ /* PCI device IDs */ board_2068_idx = 0, /* QStor 4-port SATA/RAID */ }; enum { QS_DMA_BOUNDARY = ~0UL }; typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t; struct qs_port_priv { u8 *pkt; dma_addr_t pkt_dma; qs_state_t state; }; static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); static void qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct 
ata_queued_cmd *qc); static void qs_freeze(struct ata_port *ap); static void qs_thaw(struct ata_port *ap); static int qs_prereset(struct ata_link *link, unsigned long deadline); static void qs_error_handler(struct ata_port *ap); static struct scsi_host_template qs_ata_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = QS_MAX_PRD, .dma_boundary = QS_DMA_BOUNDARY, }; static struct ata_port_operations qs_ata_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = qs_check_atapi_dma, .qc_prep = qs_qc_prep, .qc_issue = qs_qc_issue, .freeze = qs_freeze, .thaw = qs_thaw, .prereset = qs_prereset, .softreset = ATA_OP_NULL, .error_handler = qs_error_handler, .lost_interrupt = ATA_OP_NULL, .scr_read = qs_scr_read, .scr_write = qs_scr_write, .port_start = qs_port_start, .host_stop = qs_host_stop, }; static const struct ata_port_info qs_port_info[] = { /* board_2068_idx */ { .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, .pio_mask = ATA_PIO4_ONLY, .udma_mask = ATA_UDMA6, .port_ops = &qs_ata_ops, }, }; static const struct pci_device_id qs_ata_pci_tbl[] = { { PCI_VDEVICE(PDC, 0x2068), board_2068_idx }, { } /* terminate list */ }; static struct pci_driver qs_ata_pci_driver = { .name = DRV_NAME, .id_table = qs_ata_pci_tbl, .probe = qs_ata_init_one, .remove = ata_pci_remove_one, }; static void __iomem *qs_mmio_base(struct ata_host *host) { return host->iomap[QS_MMIO_BAR]; } static int qs_check_atapi_dma(struct ata_queued_cmd *qc) { return 1; /* ATAPI DMA not supported */ } static inline void qs_enter_reg_mode(struct ata_port *ap) { u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); struct qs_port_priv *pp = ap->private_data; pp->state = qs_state_mmio; writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); readb(chan + QS_CCT_CTR0); /* flush */ } static inline void qs_reset_channel_logic(struct ata_port *ap) { u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1); readb(chan + QS_CCT_CTR0); /* flush */ qs_enter_reg_mode(ap); } 
static void qs_freeze(struct ata_port *ap) { u8 __iomem *mmio_base = qs_mmio_base(ap->host); writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ qs_enter_reg_mode(ap); } static void qs_thaw(struct ata_port *ap) { u8 __iomem *mmio_base = qs_mmio_base(ap->host); qs_enter_reg_mode(ap); writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */ } static int qs_prereset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; qs_reset_channel_logic(ap); return ata_sff_prereset(link, deadline); } static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) return -EINVAL; *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8)); return 0; } static void qs_error_handler(struct ata_port *ap) { qs_enter_reg_mode(ap); ata_sff_error_handler(ap); } static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if (sc_reg > SCR_CONTROL) return -EINVAL; writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8)); return 0; } static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) { struct scatterlist *sg; struct ata_port *ap = qc->ap; struct qs_port_priv *pp = ap->private_data; u8 *prd = pp->pkt + QS_CPB_BYTES; unsigned int si; for_each_sg(qc->sg, sg, qc->n_elem, si) { u64 addr; u32 len; addr = sg_dma_address(sg); *(__le64 *)prd = cpu_to_le64(addr); prd += sizeof(u64); len = sg_dma_len(sg); *(__le32 *)prd = cpu_to_le32(len); prd += sizeof(u64); VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si, (unsigned long long)addr, len); } return si; } static void qs_qc_prep(struct ata_queued_cmd *qc) { struct qs_port_priv *pp = qc->ap->private_data; u8 dflags = QS_DF_PORD, *buf = pp->pkt; u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD; u64 addr; unsigned int nelem; VPRINTK("ENTER\n"); qs_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) return; nelem = qs_fill_sg(qc); if ((qc->tf.flags & ATA_TFLAG_WRITE)) hflags |= QS_HF_DIRO; if ((qc->tf.flags & ATA_TFLAG_LBA48)) dflags |= QS_DF_ELBA; /* 
host control block (HCB) */ buf[ 0] = QS_HCB_HDR; buf[ 1] = hflags; *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes); *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem); addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES; *(__le64 *)(&buf[16]) = cpu_to_le64(addr); /* device control block (DCB) */ buf[24] = QS_DCB_HDR; buf[28] = dflags; /* frame information structure (FIS) */ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); } static inline void qs_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); VPRINTK("ENTER, ap %p\n", ap); writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0); wmb(); /* flush PRDs and pkt to memory */ writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF); readl(chan + QS_CCT_CFF); /* flush */ } static unsigned int qs_qc_issue(struct ata_queued_cmd *qc) { struct qs_port_priv *pp = qc->ap->private_data; switch (qc->tf.protocol) { case ATA_PROT_DMA: pp->state = qs_state_pkt; qs_packet_start(qc); return 0; case ATAPI_PROT_DMA: BUG(); break; default: break; } pp->state = qs_state_mmio; return ata_sff_qc_issue(qc); } static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status) { qc->err_mask |= ac_err_mask(status); if (!qc->err_mask) { ata_qc_complete(qc); } else { struct ata_port *ap = qc->ap; struct ata_eh_info *ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "status 0x%02X", status); if (qc->err_mask == AC_ERR_DEV) ata_port_abort(ap); else ata_port_freeze(ap); } } static inline unsigned int qs_intr_pkt(struct ata_host *host) { unsigned int handled = 0; u8 sFFE; u8 __iomem *mmio_base = qs_mmio_base(host); do { u32 sff0 = readl(mmio_base + QS_HST_SFF); u32 sff1 = readl(mmio_base + QS_HST_SFF + 4); u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */ sFFE = sff1 >> 31; /* empty flag */ if (sEVLD) { u8 sDST = sff0 >> 16; /* dev status */ u8 sHST = sff1 & 0x3f; /* host status */ unsigned int port_no = (sff1 >> 8) & 0x03; struct ata_port *ap = host->ports[port_no]; struct qs_port_priv *pp = 
ap->private_data; struct ata_queued_cmd *qc; DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", sff1, sff0, port_no, sHST, sDST); handled = 1; if (!pp || pp->state != qs_state_pkt) continue; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { switch (sHST) { case 0: /* successful CPB */ case 3: /* device error */ qs_enter_reg_mode(qc->ap); qs_do_or_die(qc, sDST); break; default: break; } } } } while (!sFFE); return handled; } static inline unsigned int qs_intr_mmio(struct ata_host *host) { unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { struct ata_port *ap = host->ports[port_no]; struct qs_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (!qc) { /* * The qstor hardware generates spurious * interrupts from time to time when switching * in and out of packet mode. There's no * obvious way to know if we're here now due * to that, so just ack the irq and pretend we * knew it was ours.. (ugh). This does not * affect packet mode. 
*/ ata_sff_check_status(ap); handled = 1; continue; } if (!pp || pp->state != qs_state_mmio) continue; if (!(qc->tf.flags & ATA_TFLAG_POLLING)) handled |= ata_sff_port_intr(ap, qc); } return handled; } static irqreturn_t qs_intr(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int handled = 0; unsigned long flags; VPRINTK("ENTER\n"); spin_lock_irqsave(&host->lock, flags); handled = qs_intr_pkt(host) | qs_intr_mmio(host); spin_unlock_irqrestore(&host->lock, flags); VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = port->data_addr = base + 0x400; port->error_addr = port->feature_addr = base + 0x408; /* hob_feature = 0x409 */ port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */ port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */ port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */ port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */ port->device_addr = base + 0x430; port->status_addr = port->command_addr = base + 0x438; port->altstatus_addr = port->ctl_addr = base + 0x440; port->scr_addr = base + 0xc00; } static int qs_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct qs_port_priv *pp; void __iomem *mmio_base = qs_mmio_base(ap->host); void __iomem *chan = mmio_base + (ap->port_no * 0x4000); u64 addr; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) return -ENOMEM; memset(pp->pkt, 0, QS_PKT_BYTES); ap->private_data = pp; qs_enter_reg_mode(ap); addr = (u64)pp->pkt_dma; writel((u32) addr, chan + QS_CCF_CPBA); writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4); return 0; } static void qs_host_stop(struct ata_host *host) { void __iomem *mmio_base = qs_mmio_base(host); writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ } 
/*
 * qs_host_init - one-time controller initialisation.
 * @host: the ata_host being brought up.
 * @chip_id: board index from the PCI id table (currently unused here).
 *
 * Disables interrupts, issues a global reset, resets every channel,
 * enables the PHY and programs per-channel FIFO depths and CPB size
 * before finally re-enabling host interrupts.  The ordering of these
 * MMIO writes follows the hardware bring-up sequence and must not be
 * rearranged.
 */
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn; each channel owns a 0x4000 window */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);        /* flush posted write */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	/* the chip reports its bus width in the HPHY register */
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			/* fall back to a 32-bit consistent mask before
			 * giving up on 64-bit entirely */
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

/*
 * qs_ata_init_one - PCI probe entry point.
 * @pdev: the PCI device being probed.
 * @ent:  matching entry from the PCI id table; driver_data selects
 *        the board-specific port_info.
 *
 * Allocates the ata_host, maps the MMIO BAR (managed, so teardown is
 * automatic), configures DMA masks, wires up per-port I/O addresses,
 * initialises the hardware and activates the host with a shared IRQ.
 * Returns 0 on success or a negative errno.
 */
static int qs_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* the MMIO BAR must really be memory-mapped, not I/O ports */
	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		/* each port's register block is a 0x4000 slice of the BAR */
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}
module_pci_driver(qs_ata_pci_driver); MODULE_AUTHOR("Mark Lord"); MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
jcadduono/android_kernel_oneplus_msm8996
drivers/usb/storage/cypress_atacb.c
3292
8692
/*
 * Support for emulating SAT (ata pass through) on devices based
 * on the Cypress USB/ATA bridge supporting ATACB.
 *
 * Copyright (c) 2008 Matthieu Castet (castet.matthieu@free.fr)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <linux/ata.h>

#include "usb.h"
#include "protocol.h"
#include "scsiglue.h"
#include "debug.h"

MODULE_DESCRIPTION("SAT support for Cypress USB/ATA bridges with ATACB");
MODULE_AUTHOR("Matthieu Castet <castet.matthieu@free.fr>");
MODULE_LICENSE("GPL");

/*
 * The table of devices.  unusual_cypress.h is included twice: once here
 * (expanding UNUSUAL_DEV to usb_device_id entries) and once below
 * (expanding it to us_unusual_dev entries), so both tables stay in sync.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

static struct usb_device_id cypress_usb_ids[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, cypress_usb_ids);

#undef UNUSUAL_DEV

/*
 * The flags table
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev cypress_unusual_dev_list[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV

/*
 * ATACB is a protocol used on cypress usb<->ata bridge to
 * send raw ATA command over mass storage.
 * There is an ATACB2 protocol that supports LBA48 on newer chips.
 * More info can be found in the cy7c68310_8.pdf and cy7c68300c_8.pdf
 * datasheets from cypress.com.
 */

/*
 * cypress_atacb_passthrough - protocol handler that translates
 * SCSI ATA_12/ATA_16 pass-through CDBs into the vendor-specific
 * ATACB command understood by the bridge.
 *
 * Any other command is forwarded unchanged to the transparent SCSI
 * handler.  On unsupported pass-through variants the command fails
 * with ILLEGAL REQUEST / INVALID FIELD IN CDB sense data.  The original
 * CDB is always restored into srb->cmnd before returning.
 */
static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
{
	unsigned char save_cmnd[MAX_COMMAND_SIZE];

	if (likely(srb->cmnd[0] != ATA_16 && srb->cmnd[0] != ATA_12)) {
		usb_stor_transparent_scsi_command(srb, us);
		return;
	}

	/* keep the caller's CDB so it can be restored at "end:" */
	memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
	memset(srb->cmnd, 0, MAX_COMMAND_SIZE);

	/* check if we support the command */
	if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
		goto invalid_fld;

	/* check protocol: only non-data and PIO transfers are supported */
	switch((save_cmnd[1] >> 1) & 0xf) {
		case 3: /*no DATA */
		case 4: /* PIO in */
		case 5: /* PIO out */
			break;
		default:
			goto invalid_fld;
	}

	/* first build the ATACB command */
	srb->cmd_len = 16;

	srb->cmnd[0] = 0x24; /* bVSCBSignature : vendor-specific command
				this value can change, but most(all ?)
				manufacturers keep the cypress default : 0x24 */
	srb->cmnd[1] = 0x24; /* bVSCBSubCommand : 0x24 for ATACB */

	srb->cmnd[3] = 0xff - 1; /* features, sector count, lba low, lba med
				    lba high, device, command are valid */
	srb->cmnd[4] = 1; /* TransferBlockCount : 512 */

	if (save_cmnd[0] == ATA_16) {
		srb->cmnd[ 6] = save_cmnd[ 4]; /* features */
		srb->cmnd[ 7] = save_cmnd[ 6]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[ 8]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[10]; /* lba med */
		srb->cmnd[10] = save_cmnd[12]; /* lba high */
		srb->cmnd[11] = save_cmnd[13]; /* device */
		srb->cmnd[12] = save_cmnd[14]; /* command */

		if (save_cmnd[1] & 0x01) {/* extended bit set for LBA48 */
			/* this could be supported by atacb2 */
			if (save_cmnd[3] || save_cmnd[5] || save_cmnd[7] || save_cmnd[9]
					|| save_cmnd[11])
				goto invalid_fld;
		}
	} else { /* ATA12 */
		srb->cmnd[ 6] = save_cmnd[3]; /* features */
		srb->cmnd[ 7] = save_cmnd[4]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[5]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[6]; /* lba med */
		srb->cmnd[10] = save_cmnd[7]; /* lba high */
		srb->cmnd[11] = save_cmnd[8]; /* device */
		srb->cmnd[12] = save_cmnd[9]; /* command */
	}
	/* Filter SET_FEATURES - XFER MODE command */
	if ((srb->cmnd[12] == ATA_CMD_SET_FEATURES)
			&& (srb->cmnd[6] == SETFEATURES_XFER))
		goto invalid_fld;

	if (srb->cmnd[12] == ATA_CMD_ID_ATA || srb->cmnd[12] == ATA_CMD_ID_ATAPI)
		srb->cmnd[2] |= (1<<7); /* set  IdentifyPacketDevice for these cmds */

	usb_stor_transparent_scsi_command(srb, us);

	/* if the device doesn't support ATACB */
	if (srb->result == SAM_STAT_CHECK_CONDITION &&
			memcmp(srb->sense_buffer, usb_stor_sense_invalidCDB,
				sizeof(usb_stor_sense_invalidCDB)) == 0) {
		usb_stor_dbg(us, "cypress atacb not supported ???\n");
		goto end;
	}

	/* if ck_cond flags is set, and there wasn't critical error,
	 * build the special sense
	 */
	if ((srb->result != (DID_ERROR << 16) &&
				srb->result != (DID_ABORT << 16)) &&
			save_cmnd[2] & 0x20) {
		struct scsi_eh_save ses;
		unsigned char regs[8];
		unsigned char *sb = srb->sense_buffer;
		unsigned char *desc = sb + 8;
		int tmp_result;

		/* build the command for
		 * reading the ATA registers */
		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));

		/* we use the same command as before, but we set
		 * the read taskfile bit, for not executing atacb command,
		 * but reading register selected in srb->cmnd[4]
		 */
		srb->cmd_len = 16;
		srb->cmnd = ses.cmnd;
		srb->cmnd[2] = 1;

		usb_stor_transparent_scsi_command(srb, us);
		memcpy(regs, srb->sense_buffer, sizeof(regs));
		tmp_result = srb->result;
		scsi_eh_restore_cmnd(srb, &ses);
		/* we fail to get registers, report invalid command */
		if (tmp_result != SAM_STAT_GOOD)
			goto invalid_fld;

		/* build the sense */
		memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

		/* set sk, asc for a good command */
		sb[1] = RECOVERED_ERROR;
		sb[2] = 0; /* ATA PASS THROUGH INFORMATION AVAILABLE */
		sb[3] = 0x1D;

		/* XXX we should generate sk, asc, ascq from status and error
		 * regs
		 * (see 11.1 Error translation ATA device error to SCSI error
		 *  map, and ata_to_sense_error from libata.)
		 */

		/* Sense data is current and format is descriptor. */
		sb[0] = 0x72;
		desc[0] = 0x09; /* ATA_RETURN_DESCRIPTOR */

		/* set length of additional sense data */
		sb[7] = 14;
		desc[1] = 12;

		/* Copy registers into sense buffer. */
		desc[ 2] = 0x00;
		desc[ 3] = regs[1];  /* features */
		desc[ 5] = regs[2];  /* sector count */
		desc[ 7] = regs[3];  /* lba low */
		desc[ 9] = regs[4];  /* lba med */
		desc[11] = regs[5];  /* lba high */
		desc[12] = regs[6];  /* device */
		desc[13] = regs[7];  /* command */

		srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
	}
	goto end;
invalid_fld:
	srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	memcpy(srb->sense_buffer,
			usb_stor_sense_invalidCDB,
			sizeof(usb_stor_sense_invalidCDB));
end:
	/* restore the caller's original CDB */
	memcpy(srb->cmnd, save_cmnd, sizeof(save_cmnd));
	if (srb->cmnd[0] == ATA_12)
		srb->cmd_len = 12;
}

/*
 * cypress_probe - USB probe entry point.
 *
 * Registers with usb-storage and selects the protocol handler:
 * the CY7C68300A revision (identified by its EEPROM-default string
 * descriptor indices 0x38/0x4e/0x64) does not support ATACB, so it
 * falls back to plain transparent SCSI.
 */
static int cypress_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;
	struct usb_device *device;

	result = usb_stor_probe1(&us, intf, id,
			(id - cypress_usb_ids) + cypress_unusual_dev_list);
	if (result)
		return result;

	/* Among CY7C68300 chips, the A revision does not support Cypress ATACB
	 * Filter out this revision from EEPROM default descriptor values
	 */
	device = interface_to_usbdev(intf);
	if (device->descriptor.iManufacturer != 0x38 ||
	    device->descriptor.iProduct != 0x4e ||
	    device->descriptor.iSerialNumber != 0x64) {
		us->protocol_name = "Transparent SCSI with Cypress ATACB";
		us->proto_handler = cypress_atacb_passthrough;
	} else {
		us->protocol_name = "Transparent SCSI";
		us->proto_handler = usb_stor_transparent_scsi_command;
	}

	result = usb_stor_probe2(us);
	return result;
}

static struct usb_driver cypress_driver = {
	.name =		"ums-cypress",
	.probe =	cypress_probe,
	.disconnect =	usb_stor_disconnect,
	.suspend =	usb_stor_suspend,
	.resume =	usb_stor_resume,
	.reset_resume =	usb_stor_reset_resume,
	.pre_reset =	usb_stor_pre_reset,
	.post_reset =	usb_stor_post_reset,
	.id_table =	cypress_usb_ids,
	.soft_unbind =	1,
	.no_dynamic_id = 1,
};

module_usb_driver(cypress_driver);
gpl-2.0
freexperia/android_kernel_sony_msm8960t
arch/arm/mach-tegra/board-trimslice-pinmux.c
4828
12203
/* * arch/arm/mach-tegra/board-trimslice-pinmux.c * * Copyright (C) 2011 CompuLab, Ltd. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <mach/pinmux.h> #include <mach/pinmux-tegra20.h> #include "gpio-names.h" #include "board-pinmux.h" #include "board-trimslice.h" static struct tegra_pingroup_config trimslice_pinmux[] = { {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, 
TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMB, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_GMC, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GME, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_GPU, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LCSN, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, 
TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, 
TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_PTA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, 
TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SPDI, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPDO, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIA, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIB, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIC, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_XM2D, 
TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, }; static struct tegra_gpio_table gpio_table[] = { { .gpio = TRIMSLICE_GPIO_SD4_CD, .enable = true }, /* mmc4 cd */ { .gpio = TRIMSLICE_GPIO_SD4_WP, .enable = true }, /* mmc4 wp */ { .gpio = TRIMSLICE_GPIO_USB1_MODE, .enable = true }, /* USB1 mode */ { .gpio = TRIMSLICE_GPIO_USB2_RST, .enable = true }, /* USB2 PHY rst */ }; static struct tegra_board_pinmux_conf conf = { .pgs = trimslice_pinmux, .pg_count = ARRAY_SIZE(trimslice_pinmux), .gpios = gpio_table, .gpio_count = ARRAY_SIZE(gpio_table), }; void trimslice_pinmux_init(void) { tegra_board_pinmux_init(&conf, NULL); }
gpl-2.0
mtshima/Victara-CM-kernel
drivers/hwmon/s3c-hwmon.c
5084
10508
/* linux/drivers/hwmon/s3c-hwmon.c
 *
 * Copyright (C) 2005, 2008, 2009 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C24XX/S3C64XX ADC hwmon support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <plat/adc.h>
#include <plat/hwmon.h>

/**
 * struct s3c_hwmon_attr - sysfs attribute pair for one ADC channel
 * @in: The scaled "inN_input" attribute.
 * @label: The "inN_label" attribute (only created if the channel is named).
 * @in_name: Backing storage for the input attribute name ("in7_input"
 *	fits with the terminator, as channels run 0..7).
 * @label_name: Backing storage for the label attribute name.
 */
struct s3c_hwmon_attr {
	struct sensor_device_attribute	in;
	struct sensor_device_attribute	label;
	char				in_name[12];
	char				label_name[12];
};

/**
 * struct s3c_hwmon - ADC hwmon client information
 * @lock: Access lock to serialise the conversions.
 * @client: The client we registered with the S3C ADC core.
 * @hwmon_dev: The hwmon device we created.
 * @attrs: The holders for the channel attributes, one per ADC channel.
 */
struct s3c_hwmon {
	struct mutex		lock;
	struct s3c_adc_client	*client;
	struct device		*hwmon_dev;

	struct s3c_hwmon_attr	attrs[8];
};

/**
 * s3c_hwmon_read_ch - read a value from a given adc channel.
 * @dev: The device.
 * @hwmon: Our state.
 * @channel: The channel we're reading from.
 *
 * Read a value from the @channel with the proper locking and sleep until
 * either the read completes or we timeout awaiting the ADC core to get
 * back to us.
 */
static int s3c_hwmon_read_ch(struct device *dev,
			     struct s3c_hwmon *hwmon, int channel)
{
	int ret;

	/* interruptible so a blocked reader can be signalled away */
	ret = mutex_lock_interruptible(&hwmon->lock);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "reading channel %d\n", channel);

	ret = s3c_adc_read(hwmon->client, channel);
	mutex_unlock(&hwmon->lock);

	return ret;
}

#ifdef CONFIG_SENSORS_S3C_RAW
/**
 * s3c_hwmon_show_raw - show a conversion from the raw channel number.
 * @dev: The device that the attribute belongs to.
 * @attr: The attribute being read.
 * @buf: The result buffer.
 *
 * This show deals with the raw attribute, registered for each possible
 * ADC channel. This does a conversion and returns the raw (un-scaled)
 * value returned from the hardware.
 */
static ssize_t s3c_hwmon_show_raw(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct s3c_hwmon *adc = platform_get_drvdata(to_platform_device(dev));
	struct sensor_device_attribute *sa = to_sensor_dev_attr(attr);
	int ret;

	ret = s3c_hwmon_read_ch(dev, adc, sa->index);

	/* negative return is an error code, pass it straight through */
	return (ret < 0) ? ret : snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

#define DEF_ADC_ATTR(x)	\
	static SENSOR_DEVICE_ATTR(adc##x##_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, x)

DEF_ADC_ATTR(0);
DEF_ADC_ATTR(1);
DEF_ADC_ATTR(2);
DEF_ADC_ATTR(3);
DEF_ADC_ATTR(4);
DEF_ADC_ATTR(5);
DEF_ADC_ATTR(6);
DEF_ADC_ATTR(7);

/* eight raw-channel attributes plus the NULL terminator */
static struct attribute *s3c_hwmon_attrs[9] = {
	&sensor_dev_attr_adc0_raw.dev_attr.attr,
	&sensor_dev_attr_adc1_raw.dev_attr.attr,
	&sensor_dev_attr_adc2_raw.dev_attr.attr,
	&sensor_dev_attr_adc3_raw.dev_attr.attr,
	&sensor_dev_attr_adc4_raw.dev_attr.attr,
	&sensor_dev_attr_adc5_raw.dev_attr.attr,
	&sensor_dev_attr_adc6_raw.dev_attr.attr,
	&sensor_dev_attr_adc7_raw.dev_attr.attr,
	NULL,
};

static struct attribute_group s3c_hwmon_attrgroup = {
	.attrs	= s3c_hwmon_attrs,
};

static inline int s3c_hwmon_add_raw(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &s3c_hwmon_attrgroup);
}

static inline void s3c_hwmon_remove_raw(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &s3c_hwmon_attrgroup);
}

#else

/* raw-attribute support compiled out: stubs keep the callers unchanged */

static inline int s3c_hwmon_add_raw(struct device *dev) { return 0; }
static inline void s3c_hwmon_remove_raw(struct device *dev) { }

#endif /* CONFIG_SENSORS_S3C_RAW */

/**
 * s3c_hwmon_ch_show - show value of a given channel
 * @dev: The device that the attribute belongs to.
 * @attr: The attribute being read.
 * @buf: The result buffer.
 *
 * Read a value from the ADC and scale it before returning it to the
 * caller. The scale factor is gained from the channel configuration
 * passed via the platform data when the device was registered.
 */
static ssize_t s3c_hwmon_ch_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
	struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
	struct s3c_hwmon_pdata *pdata = dev->platform_data;
	struct s3c_hwmon_chcfg *cfg;
	int ret;

	cfg = pdata->in[sen_attr->index];

	ret = s3c_hwmon_read_ch(dev, hwmon, sen_attr->index);
	if (ret < 0)
		return ret;

	/* scale the raw reading by the board-supplied mult/div pair;
	 * div is checked for zero at probe time */
	ret *= cfg->mult;
	ret = DIV_ROUND_CLOSEST(ret, cfg->div);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

/**
 * s3c_hwmon_label_show - show label name of the given channel.
 * @dev: The device that the attribute belongs to.
 * @attr: The attribute being read.
 * @buf: The result buffer.
 *
 * Return the label name of a given channel
 */
static ssize_t s3c_hwmon_label_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
	struct s3c_hwmon_pdata *pdata = dev->platform_data;
	struct s3c_hwmon_chcfg *cfg;

	cfg = pdata->in[sen_attr->index];

	return snprintf(buf, PAGE_SIZE, "%s\n", cfg->name);
}

/**
 * s3c_hwmon_create_attr - create hwmon attribute for given channel.
 * @dev: The device to create the attribute on.
 * @cfg: The channel configuration passed from the platform data.
 * @attrs: The attribute holder for this channel.
 * @channel: The ADC channel number to process.
 *
 * Create the scaled attribute for use with hwmon from the specified
 * platform data in @pdata. The sysfs entry is handled by the routine
 * s3c_hwmon_ch_show().
 *
 * The attribute name is taken from the configuration data if present
 * otherwise the name is taken by concatenating in_ with the channel
 * number.
 */
static int s3c_hwmon_create_attr(struct device *dev,
				 struct s3c_hwmon_chcfg *cfg,
				 struct s3c_hwmon_attr *attrs,
				 int channel)
{
	struct sensor_device_attribute *attr;
	int ret;

	snprintf(attrs->in_name, sizeof(attrs->in_name), "in%d_input", channel);

	attr = &attrs->in;
	attr->index = channel;
	sysfs_attr_init(&attr->dev_attr.attr);
	attr->dev_attr.attr.name  = attrs->in_name;
	attr->dev_attr.attr.mode  = S_IRUGO;
	attr->dev_attr.show = s3c_hwmon_ch_show;

	ret = device_create_file(dev, &attr->dev_attr);
	if (ret < 0) {
		dev_err(dev, "failed to create input attribute\n");
		return ret;
	}

	/* if this has a name, add a label */
	if (cfg->name) {
		snprintf(attrs->label_name, sizeof(attrs->label_name),
			 "in%d_label", channel);

		attr = &attrs->label;
		attr->index = channel;
		sysfs_attr_init(&attr->dev_attr.attr);
		attr->dev_attr.attr.name  = attrs->label_name;
		attr->dev_attr.attr.mode  = S_IRUGO;
		attr->dev_attr.show = s3c_hwmon_label_show;

		ret = device_create_file(dev, &attr->dev_attr);
		if (ret < 0) {
			/* undo the input attribute created above */
			device_remove_file(dev, &attrs->in.dev_attr);
			dev_err(dev, "failed to create label attribute\n");
		}
	}

	return ret;
}

/* Remove both attributes for one channel; removing a file that was
 * never created is harmless, so this is safe for unnamed channels. */
static void s3c_hwmon_remove_attr(struct device *dev,
				  struct s3c_hwmon_attr *attrs)
{
	device_remove_file(dev, &attrs->in.dev_attr);
	device_remove_file(dev, &attrs->label.dev_attr);
}

/**
 * s3c_hwmon_probe - device probe entry.
 * @dev: The device being probed.
 */
static int __devinit s3c_hwmon_probe(struct platform_device *dev)
{
	struct s3c_hwmon_pdata *pdata = dev->dev.platform_data;
	struct s3c_hwmon *hwmon;
	int ret = 0;
	int i;

	if (!pdata) {
		dev_err(&dev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	hwmon = kzalloc(sizeof(struct s3c_hwmon), GFP_KERNEL);
	if (hwmon == NULL) {
		dev_err(&dev->dev, "no memory\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev, hwmon);

	mutex_init(&hwmon->lock);

	/* Register with the core ADC driver. */
	hwmon->client = s3c_adc_register(dev, NULL, NULL, 0);
	if (IS_ERR(hwmon->client)) {
		dev_err(&dev->dev, "cannot register adc\n");
		ret = PTR_ERR(hwmon->client);
		goto err_mem;
	}

	/* add attributes for our adc devices. */
	ret = s3c_hwmon_add_raw(&dev->dev);
	if (ret)
		goto err_registered;

	/* register with the hwmon core */
	hwmon->hwmon_dev = hwmon_device_register(&dev->dev);
	if (IS_ERR(hwmon->hwmon_dev)) {
		dev_err(&dev->dev, "error registering with hwmon\n");
		ret = PTR_ERR(hwmon->hwmon_dev);
		goto err_raw_attribute;
	}

	for (i = 0; i < ARRAY_SIZE(pdata->in); i++) {
		struct s3c_hwmon_chcfg *cfg = pdata->in[i];

		if (!cfg)
			continue;

		/* warn about multipliers that would overflow the scaling */
		if (cfg->mult >= 0x10000)
			dev_warn(&dev->dev,
				 "channel %d multiplier too large\n",
				 i);

		if (cfg->div == 0) {
			dev_err(&dev->dev, "channel %d divider zero\n", i);
			continue;
		}

		ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i],
					    &hwmon->attrs[i], i);
		if (ret) {
			dev_err(&dev->dev,
					"error creating channel %d\n", i);

			/* unwind the channels created so far */
			for (i--; i >= 0; i--)
				s3c_hwmon_remove_attr(&dev->dev,
							      &hwmon->attrs[i]);

			goto err_hwmon_register;
		}
	}

	return 0;

 err_hwmon_register:
	hwmon_device_unregister(hwmon->hwmon_dev);

 err_raw_attribute:
	s3c_hwmon_remove_raw(&dev->dev);

 err_registered:
	s3c_adc_release(hwmon->client);

 err_mem:
	kfree(hwmon);
	return ret;
}

static int __devexit s3c_hwmon_remove(struct platform_device *dev)
{
	struct s3c_hwmon *hwmon = platform_get_drvdata(dev);
	int i;

	s3c_hwmon_remove_raw(&dev->dev);

	for (i = 0; i < ARRAY_SIZE(hwmon->attrs); i++)
		s3c_hwmon_remove_attr(&dev->dev, &hwmon->attrs[i]);

	hwmon_device_unregister(hwmon->hwmon_dev);
	s3c_adc_release(hwmon->client);

	return 0;
}

static struct platform_driver s3c_hwmon_driver = {
	.driver	= {
		.name		= "s3c-hwmon",
		.owner		= THIS_MODULE,
	},
	.probe		= s3c_hwmon_probe,
	.remove		= __devexit_p(s3c_hwmon_remove),
};

module_platform_driver(s3c_hwmon_driver);

MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:s3c-hwmon");
gpl-2.0
TeamGlade-Devices/android_kernel_sony_msm8930
drivers/mtd/devices/phram.c
7388
6172
/**
 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
 * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
 *
 * Usage:
 *
 * one command line parameter per device, each in the form:
 *   phram=<name>,<start>,<len>
 * <name> may be up to 63 characters.
 * <start> and <len> can be octal, decimal or hexadecimal. If followed
 * by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or
 * gigabytes.
 *
 * Example:
 *	phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/io.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>

/* One registered phram device: its mtd_info plus the global-list link. */
struct phram_mtd_list {
	struct mtd_info mtd;
	struct list_head list;
};

static LIST_HEAD(phram_list);

/* "Erase" is just filling the RAM region with 0xff, then completing
 * the request synchronously. */
static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	u_char *start = mtd->priv;

	memset(start + instr->addr, 0xff, instr->len);

	/*
	 * This'll catch a few races. Free the thing before returning :)
	 * I don't feel at all ashamed. This kind of thing is possible anyway
	 * with flash, but unlikely.
	 */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
	return 0;
}

/* Direct-map access: hand back a pointer into the ioremapped region. */
static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	*virt = mtd->priv + from;
	*retlen = len;
	return 0;
}

static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return 0;
}

static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	u_char *start = mtd->priv;

	memcpy(buf, start + from, len);
	*retlen = len;
	return 0;
}

static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	u_char *start = mtd->priv;

	memcpy(start + to, buf, len);
	*retlen = len;
	return 0;
}

/* Unregister and free every device on phram_list (module unload path). */
static void unregister_devices(void)
{
	struct phram_mtd_list *this, *safe;

	list_for_each_entry_safe(this, safe, &phram_list, list) {
		mtd_device_unregister(&this->mtd);
		iounmap(this->mtd.priv);
		kfree(this->mtd.name);
		kfree(this);
	}
}

/*
 * Map [start, start+len) and register it as an MTD_RAM device.
 * Takes ownership of @name on success (freed in unregister_devices());
 * on failure @name is NOT freed — the caller frees it.
 */
static int register_device(char *name, unsigned long start, unsigned long len)
{
	struct phram_mtd_list *new;
	int ret = -ENOMEM;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out0;

	ret = -EIO;
	new->mtd.priv = ioremap(start, len);
	if (!new->mtd.priv) {
		pr_err("ioremap failed\n");
		goto out1;
	}

	new->mtd.name = name;
	new->mtd.size = len;
	new->mtd.flags = MTD_CAP_RAM;
	new->mtd._erase = phram_erase;
	new->mtd._point = phram_point;
	new->mtd._unpoint = phram_unpoint;
	new->mtd._read = phram_read;
	new->mtd._write = phram_write;
	new->mtd.owner = THIS_MODULE;
	new->mtd.type = MTD_RAM;
	new->mtd.erasesize = PAGE_SIZE;
	new->mtd.writesize = 1;

	ret = -EAGAIN;
	if (mtd_device_register(&new->mtd, NULL, 0)) {
		pr_err("Failed to register new device\n");
		goto out2;
	}

	list_add_tail(&new->list, &phram_list);
	return 0;

out2:
	iounmap(new->mtd.priv);
out1:
	kfree(new);
out0:
	return ret;
}

/*
 * strtoul with "ki"/"Mi"/"Gi" binary-suffix handling.  The cases fall
 * through deliberately so 'G' multiplies by 1024 three times, 'M' twice,
 * 'k' once.
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);

	switch (**endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i')
			(*endp) += 2;
	}
	return result;
}

/* Parse a full number token into *num32; -EINVAL on trailing garbage. */
static int parse_num32(uint32_t *num32, const char *token)
{
	char *endp;
	unsigned long n;

	n = ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num32 = n;
	return 0;
}

/* Duplicate the device-name token (max 63 chars + NUL) into *pname. */
static int parse_name(char **pname, const char *token)
{
	size_t len;
	char *name;

	len = strlen(token) + 1;
	if (len > 64)
		return -ENOSPC;

	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strcpy(name, token);

	*pname = name;
	return 0;
}

/* Strip a single trailing newline, if present. */
static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');

	if (newline && !newline[1])
		*newline = 0;
}

#define parse_err(fmt, args...) do {	\
	pr_err(fmt , ## args);		\
	return 1;			\
} while (0)

/*
 * This shall contain the module parameter if any. It is of the form:
 * - phram=<device>,<address>,<size> for module case
 * - phram.phram=<device>,<address>,<size> for built-in case
 * We leave 64 bytes for the device name, 12 for the address and 12 for the
 * size.
 * Example: phram.phram=rootfs,0xa0000000,512Mi
 */
static __initdata char phram_paramline[64+12+12];

/* Parse "name,start,len" and register the described device. */
static int __init phram_setup(const char *val)
{
	char buf[64+12+12], *str = buf;
	char *token[3];
	char *name;
	uint32_t start;
	uint32_t len;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
		parse_err("parameter too long\n");

	strcpy(str, val);
	kill_final_newline(str);

	for (i=0; i<3; i++)
		token[i] = strsep(&str, ",");

	if (str)
		parse_err("too many arguments\n");

	if (!token[2])
		parse_err("not enough arguments\n");

	ret = parse_name(&name, token[0]);
	if (ret)
		return ret;

	ret = parse_num32(&start, token[1]);
	if (ret) {
		kfree(name);
		parse_err("illegal start address\n");
	}

	ret = parse_num32(&len, token[2]);
	if (ret) {
		kfree(name);
		parse_err("illegal device length\n");
	}

	ret = register_device(name, start, len);
	if (!ret)
		pr_info("%s device: %#x at %#x\n", name, len, start);
	else
		kfree(name);

	return ret;
}

/* Stash the parameter string; actual parsing is deferred to init_phram(). */
static int __init phram_param_call(const char *val, struct kernel_param *kp)
{
	/*
	 * This function is always called before 'init_phram()', whether
	 * built-in or module.
	 */
	if (strlen(val) >= sizeof(phram_paramline))
		return -ENOSPC;
	strcpy(phram_paramline, val);

	return 0;
}

module_param_call(phram, phram_param_call, NULL, NULL, 000);
MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");

static int __init init_phram(void)
{
	if (phram_paramline[0])
		return phram_setup(phram_paramline);

	return 0;
}

static void __exit cleanup_phram(void)
{
	unregister_devices();
}

module_init(init_phram);
module_exit(cleanup_phram);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
MODULE_DESCRIPTION("MTD driver for physical RAM");
gpl-2.0
mythos234/SimplKernel-LL-G925F
sound/aoa/codecs/toonie.c
11996
3368
/*
 * Apple Onboard Audio driver for Toonie codec
 *
 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
 *
 * GPL v2, can be found in COPYING.
 *
 *
 * This is a driver for the toonie codec chip. This chip is present
 * on the Mac Mini and is nothing but a DAC.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("toonie codec driver for snd-aoa");

#include "../aoa.h"
#include "../soundbus/soundbus.h"

#define PFX "snd-aoa-codec-toonie: "

/* Driver state: just the embedded aoa_codec, nothing chip-specific. */
struct toonie {
	struct aoa_codec	codec;
};
#define codec_to_toonie(c) container_of(c, struct toonie, codec)

/* The chip needs no setup at snd_device registration time. */
static int toonie_dev_register(struct snd_device *dev)
{
	return 0;
}

static struct snd_device_ops ops = {
	.dev_register = toonie_dev_register,
};

static struct transfer_info toonie_transfers[] = {
	/* This thing *only* has analog output,
	 * the rates are taken from Info.plist
	 * from Darwin. */
	{
		.formats = SNDRV_PCM_FMTBIT_S16_BE |
			   SNDRV_PCM_FMTBIT_S24_BE,
		.rates = SNDRV_PCM_RATE_32000 |
			 SNDRV_PCM_RATE_44100 |
			 SNDRV_PCM_RATE_48000 |
			 SNDRV_PCM_RATE_88200 |
			 SNDRV_PCM_RATE_96000,
	},
	{}
};

/* Every advertised transfer mode is usable as-is. */
static int toonie_usable(struct codec_info_item *cii,
			 struct transfer_info *ti,
			 struct transfer_info *out)
{
	return 1;
}

#ifdef CONFIG_PM
static int toonie_suspend(struct codec_info_item *cii, pm_message_t state)
{
	/* can we turn it off somehow? */
	return 0;
}

static int toonie_resume(struct codec_info_item *cii)
{
	return 0;
}
#endif /* CONFIG_PM */

static struct codec_info toonie_codec_info = {
	.transfers = toonie_transfers,
	.sysclock_factor = 256,
	.bus_factor = 64,
	.owner = THIS_MODULE,
	.usable = toonie_usable,
#ifdef CONFIG_PM
	.suspend = toonie_suspend,
	.resume = toonie_resume,
#endif
};

/*
 * Called by the aoa core once the codec has been wired up to a fabric.
 * Creates the snd device and attaches the codec to its soundbus device.
 */
static int toonie_init_codec(struct aoa_codec *codec)
{
	struct toonie *toonie = codec_to_toonie(codec);

	/* nothing connected? what a joke! */
	if (toonie->codec.connected != 1)
		return -ENOTCONN;

	if (aoa_snd_device_new(SNDRV_DEV_LOWLEVEL, toonie, &ops)) {
		printk(KERN_ERR PFX "failed to create toonie snd device!\n");
		return -ENODEV;
	}

	if (toonie->codec.soundbus_dev->attach_codec(toonie->codec.soundbus_dev,
						     aoa_get_card(),
						     &toonie_codec_info, toonie)) {
		printk(KERN_ERR PFX "error creating toonie pcm\n");
		/* undo the snd device created above */
		snd_device_free(aoa_get_card(), toonie);
		return -ENODEV;
	}

	return 0;
}

/* Detach the codec from its soundbus device (reverse of init). */
static void toonie_exit_codec(struct aoa_codec *codec)
{
	struct toonie *toonie = codec_to_toonie(codec);

	if (!toonie->codec.soundbus_dev) {
		printk(KERN_ERR PFX "toonie_exit_codec called without soundbus_dev!\n");
		return;
	}
	toonie->codec.soundbus_dev->detach_codec(toonie->codec.soundbus_dev,
						 toonie);
}

/* Single module-global instance; only one Toonie chip exists per machine. */
static struct toonie *toonie;

static int __init toonie_init(void)
{
	toonie = kzalloc(sizeof(struct toonie), GFP_KERNEL);

	if (!toonie)
		return -ENOMEM;

	strlcpy(toonie->codec.name, "toonie", sizeof(toonie->codec.name));
	toonie->codec.owner = THIS_MODULE;
	toonie->codec.init = toonie_init_codec;
	toonie->codec.exit = toonie_exit_codec;

	if (aoa_codec_register(&toonie->codec)) {
		kfree(toonie);
		return -EINVAL;
	}

	return 0;
}

static void __exit toonie_exit(void)
{
	aoa_codec_unregister(&toonie->codec);
	kfree(toonie);
}

module_init(toonie_init);
module_exit(toonie_exit);
gpl-2.0
Ateeq72/hTC_Pico_Kernel
arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
13532
2592
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/**
 *
 * Fixes and workaround for Octeon chip errata. This file
 * contains functions called by cvmx-helper to workaround known
 * chip errata. For the most part, code doesn't need to call
 * these functions directly.
 *
 */
#include <linux/module.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-helper-jtag.h>

/**
 * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
 * 1 doesn't work properly. The following code disables 2nd order
 * CDR for the specified QLM.
 *
 * @qlm:    QLM to disable 2nd order CDR for.
 */
void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
{
	int lane;
	cvmx_helper_qlm_jtag_init();
	/* We need to load all four lanes of the QLM, a total of 1072 bits */
	for (lane = 0; lane < 4; lane++) {
		/*
		 * Each lane has 268 bits. We need to set
		 * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
		 * 1. All other bits are zero. Bits go in LSB first,
		 * so start off with the zeros for bits <63:0>.
		 */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
		/* cfg_cdr_incx<67:64>=3 */
		cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
		/* Zeros for bits <76:68> */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
		/* cfg_cdr_secord<77>=1 */
		cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
		/* Zeros for bits <267:78> */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
	}
	/* Latch the shifted chain into the QLM configuration registers. */
	cvmx_helper_qlm_jtag_update(qlm);
}
EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
gpl-2.0
shao2610/us780
drivers/scsi/fnic/vnic_wq.c
14044
4356
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include "vnic_dev.h"
#include "vnic_wq.h"

/*
 * Allocate the per-descriptor buf bookkeeping blocks and link them into
 * a circular list: the last entry points back to bufs[0], the last entry
 * of each block points at the first entry of the next block.
 * Partially-allocated blocks on failure are freed by vnic_wq_free(),
 * which the caller (vnic_wq_alloc) invokes on error.
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	vdev = wq->vdev;

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc wq_bufs\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* last descriptor wraps back to the first */
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
				/* end of block: chain to next block */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}

/* Release the descriptor ring and all buf blocks; kfree(NULL) is safe,
 * so this also cleans up after a partial vnic_wq_alloc_bufs(). */
void vnic_wq_free(struct vnic_wq *wq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = wq->vdev;

	vnic_dev_free_desc_ring(vdev, &wq->ring);

	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
		kfree(wq->bufs[i]);
		wq->bufs[i] = NULL;
	}

	wq->ctrl = NULL;
}

/*
 * Look up the WQ control registers, disable the queue, then allocate the
 * descriptor ring and buf bookkeeping.  On any failure after the ring is
 * allocated, everything is released via vnic_wq_free().
 */
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = index;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
	if (!wq->ctrl) {
		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_wq_disable(wq);

	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_wq_alloc_bufs(wq);
	if (err) {
		vnic_wq_free(wq);
		return err;
	}

	return 0;
}

/* Program the WQ control registers: ring base/size, indexes and
 * error-interrupt configuration. */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
}

unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}

void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}

/* Request disable, then poll the running bit for up to ~100us;
 * returns 0 on success or -ETIMEDOUT if the HW never stops. */
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&wq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}

/*
 * Drain all in-flight buffers through @buf_clean and reset the queue
 * state.  Must only be called with the queue disabled (BUG otherwise).
 */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
gpl-2.0
DengueTim/linux-rockchip
drivers/staging/lustre/lustre/lclient/lcommon_cl.c
221
33929
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * cl code shared between vvp and liblustre (and other Lustre clients in the * future). 
* * Author: Nikita Danilov <nikita.danilov@sun.com> */ #define DEBUG_SUBSYSTEM S_LLITE #include "../../include/linux/libcfs/libcfs.h" # include <linux/fs.h> # include <linux/sched.h> # include <linux/mm.h> # include <linux/quotaops.h> # include <linux/highmem.h> # include <linux/pagemap.h> # include <linux/rbtree.h> #include "../include/obd.h" #include "../include/obd_support.h" #include "../include/lustre_fid.h" #include "../include/lustre_lite.h" #include "../include/lustre_dlm.h" #include "../include/lustre_ver.h" #include "../include/lustre_mdc.h" #include "../include/cl_object.h" #include "../include/lclient.h" #include "../llite/llite_internal.h" static const struct cl_req_operations ccc_req_ops; /* * ccc_ prefix stands for "Common Client Code". */ static struct kmem_cache *ccc_lock_kmem; static struct kmem_cache *ccc_object_kmem; static struct kmem_cache *ccc_thread_kmem; static struct kmem_cache *ccc_session_kmem; static struct kmem_cache *ccc_req_kmem; static struct lu_kmem_descr ccc_caches[] = { { .ckd_cache = &ccc_lock_kmem, .ckd_name = "ccc_lock_kmem", .ckd_size = sizeof(struct ccc_lock) }, { .ckd_cache = &ccc_object_kmem, .ckd_name = "ccc_object_kmem", .ckd_size = sizeof(struct ccc_object) }, { .ckd_cache = &ccc_thread_kmem, .ckd_name = "ccc_thread_kmem", .ckd_size = sizeof(struct ccc_thread_info), }, { .ckd_cache = &ccc_session_kmem, .ckd_name = "ccc_session_kmem", .ckd_size = sizeof(struct ccc_session) }, { .ckd_cache = &ccc_req_kmem, .ckd_name = "ccc_req_kmem", .ckd_size = sizeof(struct ccc_req) }, { .ckd_cache = NULL } }; /***************************************************************************** * * Vvp device and device type functions. 
* */ void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key) { struct ccc_thread_info *info; OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS); if (info == NULL) info = ERR_PTR(-ENOMEM); return info; } void ccc_key_fini(const struct lu_context *ctx, struct lu_context_key *key, void *data) { struct ccc_thread_info *info = data; OBD_SLAB_FREE_PTR(info, ccc_thread_kmem); } void *ccc_session_key_init(const struct lu_context *ctx, struct lu_context_key *key) { struct ccc_session *session; OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS); if (session == NULL) session = ERR_PTR(-ENOMEM); return session; } void ccc_session_key_fini(const struct lu_context *ctx, struct lu_context_key *key, void *data) { struct ccc_session *session = data; OBD_SLAB_FREE_PTR(session, ccc_session_kmem); } struct lu_context_key ccc_key = { .lct_tags = LCT_CL_THREAD, .lct_init = ccc_key_init, .lct_fini = ccc_key_fini }; struct lu_context_key ccc_session_key = { .lct_tags = LCT_SESSION, .lct_init = ccc_session_key_init, .lct_fini = ccc_session_key_fini }; /* type constructor/destructor: ccc_type_{init,fini,start,stop}(). 
*/ /* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */ int ccc_device_init(const struct lu_env *env, struct lu_device *d, const char *name, struct lu_device *next) { struct ccc_device *vdv; int rc; vdv = lu2ccc_dev(d); vdv->cdv_next = lu2cl_dev(next); LASSERT(d->ld_site != NULL && next->ld_type != NULL); next->ld_site = d->ld_site; rc = next->ld_type->ldt_ops->ldto_device_init( env, next, next->ld_type->ldt_name, NULL); if (rc == 0) { lu_device_get(next); lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init); } return rc; } struct lu_device *ccc_device_fini(const struct lu_env *env, struct lu_device *d) { return cl2lu_dev(lu2ccc_dev(d)->cdv_next); } struct lu_device *ccc_device_alloc(const struct lu_env *env, struct lu_device_type *t, struct lustre_cfg *cfg, const struct lu_device_operations *luops, const struct cl_device_operations *clops) { struct ccc_device *vdv; struct lu_device *lud; struct cl_site *site; int rc; vdv = kzalloc(sizeof(*vdv), GFP_NOFS); if (!vdv) return ERR_PTR(-ENOMEM); lud = &vdv->cdv_cl.cd_lu_dev; cl_device_init(&vdv->cdv_cl, t); ccc2lu_dev(vdv)->ld_ops = luops; vdv->cdv_cl.cd_ops = clops; site = kzalloc(sizeof(*site), GFP_NOFS); if (site != NULL) { rc = cl_site_init(site, &vdv->cdv_cl); if (rc == 0) rc = lu_site_init_finish(&site->cs_lu); else { LASSERT(lud->ld_site == NULL); CERROR("Cannot init lu_site, rc %d.\n", rc); kfree(site); } } else rc = -ENOMEM; if (rc != 0) { ccc_device_free(env, lud); lud = ERR_PTR(rc); } return lud; } struct lu_device *ccc_device_free(const struct lu_env *env, struct lu_device *d) { struct ccc_device *vdv = lu2ccc_dev(d); struct cl_site *site = lu2cl_site(d->ld_site); struct lu_device *next = cl2lu_dev(vdv->cdv_next); if (d->ld_site != NULL) { cl_site_fini(site); kfree(site); } cl_device_fini(lu2cl_dev(d)); kfree(vdv); return next; } int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct cl_req *req) { struct ccc_req *vrq; int result; OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, 
GFP_NOFS); if (vrq != NULL) { cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops); result = 0; } else result = -ENOMEM; return result; } /** * An `emergency' environment used by ccc_inode_fini() when cl_env_get() * fails. Access to this environment is serialized by ccc_inode_fini_guard * mutex. */ static struct lu_env *ccc_inode_fini_env; /** * A mutex serializing calls to slp_inode_fini() under extreme memory * pressure, when environments cannot be allocated. */ static DEFINE_MUTEX(ccc_inode_fini_guard); static int dummy_refcheck; int ccc_global_init(struct lu_device_type *device_type) { int result; result = lu_kmem_init(ccc_caches); if (result) return result; result = lu_device_type_init(device_type); if (result) goto out_kmem; ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck, LCT_REMEMBER|LCT_NOREF); if (IS_ERR(ccc_inode_fini_env)) { result = PTR_ERR(ccc_inode_fini_env); goto out_device; } ccc_inode_fini_env->le_ctx.lc_cookie = 0x4; return 0; out_device: lu_device_type_fini(device_type); out_kmem: lu_kmem_fini(ccc_caches); return result; } void ccc_global_fini(struct lu_device_type *device_type) { if (ccc_inode_fini_env != NULL) { cl_env_put(ccc_inode_fini_env, &dummy_refcheck); ccc_inode_fini_env = NULL; } lu_device_type_fini(device_type); lu_kmem_fini(ccc_caches); } /***************************************************************************** * * Object operations. 
* */ struct lu_object *ccc_object_alloc(const struct lu_env *env, const struct lu_object_header *unused, struct lu_device *dev, const struct cl_object_operations *clops, const struct lu_object_operations *luops) { struct ccc_object *vob; struct lu_object *obj; OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS); if (vob != NULL) { struct cl_object_header *hdr; obj = ccc2lu(vob); hdr = &vob->cob_header; cl_object_header_init(hdr); lu_object_init(obj, &hdr->coh_lu, dev); lu_object_add_top(&hdr->coh_lu, obj); vob->cob_cl.co_ops = clops; obj->lo_ops = luops; } else obj = NULL; return obj; } int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob, const struct cl_object_conf *conf) { vob->cob_inode = conf->coc_inode; vob->cob_transient_pages = 0; cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page)); return 0; } int ccc_object_init(const struct lu_env *env, struct lu_object *obj, const struct lu_object_conf *conf) { struct ccc_device *dev = lu2ccc_dev(obj->lo_dev); struct ccc_object *vob = lu2ccc(obj); struct lu_object *below; struct lu_device *under; int result; under = &dev->cdv_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); if (below != NULL) { const struct cl_object_conf *cconf; cconf = lu2cl_conf(conf); INIT_LIST_HEAD(&vob->cob_pending_list); lu_object_add(obj, below); result = ccc_object_init0(env, vob, cconf); } else result = -ENOMEM; return result; } void ccc_object_free(const struct lu_env *env, struct lu_object *obj) { struct ccc_object *vob = lu2ccc(obj); lu_object_fini(obj); lu_object_header_fini(obj->lo_header); OBD_SLAB_FREE_PTR(vob, ccc_object_kmem); } int ccc_lock_init(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *unused, const struct cl_lock_operations *lkops) { struct ccc_lock *clk; int result; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS); if (clk != NULL) { cl_lock_slice_add(lock, 
&clk->clk_cl, obj, lkops); result = 0; } else result = -ENOMEM; return result; } int ccc_attr_set(const struct lu_env *env, struct cl_object *obj, const struct cl_attr *attr, unsigned valid) { return 0; } int ccc_object_glimpse(const struct lu_env *env, const struct cl_object *obj, struct ost_lvb *lvb) { struct inode *inode = ccc_object_inode(obj); lvb->lvb_mtime = cl_inode_mtime(inode); lvb->lvb_atime = cl_inode_atime(inode); lvb->lvb_ctime = cl_inode_ctime(inode); /* * LU-417: Add dirty pages block count lest i_blocks reports 0, some * "cp" or "tar" on remote node may think it's a completely sparse file * and skip it. */ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0) lvb->lvb_blocks = dirty_cnt(inode); return 0; } int ccc_conf_set(const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf) { /* TODO: destroy all pages attached to this object. */ return 0; } static void ccc_object_size_lock(struct cl_object *obj) { struct inode *inode = ccc_object_inode(obj); cl_isize_lock(inode); cl_object_attr_lock(obj); } static void ccc_object_size_unlock(struct cl_object *obj) { struct inode *inode = ccc_object_inode(obj); cl_object_attr_unlock(obj); cl_isize_unlock(inode); } /***************************************************************************** * * Page operations. 
* */ struct page *ccc_page_vmpage(const struct lu_env *env, const struct cl_page_slice *slice) { return cl2vm_page(slice); } int ccc_page_is_under_lock(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *io) { struct ccc_io *cio = ccc_env_io(env); struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr; struct cl_page *page = slice->cpl_page; int result; if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || io->ci_type == CIT_FAULT) { if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED) result = -EBUSY; else { desc->cld_start = page->cp_index; desc->cld_end = page->cp_index; desc->cld_obj = page->cp_obj; desc->cld_mode = CLM_READ; result = cl_queue_match(&io->ci_lockset.cls_done, desc) ? -EBUSY : 0; } } else result = 0; return result; } int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice) { /* * Cached read? */ LBUG(); return 0; } void ccc_transient_page_verify(const struct cl_page *page) { } int ccc_transient_page_own(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused, int nonblock) { ccc_transient_page_verify(slice->cpl_page); return 0; } void ccc_transient_page_assume(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { ccc_transient_page_verify(slice->cpl_page); } void ccc_transient_page_unassume(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { ccc_transient_page_verify(slice->cpl_page); } void ccc_transient_page_disown(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { ccc_transient_page_verify(slice->cpl_page); } void ccc_transient_page_discard(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { struct cl_page *page = slice->cpl_page; ccc_transient_page_verify(slice->cpl_page); /* * For transient pages, remove it from the radix tree. 
*/ cl_page_delete(env, page); } int ccc_transient_page_prep(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { /* transient page should always be sent. */ return 0; } /***************************************************************************** * * Lock operations. * */ void ccc_lock_delete(const struct lu_env *env, const struct cl_lock_slice *slice) { CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj)); } void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice) { struct ccc_lock *clk = cl2ccc_lock(slice); OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem); } int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice, struct cl_io *unused, __u32 enqflags) { CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj)); return 0; } int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice) { CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj)); return 0; } int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice) { CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj)); return 0; } int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice) { CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj)); return 0; } /** * Implementation of cl_lock_operations::clo_fits_into() methods for ccc * layer. This function is executed every time io finds an existing lock in * the lock cache while creating new lock. This function has to decide whether * cached lock "fits" into io. * * \param slice lock to be checked * \param io IO that wants a lock. * * \see lov_lock_fits_into(). 
*/ int ccc_lock_fits_into(const struct lu_env *env, const struct cl_lock_slice *slice, const struct cl_lock_descr *need, const struct cl_io *io) { const struct cl_lock *lock = slice->cls_lock; const struct cl_lock_descr *descr = &lock->cll_descr; const struct ccc_io *cio = ccc_env_io(env); int result; /* * Work around DLM peculiarity: it assumes that glimpse * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns reads lock * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make * sure that glimpse doesn't get CLM_WRITE top-lock, so that it * doesn't enqueue CLM_WRITE sub-locks. */ if (cio->cui_glimpse) result = descr->cld_mode != CLM_WRITE; /* * Also, don't match incomplete write locks for read, otherwise read * would enqueue missing sub-locks in the write mode. */ else if (need->cld_mode != descr->cld_mode) result = lock->cll_state >= CLS_ENQUEUED; else result = 1; return result; } /** * Implements cl_lock_operations::clo_state() method for ccc layer, invoked * whenever lock state changes. Transfers object attributes, that might be * updated as a result of lock acquiring into inode. */ void ccc_lock_state(const struct lu_env *env, const struct cl_lock_slice *slice, enum cl_lock_state state) { struct cl_lock *lock = slice->cls_lock; /* * Refresh inode attributes when the lock is moving into CLS_HELD * state, and only when this is a result of real enqueue, rather than * of finding lock in the cache. */ if (state == CLS_HELD && lock->cll_state < CLS_HELD) { struct cl_object *obj; struct inode *inode; obj = slice->cls_obj; inode = ccc_object_inode(obj); /* vmtruncate() sets the i_size * under both a DLM lock and the * ll_inode_size_lock(). If we don't get the * ll_inode_size_lock() here we can match the DLM lock and * reset i_size. generic_file_write can then trust the * stale i_size when doing appending writes and effectively * cancel the result of the truncate. 
Getting the * ll_inode_size_lock() after the enqueue maintains the DLM * -> ll_inode_size_lock() acquiring order. */ if (lock->cll_descr.cld_start == 0 && lock->cll_descr.cld_end == CL_PAGE_EOF) cl_merge_lvb(env, inode); } } /***************************************************************************** * * io operations. * */ void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); } int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, __u32 enqflags, enum cl_lock_mode mode, pgoff_t start, pgoff_t end) { struct ccc_io *cio = ccc_env_io(env); struct cl_lock_descr *descr = &cio->cui_link.cill_descr; struct cl_object *obj = io->ci_obj; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end); memset(&cio->cui_link, 0, sizeof(cio->cui_link)); if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { descr->cld_mode = CLM_GROUP; descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid; } else { descr->cld_mode = mode; } descr->cld_obj = obj; descr->cld_start = start; descr->cld_end = end; descr->cld_enq_flags = enqflags; cl_io_lock_add(env, io, &cio->cui_link); return 0; } void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio, struct cl_io *io) { size_t size = io->u.ci_rw.crw_count; if (!cl_is_normalio(env, io) || cio->cui_iter == NULL) return; iov_iter_truncate(cio->cui_iter, size); } int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, __u32 enqflags, enum cl_lock_mode mode, loff_t start, loff_t end) { struct cl_object *obj = io->ci_obj; return ccc_io_one_lock_index(env, io, enqflags, mode, cl_index(obj, start), cl_index(obj, end)); } void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios) { CLOBINVRNT(env, ios->cis_io->ci_obj, ccc_object_invariant(ios->cis_io->ci_obj)); } void ccc_io_advance(const struct lu_env *env, const struct 
cl_io_slice *ios, size_t nob) { struct ccc_io *cio = cl2ccc_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = ios->cis_io->ci_obj; CLOBINVRNT(env, obj, ccc_object_invariant(obj)); if (!cl_is_normalio(env, io)) return; iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob); } /** * Helper function that if necessary adjusts file size (inode->i_size), when * position at the offset \a pos is accessed. File size can be arbitrary stale * on a Lustre client, but client at least knows KMS. If accessed area is * inside [0, KMS], set file size to KMS, otherwise glimpse file size. * * Locking: cl_isize_lock is used to serialize changes to inode size and to * protect consistency between inode size and cl_object * attributes. cl_object_size_lock() protects consistency between cl_attr's of * top-object and sub-objects. */ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, struct cl_io *io, loff_t start, size_t count, int *exceed) { struct cl_attr *attr = ccc_env_thread_attr(env); struct inode *inode = ccc_object_inode(obj); loff_t pos = start + count - 1; loff_t kms; int result; /* * Consistency guarantees: following possibilities exist for the * relation between region being accessed and real file size at this * moment: * * (A): the region is completely inside of the file; * * (B-x): x bytes of region are inside of the file, the rest is * outside; * * (C): the region is completely outside of the file. * * This classification is stable under DLM lock already acquired by * the caller, because to change the class, other client has to take * DLM lock conflicting with our lock. Also, any updates to ->i_size * by other threads on this client are serialized by * ll_inode_size_lock(). This guarantees that short reads are handled * correctly in the face of concurrent writes and truncates. 
*/ ccc_object_size_lock(obj); result = cl_object_attr_get(env, obj, attr); if (result == 0) { kms = attr->cat_kms; if (pos > kms) { /* * A glimpse is necessary to determine whether we * return a short read (B) or some zeroes at the end * of the buffer (C) */ ccc_object_size_unlock(obj); result = cl_glimpse_lock(env, io, inode, obj, 0); if (result == 0 && exceed != NULL) { /* If objective page index exceed end-of-file * page index, return directly. Do not expect * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. * --bug 17336 */ loff_t size = cl_isize_read(inode); loff_t cur_index = start >> PAGE_CACHE_SHIFT; loff_t size_index = (size - 1) >> PAGE_CACHE_SHIFT; if ((size == 0 && cur_index != 0) || size_index < cur_index) *exceed = 1; } return result; } /* * region is within kms and, hence, within real file * size (A). We need to increase i_size to cover the * read region so that generic_file_read() will do its * job, but that doesn't mean the kms size is * _correct_, it is only the _minimum_ size. If * someone does a stat they will get the correct size * which will always be >= the kms value here. * b=11081 */ if (cl_isize_read(inode) < kms) { cl_isize_write_nolock(inode, kms); CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n", PFID(lu_object_fid(&obj->co_lu)), (__u64)cl_isize_read(inode)); } } ccc_object_size_unlock(obj); return result; } /***************************************************************************** * * Transfer operations. * */ void ccc_req_completion(const struct lu_env *env, const struct cl_req_slice *slice, int ioret) { struct ccc_req *vrq; if (ioret > 0) cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret); vrq = cl2ccc_req(slice); OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem); } /** * Implementation of struct cl_req_operations::cro_attr_set() for ccc * layer. 
ccc is responsible for * * - o_[mac]time * * - o_mode * * - o_parent_seq * * - o_[ug]id * * - o_parent_oid * * - o_parent_ver * * - o_ioepoch, * * and capability. */ void ccc_req_attr_set(const struct lu_env *env, const struct cl_req_slice *slice, const struct cl_object *obj, struct cl_req_attr *attr, u64 flags) { struct inode *inode; struct obdo *oa; u32 valid_flags; oa = attr->cra_oa; inode = ccc_object_inode(obj); valid_flags = OBD_MD_FLTYPE; if ((flags & OBD_MD_FLOSSCAPA) != 0) { LASSERT(attr->cra_capa == NULL); attr->cra_capa = cl_capa_lookup(inode, slice->crs_req->crq_type); } if (slice->crs_req->crq_type == CRT_WRITE) { if (flags & OBD_MD_FLEPOCH) { oa->o_valid |= OBD_MD_FLEPOCH; oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch; valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLUID | OBD_MD_FLGID; } } obdo_from_inode(oa, inode, valid_flags & flags); obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid); memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid, JOBSTATS_JOBID_SIZE); } static const struct cl_req_operations ccc_req_ops = { .cro_attr_set = ccc_req_attr_set, .cro_completion = ccc_req_completion }; int cl_setattr_ost(struct inode *inode, const struct iattr *attr, struct obd_capa *capa) { struct lu_env *env; struct cl_io *io; int result; int refcheck; env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); io = ccc_env_thread_io(env); io->ci_obj = cl_i2info(inode)->lli_clob; io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime); io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime); io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime); io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size; io->u.ci_setattr.sa_valid = attr->ia_valid; io->u.ci_setattr.sa_capa = capa; again: if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { struct ccc_io *cio = ccc_env_io(env); if (attr->ia_valid & ATTR_FILE) /* populate the file descriptor for ftruncate to honor * group lock - see LU-787 */ cio->cui_fd = cl_iattr2fd(inode, attr); 
result = cl_io_loop(env, io); } else { result = io->ci_result; } cl_io_fini(env, io); if (unlikely(io->ci_need_restart)) goto again; /* HSM import case: file is released, cannot be restored * no need to fail except if restore registration failed * with -ENODATA */ if (result == -ENODATA && io->ci_restore_needed && io->ci_result != -ENODATA) result = 0; cl_env_put(env, &refcheck); return result; } /***************************************************************************** * * Type conversions. * */ struct lu_device *ccc2lu_dev(struct ccc_device *vdv) { return &vdv->cdv_cl.cd_lu_dev; } struct ccc_device *lu2ccc_dev(const struct lu_device *d) { return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev); } struct ccc_device *cl2ccc_dev(const struct cl_device *d) { return container_of0(d, struct ccc_device, cdv_cl); } struct lu_object *ccc2lu(struct ccc_object *vob) { return &vob->cob_cl.co_lu; } struct ccc_object *lu2ccc(const struct lu_object *obj) { return container_of0(obj, struct ccc_object, cob_cl.co_lu); } struct ccc_object *cl2ccc(const struct cl_object *obj) { return container_of0(obj, struct ccc_object, cob_cl); } struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice) { return container_of(slice, struct ccc_lock, clk_cl); } struct ccc_io *cl2ccc_io(const struct lu_env *env, const struct cl_io_slice *slice) { struct ccc_io *cio; cio = container_of(slice, struct ccc_io, cui_cl); LASSERT(cio == ccc_env_io(env)); return cio; } struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice) { return container_of0(slice, struct ccc_req, crq_cl); } struct page *cl2vm_page(const struct cl_page_slice *slice) { return cl2ccc_page(slice)->cpg_page; } /***************************************************************************** * * Accessors. 
* */ int ccc_object_invariant(const struct cl_object *obj) { struct inode *inode = ccc_object_inode(obj); struct cl_inode_info *lli = cl_i2info(inode); return (S_ISREG(cl_inode_mode(inode)) || /* i_mode of unlinked inode is zeroed. */ cl_inode_mode(inode) == 0) && lli->lli_clob == obj; } struct inode *ccc_object_inode(const struct cl_object *obj) { return cl2ccc(obj)->cob_inode; } /** * Returns a pointer to cl_page associated with \a vmpage, without acquiring * additional reference to the resulting page. This is an unsafe version of * cl_vmpage_page() that can only be used under vmpage lock. */ struct cl_page *ccc_vmpage_page_transient(struct page *vmpage) { KLASSERT(PageLocked(vmpage)); return (struct cl_page *)vmpage->private; } /** * Initialize or update CLIO structures for regular files when new * meta-data arrives from the server. * * \param inode regular file inode * \param md new file metadata from MDS * - allocates cl_object if necessary, * - updated layout, if object was already here. */ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) { struct lu_env *env; struct cl_inode_info *lli; struct cl_object *clob; struct lu_site *site; struct lu_fid *fid; struct cl_object_conf conf = { .coc_inode = inode, .u = { .coc_md = md } }; int result = 0; int refcheck; LASSERT(md->body->valid & OBD_MD_FLID); LASSERT(S_ISREG(cl_inode_mode(inode))); env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); site = cl_i2sbi(inode)->ll_site; lli = cl_i2info(inode); fid = &lli->lli_fid; LASSERT(fid_is_sane(fid)); if (lli->lli_clob == NULL) { /* clob is slave of inode, empty lli_clob means for new inode, * there is no clob in cache with the given fid, so it is * unnecessary to perform lookup-alloc-lookup-insert, just * alloc and insert directly. 
*/ LASSERT(inode->i_state & I_NEW); conf.coc_lu.loc_flags = LOC_F_NEW; clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev), fid, &conf); if (!IS_ERR(clob)) { /* * No locking is necessary, as new inode is * locked by I_NEW bit. */ lli->lli_clob = clob; lli->lli_has_smd = lsm_has_objects(md->lsm); lu_object_ref_add(&clob->co_lu, "inode", inode); } else result = PTR_ERR(clob); } else { result = cl_conf_set(env, lli->lli_clob, &conf); } cl_env_put(env, &refcheck); if (result != 0) CERROR("Failure to initialize cl object "DFID": %d\n", PFID(fid), result); return result; } /** * Wait for others drop their references of the object at first, then we drop * the last one, which will lead to the object be destroyed immediately. * Must be called after cl_object_kill() against this object. * * The reason we want to do this is: destroying top object will wait for sub * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs) * to initiate top object destroying which may deadlock. See bz22520. 
*/ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj) { struct lu_object_header *header = obj->co_lu.lo_header; wait_queue_t waiter; if (unlikely(atomic_read(&header->loh_ref) != 1)) { struct lu_site *site = obj->co_lu.lo_dev->ld_site; struct lu_site_bkt_data *bkt; bkt = lu_site_bkt_from_fid(site, &header->loh_fid); init_waitqueue_entry(&waiter, current); add_wait_queue(&bkt->lsb_marche_funebre, &waiter); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&header->loh_ref) == 1) break; schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&bkt->lsb_marche_funebre, &waiter); } cl_object_put(env, obj); } void cl_inode_fini(struct inode *inode) { struct lu_env *env; struct cl_inode_info *lli = cl_i2info(inode); struct cl_object *clob = lli->lli_clob; int refcheck; int emergency; if (clob != NULL) { void *cookie; cookie = cl_env_reenter(); env = cl_env_get(&refcheck); emergency = IS_ERR(env); if (emergency) { mutex_lock(&ccc_inode_fini_guard); LASSERT(ccc_inode_fini_env != NULL); cl_env_implant(ccc_inode_fini_env, &refcheck); env = ccc_inode_fini_env; } /* * cl_object cache is a slave to inode cache (which, in turn * is a slave to dentry cache), don't keep cl_object in memory * when its master is evicted. */ cl_object_kill(env, clob); lu_object_ref_del(&clob->co_lu, "inode", inode); cl_object_put_last(env, clob); lli->lli_clob = NULL; if (emergency) { cl_env_unplant(ccc_inode_fini_env, &refcheck); mutex_unlock(&ccc_inode_fini_guard); } else cl_env_put(env, &refcheck); cl_env_reexit(cookie); } } /** * return IF_* type for given lu_dirent entry. * IF_* flag shld be converted to particular OS file type in * platform llite module. 
*/ __u16 ll_dirent_type_get(struct lu_dirent *ent) { __u16 type = 0; struct luda_type *lt; int len = 0; if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) { const unsigned align = sizeof(struct luda_type) - 1; len = le16_to_cpu(ent->lde_namelen); len = (len + align) & ~align; lt = (void *)ent->lde_name + len; type = IFTODT(le16_to_cpu(lt->lt_type)); } return type; } /** * build inode number from passed @fid */ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) { if (BITS_PER_LONG == 32 || api32) return fid_flatten32(fid); else return fid_flatten(fid); } /** * build inode generation from passed @fid. If our FID overflows the 32-bit * inode number then return a non-zero generation to distinguish them. */ __u32 cl_fid_build_gen(const struct lu_fid *fid) { __u32 gen; if (fid_is_igif(fid)) { gen = lu_igif_gen(fid); return gen; } gen = fid_flatten(fid) >> 32; return gen; } /* lsm is unreliable after hsm implementation as layout can be changed at * any time. This is only to support old, non-clio-ized interfaces. It will * cause deadlock if clio operations are called with this extra layout refcount * because in case the layout changed during the IO, ll_layout_refresh() will * have to wait for the refcount to become zero to destroy the older layout. * * Notice that the lsm returned by this function may not be valid unless called * inside layout lock - MDS_INODELOCK_LAYOUT. */ struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode) { return lov_lsm_get(cl_i2info(inode)->lli_clob); } inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm) { lov_lsm_put(cl_i2info(inode)->lli_clob, lsm); }
gpl-2.0
rajat1994/linux
drivers/staging/lustre/lustre/lov/lov_request.c
221
18283
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
*/ #define DEBUG_SUBSYSTEM S_LOV #include "../../include/linux/libcfs/libcfs.h" #include "../include/obd_class.h" #include "../include/lustre/lustre_idl.h" #include "lov_internal.h" static void lov_init_set(struct lov_request_set *set) { set->set_count = 0; atomic_set(&set->set_completes, 0); atomic_set(&set->set_success, 0); atomic_set(&set->set_finish_checked, 0); set->set_cookies = NULL; INIT_LIST_HEAD(&set->set_list); atomic_set(&set->set_refcount, 1); init_waitqueue_head(&set->set_waitq); spin_lock_init(&set->set_lock); } void lov_finish_set(struct lov_request_set *set) { struct list_head *pos, *n; LASSERT(set); list_for_each_safe(pos, n, &set->set_list) { struct lov_request *req = list_entry(pos, struct lov_request, rq_link); list_del_init(&req->rq_link); if (req->rq_oi.oi_oa) OBDO_FREE(req->rq_oi.oi_oa); kfree(req->rq_oi.oi_osfs); kfree(req); } kfree(set); } int lov_set_finished(struct lov_request_set *set, int idempotent) { int completes = atomic_read(&set->set_completes); CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count); if (completes == set->set_count) { if (idempotent) return 1; if (atomic_inc_return(&set->set_finish_checked) == 1) return 1; } return 0; } void lov_update_set(struct lov_request_set *set, struct lov_request *req, int rc) { req->rq_complete = 1; req->rq_rc = rc; atomic_inc(&set->set_completes); if (rc == 0) atomic_inc(&set->set_success); wake_up(&set->set_waitq); } int lov_update_common_set(struct lov_request_set *set, struct lov_request *req, int rc) { struct lov_obd *lov = &set->set_exp->exp_obd->u.lov; lov_update_set(set, req, rc); /* grace error on inactive ost */ if (rc && !(lov->lov_tgts[req->rq_idx] && lov->lov_tgts[req->rq_idx]->ltd_active)) rc = 0; /* FIXME in raid1 regime, should return 0 */ return rc; } void lov_set_add_req(struct lov_request *req, struct lov_request_set *set) { list_add_tail(&req->rq_link, &set->set_list); set->set_count++; req->rq_rqset = set; } static int lov_check_set(struct lov_obd *lov, int 
idx) { int rc; struct lov_tgt_desc *tgt; mutex_lock(&lov->lov_lock); tgt = lov->lov_tgts[idx]; rc = !tgt || tgt->ltd_active || (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried); mutex_unlock(&lov->lov_lock); return rc; } /* Check if the OSC connection exists and is active. * If the OSC has not yet had a chance to connect to the OST the first time, * wait once for it to connect instead of returning an error. */ int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) { wait_queue_head_t waitq; struct l_wait_info lwi; struct lov_tgt_desc *tgt; int rc = 0; mutex_lock(&lov->lov_lock); tgt = lov->lov_tgts[ost_idx]; if (unlikely(tgt == NULL)) { rc = 0; goto out; } if (likely(tgt->ltd_active)) { rc = 1; goto out; } if (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried) { rc = 0; goto out; } mutex_unlock(&lov->lov_lock); init_waitqueue_head(&waitq); lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout), cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi); if (tgt != NULL && tgt->ltd_active) return 1; return 0; out: mutex_unlock(&lov->lov_lock); return rc; } static int common_attr_done(struct lov_request_set *set) { struct list_head *pos; struct lov_request *req; struct obdo *tmp_oa; int rc = 0, attrset = 0; LASSERT(set->set_oi != NULL); if (set->set_oi->oi_oa == NULL) return 0; if (!atomic_read(&set->set_success)) return -EIO; OBDO_ALLOC(tmp_oa); if (tmp_oa == NULL) { rc = -ENOMEM; goto out; } list_for_each(pos, &set->set_list) { req = list_entry(pos, struct lov_request, rq_link); if (!req->rq_complete || req->rq_rc) continue; if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ continue; lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa, req->rq_oi.oi_oa->o_valid, set->set_oi->oi_md, req->rq_stripe, &attrset); } if (!attrset) { CERROR("No stripes had valid attrs\n"); rc = -EIO; } if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && (set->set_oi->oi_md->lsm_stripe_count != attrset)) { /* 
When we take attributes of some epoch, we require all the * ost to be active. */ CERROR("Not all the stripes had valid attrs\n"); rc = -EIO; goto out; } tmp_oa->o_oi = set->set_oi->oi_oa->o_oi; memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa)); out: if (tmp_oa) OBDO_FREE(tmp_oa); return rc; } int lov_fini_getattr_set(struct lov_request_set *set) { int rc = 0; if (set == NULL) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) rc = common_attr_done(set); lov_put_reqset(set); return rc; } /* The callback for osc_getattr_async that finalizes a request info when a * response is received. */ static int cb_getattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; struct lov_request *lovreq; lovreq = container_of(oinfo, struct lov_request, rq_oi); return lov_update_common_set(lovreq->rq_rqset, lovreq, rc); } int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, struct lov_request_set **reqset) { struct lov_request_set *set; struct lov_obd *lov = &exp->exp_obd->u.lov; int rc = 0, i; set = kzalloc(sizeof(*set), GFP_NOFS); if (!set) return -ENOMEM; lov_init_set(set); set->set_exp = exp; set->set_oi = oinfo; for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { struct lov_oinfo *loi; struct lov_request *req; loi = oinfo->oi_md->lsm_oinfo[i]; if (lov_oinfo_is_dummy(loi)) continue; if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) { CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx); if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) { /* SOM requires all the OSTs to be active. 
*/ rc = -EIO; goto out_set; } continue; } req = kzalloc(sizeof(*req), GFP_NOFS); if (!req) { rc = -ENOMEM; goto out_set; } req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; OBDO_ALLOC(req->rq_oi.oi_oa); if (req->rq_oi.oi_oa == NULL) { kfree(req); rc = -ENOMEM; goto out_set; } memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, sizeof(*req->rq_oi.oi_oa)); req->rq_oi.oi_oa->o_oi = loi->loi_oi; req->rq_oi.oi_cb_up = cb_getattr_update; req->rq_oi.oi_capa = oinfo->oi_capa; lov_set_add_req(req, set); } if (!set->set_count) { rc = -EIO; goto out_set; } *reqset = set; return rc; out_set: lov_fini_getattr_set(set); return rc; } int lov_fini_destroy_set(struct lov_request_set *set) { if (set == NULL) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { /* FIXME update qos data here */ } lov_put_reqset(set); return 0; } int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, struct obdo *src_oa, struct lov_stripe_md *lsm, struct obd_trans_info *oti, struct lov_request_set **reqset) { struct lov_request_set *set; struct lov_obd *lov = &exp->exp_obd->u.lov; int rc = 0, i; set = kzalloc(sizeof(*set), GFP_NOFS); if (!set) return -ENOMEM; lov_init_set(set); set->set_exp = exp; set->set_oi = oinfo; set->set_oi->oi_md = lsm; set->set_oi->oi_oa = src_oa; set->set_oti = oti; if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < lsm->lsm_stripe_count; i++) { struct lov_oinfo *loi; struct lov_request *req; loi = lsm->lsm_oinfo[i]; if (lov_oinfo_is_dummy(loi)) continue; if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) { CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx); continue; } req = kzalloc(sizeof(*req), GFP_NOFS); if (!req) { rc = -ENOMEM; goto out_set; } req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; OBDO_ALLOC(req->rq_oi.oi_oa); if (req->rq_oi.oi_oa == NULL) { kfree(req); rc = -ENOMEM; goto out_set; } memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa)); 
req->rq_oi.oi_oa->o_oi = loi->loi_oi; lov_set_add_req(req, set); } if (!set->set_count) { rc = -EIO; goto out_set; } *reqset = set; return rc; out_set: lov_fini_destroy_set(set); return rc; } int lov_fini_setattr_set(struct lov_request_set *set) { int rc = 0; if (set == NULL) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { rc = common_attr_done(set); /* FIXME update qos data here */ } lov_put_reqset(set); return rc; } int lov_update_setattr_set(struct lov_request_set *set, struct lov_request *req, int rc) { struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov; struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md; lov_update_set(set, req, rc); /* grace error on inactive ost */ if (rc && !(lov->lov_tgts[req->rq_idx] && lov->lov_tgts[req->rq_idx]->ltd_active)) rc = 0; if (rc == 0) { if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME) lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime = req->rq_oi.oi_oa->o_ctime; if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME) lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime = req->rq_oi.oi_oa->o_mtime; if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME) lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime = req->rq_oi.oi_oa->o_atime; } return rc; } /* The callback for osc_setattr_async that finalizes a request info when a * response is received. 
*/ static int cb_setattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; struct lov_request *lovreq; lovreq = container_of(oinfo, struct lov_request, rq_oi); return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc); } int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, struct lov_request_set **reqset) { struct lov_request_set *set; struct lov_obd *lov = &exp->exp_obd->u.lov; int rc = 0, i; set = kzalloc(sizeof(*set), GFP_NOFS); if (!set) return -ENOMEM; lov_init_set(set); set->set_exp = exp; set->set_oti = oti; set->set_oi = oinfo; if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i]; struct lov_request *req; if (lov_oinfo_is_dummy(loi)) continue; if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) { CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx); continue; } req = kzalloc(sizeof(*req), GFP_NOFS); if (!req) { rc = -ENOMEM; goto out_set; } req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; OBDO_ALLOC(req->rq_oi.oi_oa); if (req->rq_oi.oi_oa == NULL) { kfree(req); rc = -ENOMEM; goto out_set; } memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, sizeof(*req->rq_oi.oi_oa)); req->rq_oi.oi_oa->o_oi = loi->loi_oi; req->rq_oi.oi_oa->o_stripe_idx = i; req->rq_oi.oi_cb_up = cb_setattr_update; req->rq_oi.oi_capa = oinfo->oi_capa; if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) { int off = lov_stripe_offset(oinfo->oi_md, oinfo->oi_oa->o_size, i, &req->rq_oi.oi_oa->o_size); if (off < 0 && req->rq_oi.oi_oa->o_size) req->rq_oi.oi_oa->o_size--; CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n", i, req->rq_oi.oi_oa->o_size, oinfo->oi_oa->o_size); } lov_set_add_req(req, set); } if (!set->set_count) { rc = -EIO; goto out_set; } *reqset = set; return rc; out_set: lov_fini_setattr_set(set); return rc; } #define LOV_U64_MAX ((__u64)~0ULL) #define LOV_SUM_MAX(tot, add) \ do { \ 
if ((tot) + (add) < (tot)) \ (tot) = LOV_U64_MAX; \ else \ (tot) += (add); \ } while (0) int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, int success) { if (success) { __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov, LOV_MAGIC, 0); if (osfs->os_files != LOV_U64_MAX) lov_do_div64(osfs->os_files, expected_stripes); if (osfs->os_ffree != LOV_U64_MAX) lov_do_div64(osfs->os_ffree, expected_stripes); spin_lock(&obd->obd_osfs_lock); memcpy(&obd->obd_osfs, osfs, sizeof(*osfs)); obd->obd_osfs_age = cfs_time_current_64(); spin_unlock(&obd->obd_osfs_lock); return 0; } return -EIO; } int lov_fini_statfs_set(struct lov_request_set *set) { int rc = 0; if (set == NULL) return 0; if (atomic_read(&set->set_completes)) { rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs, atomic_read(&set->set_success)); } lov_put_reqset(set); return rc; } void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs, int success) { int shift = 0, quit = 0; __u64 tmp; if (success == 0) { memcpy(osfs, lov_sfs, sizeof(*lov_sfs)); } else { if (osfs->os_bsize != lov_sfs->os_bsize) { /* assume all block sizes are always powers of 2 */ /* get the bits difference */ tmp = osfs->os_bsize | lov_sfs->os_bsize; for (shift = 0; shift <= 64; ++shift) { if (tmp & 1) { if (quit) break; quit = 1; shift = 0; } tmp >>= 1; } } if (osfs->os_bsize < lov_sfs->os_bsize) { osfs->os_bsize = lov_sfs->os_bsize; osfs->os_bfree >>= shift; osfs->os_bavail >>= shift; osfs->os_blocks >>= shift; } else if (shift != 0) { lov_sfs->os_bfree >>= shift; lov_sfs->os_bavail >>= shift; lov_sfs->os_blocks >>= shift; } osfs->os_bfree += lov_sfs->os_bfree; osfs->os_bavail += lov_sfs->os_bavail; osfs->os_blocks += lov_sfs->os_blocks; /* XXX not sure about this one - depends on policy. 
* - could be minimum if we always stripe on all OBDs * (but that would be wrong for any other policy, * if one of the OBDs has no more objects left) * - could be sum if we stripe whole objects * - could be average, just to give a nice number * * To give a "reasonable" (if not wholly accurate) * number, we divide the total number of free objects * by expected stripe count (watch out for overflow). */ LOV_SUM_MAX(osfs->os_files, lov_sfs->os_files); LOV_SUM_MAX(osfs->os_ffree, lov_sfs->os_ffree); } } /* The callback for osc_statfs_async that finalizes a request info when a * response is received. */ static int cb_statfs_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; struct lov_request *lovreq; struct lov_request_set *set; struct obd_statfs *osfs, *lov_sfs; struct lov_obd *lov; struct lov_tgt_desc *tgt; struct obd_device *lovobd, *tgtobd; int success; lovreq = container_of(oinfo, struct lov_request, rq_oi); set = lovreq->rq_rqset; lovobd = set->set_obd; lov = &lovobd->u.lov; osfs = set->set_oi->oi_osfs; lov_sfs = oinfo->oi_osfs; success = atomic_read(&set->set_success); /* XXX: the same is done in lov_update_common_set, however lovset->set_exp is not initialized. 
*/ lov_update_set(set, lovreq, rc); if (rc) goto out; obd_getref(lovobd); tgt = lov->lov_tgts[lovreq->rq_idx]; if (!tgt || !tgt->ltd_active) goto out_update; tgtobd = class_exp2obd(tgt->ltd_exp); spin_lock(&tgtobd->obd_osfs_lock); memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs)); if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0) tgtobd->obd_osfs_age = cfs_time_current_64(); spin_unlock(&tgtobd->obd_osfs_lock); out_update: lov_update_statfs(osfs, lov_sfs, success); obd_putref(lovobd); out: if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD && lov_set_finished(set, 0)) { lov_statfs_interpret(NULL, set, set->set_count != atomic_read(&set->set_success)); } return 0; } int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, struct lov_request_set **reqset) { struct lov_request_set *set; struct lov_obd *lov = &obd->u.lov; int rc = 0, i; set = kzalloc(sizeof(*set), GFP_NOFS); if (!set) return -ENOMEM; lov_init_set(set); set->set_obd = obd; set->set_oi = oinfo; /* We only get block data from the OBD */ for (i = 0; i < lov->desc.ld_tgt_count; i++) { struct lov_request *req; if (lov->lov_tgts[i] == NULL || (!lov_check_and_wait_active(lov, i) && (oinfo->oi_flags & OBD_STATFS_NODELAY))) { CDEBUG(D_HA, "lov idx %d inactive\n", i); continue; } /* skip targets that have been explicitly disabled by the * administrator */ if (!lov->lov_tgts[i]->ltd_exp) { CDEBUG(D_HA, "lov idx %d administratively disabled\n", i); continue; } req = kzalloc(sizeof(*req), GFP_NOFS); if (!req) { rc = -ENOMEM; goto out_set; } req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs), GFP_NOFS); if (!req->rq_oi.oi_osfs) { kfree(req); rc = -ENOMEM; goto out_set; } req->rq_idx = i; req->rq_oi.oi_cb_up = cb_statfs_update; req->rq_oi.oi_flags = oinfo->oi_flags; lov_set_add_req(req, set); } if (!set->set_count) { rc = -EIO; goto out_set; } *reqset = set; return rc; out_set: lov_fini_statfs_set(set); return rc; }
gpl-2.0
OpenClovis/linux_tipc
drivers/usb/phy/phy-tahvo.c
733
11392
/* * Tahvo USB transceiver driver * * Copyright (C) 2005-2006 Nokia Corporation * * Parts copied from isp1301_omap.c. * Copyright (C) 2004 Texas Instruments * Copyright (C) 2004 David Brownell * * Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs. * Modified for Retu/Tahvo MFD by Aaro Koskinen. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/io.h> #include <linux/clk.h> #include <linux/usb.h> #include <linux/extcon.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb/otg.h> #include <linux/mfd/retu.h> #include <linux/usb/gadget.h> #include <linux/platform_device.h> #define DRIVER_NAME "tahvo-usb" #define TAHVO_REG_IDSR 0x02 #define TAHVO_REG_USBR 0x06 #define USBR_SLAVE_CONTROL (1 << 8) #define USBR_VPPVIO_SW (1 << 7) #define USBR_SPEED (1 << 6) #define USBR_REGOUT (1 << 5) #define USBR_MASTER_SW2 (1 << 4) #define USBR_MASTER_SW1 (1 << 3) #define USBR_SLAVE_SW (1 << 2) #define USBR_NSUSPEND (1 << 1) #define USBR_SEMODE (1 << 0) #define TAHVO_MODE_HOST 0 #define TAHVO_MODE_PERIPHERAL 1 struct tahvo_usb { struct platform_device *pt_dev; struct usb_phy phy; int vbus_state; struct mutex serialize; struct clk *ick; int irq; int tahvo_mode; struct extcon_dev extcon; }; static const char *tahvo_cable[] = { "USB-HOST", "USB", NULL, }; static ssize_t vbus_state_show(struct device *device, struct device_attribute *attr, char *buf) { struct tahvo_usb *tu = dev_get_drvdata(device); return sprintf(buf, "%s\n", tu->vbus_state ? 
"on" : "off"); } static DEVICE_ATTR(vbus, 0444, vbus_state_show, NULL); static void check_vbus_state(struct tahvo_usb *tu) { struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); int reg, prev_state; reg = retu_read(rdev, TAHVO_REG_IDSR); if (reg & TAHVO_STAT_VBUS) { switch (tu->phy.state) { case OTG_STATE_B_IDLE: /* Enable the gadget driver */ if (tu->phy.otg->gadget) usb_gadget_vbus_connect(tu->phy.otg->gadget); tu->phy.state = OTG_STATE_B_PERIPHERAL; break; case OTG_STATE_A_IDLE: /* * Session is now valid assuming the USB hub is driving * Vbus. */ tu->phy.state = OTG_STATE_A_HOST; break; default: break; } dev_info(&tu->pt_dev->dev, "USB cable connected\n"); } else { switch (tu->phy.state) { case OTG_STATE_B_PERIPHERAL: if (tu->phy.otg->gadget) usb_gadget_vbus_disconnect(tu->phy.otg->gadget); tu->phy.state = OTG_STATE_B_IDLE; break; case OTG_STATE_A_HOST: tu->phy.state = OTG_STATE_A_IDLE; break; default: break; } dev_info(&tu->pt_dev->dev, "USB cable disconnected\n"); } prev_state = tu->vbus_state; tu->vbus_state = reg & TAHVO_STAT_VBUS; if (prev_state != tu->vbus_state) { extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state); sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state"); } } static void tahvo_usb_become_host(struct tahvo_usb *tu) { struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); extcon_set_cable_state(&tu->extcon, "USB-HOST", true); /* Power up the transceiver in USB host mode */ retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND | USBR_MASTER_SW2 | USBR_MASTER_SW1); tu->phy.state = OTG_STATE_A_IDLE; check_vbus_state(tu); } static void tahvo_usb_stop_host(struct tahvo_usb *tu) { tu->phy.state = OTG_STATE_A_IDLE; } static void tahvo_usb_become_peripheral(struct tahvo_usb *tu) { struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); extcon_set_cable_state(&tu->extcon, "USB-HOST", false); /* Power up transceiver and set it in USB peripheral mode */ retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | 
USBR_REGOUT | USBR_NSUSPEND | USBR_SLAVE_SW); tu->phy.state = OTG_STATE_B_IDLE; check_vbus_state(tu); } static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu) { if (tu->phy.otg->gadget) usb_gadget_vbus_disconnect(tu->phy.otg->gadget); tu->phy.state = OTG_STATE_B_IDLE; } static void tahvo_usb_power_off(struct tahvo_usb *tu) { struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); /* Disable gadget controller if any */ if (tu->phy.otg->gadget) usb_gadget_vbus_disconnect(tu->phy.otg->gadget); /* Power off transceiver */ retu_write(rdev, TAHVO_REG_USBR, 0); tu->phy.state = OTG_STATE_UNDEFINED; } static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend) { struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, phy); struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); u16 w; dev_dbg(&tu->pt_dev->dev, "%s\n", __func__); w = retu_read(rdev, TAHVO_REG_USBR); if (suspend) w &= ~USBR_NSUSPEND; else w |= USBR_NSUSPEND; retu_write(rdev, TAHVO_REG_USBR, w); return 0; } static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host) { struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy); dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host); mutex_lock(&tu->serialize); if (host == NULL) { if (tu->tahvo_mode == TAHVO_MODE_HOST) tahvo_usb_power_off(tu); otg->host = NULL; mutex_unlock(&tu->serialize); return 0; } if (tu->tahvo_mode == TAHVO_MODE_HOST) { otg->host = NULL; tahvo_usb_become_host(tu); } otg->host = host; mutex_unlock(&tu->serialize); return 0; } static int tahvo_usb_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy); dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget); mutex_lock(&tu->serialize); if (!gadget) { if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL) tahvo_usb_power_off(tu); tu->phy.otg->gadget = NULL; mutex_unlock(&tu->serialize); return 0; } tu->phy.otg->gadget = gadget; if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL) 
tahvo_usb_become_peripheral(tu); mutex_unlock(&tu->serialize); return 0; } static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu) { struct tahvo_usb *tu = _tu; mutex_lock(&tu->serialize); check_vbus_state(tu); mutex_unlock(&tu->serialize); return IRQ_HANDLED; } static ssize_t otg_mode_show(struct device *device, struct device_attribute *attr, char *buf) { struct tahvo_usb *tu = dev_get_drvdata(device); switch (tu->tahvo_mode) { case TAHVO_MODE_HOST: return sprintf(buf, "host\n"); case TAHVO_MODE_PERIPHERAL: return sprintf(buf, "peripheral\n"); } return -EINVAL; } static ssize_t otg_mode_store(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct tahvo_usb *tu = dev_get_drvdata(device); int r; mutex_lock(&tu->serialize); if (count >= 4 && strncmp(buf, "host", 4) == 0) { if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL) tahvo_usb_stop_peripheral(tu); tu->tahvo_mode = TAHVO_MODE_HOST; if (tu->phy.otg->host) { dev_info(device, "HOST mode: host controller present\n"); tahvo_usb_become_host(tu); } else { dev_info(device, "HOST mode: no host controller, powering off\n"); tahvo_usb_power_off(tu); } r = strlen(buf); } else if (count >= 10 && strncmp(buf, "peripheral", 10) == 0) { if (tu->tahvo_mode == TAHVO_MODE_HOST) tahvo_usb_stop_host(tu); tu->tahvo_mode = TAHVO_MODE_PERIPHERAL; if (tu->phy.otg->gadget) { dev_info(device, "PERIPHERAL mode: gadget driver present\n"); tahvo_usb_become_peripheral(tu); } else { dev_info(device, "PERIPHERAL mode: no gadget driver, powering off\n"); tahvo_usb_power_off(tu); } r = strlen(buf); } else { r = -EINVAL; } mutex_unlock(&tu->serialize); return r; } static DEVICE_ATTR(otg_mode, 0644, otg_mode_show, otg_mode_store); static struct attribute *tahvo_attributes[] = { &dev_attr_vbus.attr, &dev_attr_otg_mode.attr, NULL }; static struct attribute_group tahvo_attr_group = { .attrs = tahvo_attributes, }; static int tahvo_usb_probe(struct platform_device *pdev) { struct retu_dev *rdev = 
dev_get_drvdata(pdev->dev.parent); struct tahvo_usb *tu; int ret; tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL); if (!tu) return -ENOMEM; tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg), GFP_KERNEL); if (!tu->phy.otg) return -ENOMEM; tu->pt_dev = pdev; /* Default mode */ #ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT tu->tahvo_mode = TAHVO_MODE_HOST; #else tu->tahvo_mode = TAHVO_MODE_PERIPHERAL; #endif mutex_init(&tu->serialize); tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick"); if (!IS_ERR(tu->ick)) clk_enable(tu->ick); /* * Set initial state, so that we generate kevents only on state changes. */ tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS; tu->extcon.name = DRIVER_NAME; tu->extcon.supported_cable = tahvo_cable; tu->extcon.dev.parent = &pdev->dev; ret = extcon_dev_register(&tu->extcon); if (ret) { dev_err(&pdev->dev, "could not register extcon device: %d\n", ret); goto err_disable_clk; } /* Set the initial cable state. */ extcon_set_cable_state(&tu->extcon, "USB-HOST", tu->tahvo_mode == TAHVO_MODE_HOST); extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state); /* Create OTG interface */ tahvo_usb_power_off(tu); tu->phy.dev = &pdev->dev; tu->phy.state = OTG_STATE_UNDEFINED; tu->phy.label = DRIVER_NAME; tu->phy.set_suspend = tahvo_usb_set_suspend; tu->phy.otg->phy = &tu->phy; tu->phy.otg->set_host = tahvo_usb_set_host; tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral; ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2); if (ret < 0) { dev_err(&pdev->dev, "cannot register USB transceiver: %d\n", ret); goto err_extcon_unreg; } dev_set_drvdata(&pdev->dev, tu); tu->irq = platform_get_irq(pdev, 0); ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0, "tahvo-vbus", tu); if (ret) { dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n", ret); goto err_remove_phy; } /* Attributes */ ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group); if (ret) { dev_err(&pdev->dev, "cannot create sysfs group: %d\n", 
ret); goto err_free_irq; } return 0; err_free_irq: free_irq(tu->irq, tu); err_remove_phy: usb_remove_phy(&tu->phy); err_extcon_unreg: extcon_dev_unregister(&tu->extcon); err_disable_clk: if (!IS_ERR(tu->ick)) clk_disable(tu->ick); return ret; } static int tahvo_usb_remove(struct platform_device *pdev) { struct tahvo_usb *tu = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group); free_irq(tu->irq, tu); usb_remove_phy(&tu->phy); extcon_dev_unregister(&tu->extcon); if (!IS_ERR(tu->ick)) clk_disable(tu->ick); return 0; } static struct platform_driver tahvo_usb_driver = { .probe = tahvo_usb_probe, .remove = tahvo_usb_remove, .driver = { .name = "tahvo-usb", .owner = THIS_MODULE, }, }; module_platform_driver(tahvo_usb_driver); MODULE_DESCRIPTION("Tahvo USB transceiver driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs"); MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
gpl-2.0
DavionKnight/OK6410-linux-2.6.36
drivers/video/ep93xx-fb.c
989
17681
/* * linux/drivers/video/ep93xx-fb.c * * Framebuffer support for the EP93xx series. * * Copyright (C) 2007 Bluewater Systems Ltd * Author: Ryan Mallon <ryan@bluewatersys.com> * * Copyright (c) 2009 H Hartley Sweeten <hsweeten@visionengravers.com> * * Based on the Cirrus Logic ep93xxfb driver, and various other ep93xxfb * drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/fb.h> #include <mach/fb.h> /* Vertical Frame Timing Registers */ #define EP93XXFB_VLINES_TOTAL 0x0000 /* SW locked */ #define EP93XXFB_VSYNC 0x0004 /* SW locked */ #define EP93XXFB_VACTIVE 0x0008 /* SW locked */ #define EP93XXFB_VBLANK 0x0228 /* SW locked */ #define EP93XXFB_VCLK 0x000c /* SW locked */ /* Horizontal Frame Timing Registers */ #define EP93XXFB_HCLKS_TOTAL 0x0010 /* SW locked */ #define EP93XXFB_HSYNC 0x0014 /* SW locked */ #define EP93XXFB_HACTIVE 0x0018 /* SW locked */ #define EP93XXFB_HBLANK 0x022c /* SW locked */ #define EP93XXFB_HCLK 0x001c /* SW locked */ /* Frame Buffer Memory Configuration Registers */ #define EP93XXFB_SCREEN_PAGE 0x0028 #define EP93XXFB_SCREEN_HPAGE 0x002c #define EP93XXFB_SCREEN_LINES 0x0030 #define EP93XXFB_LINE_LENGTH 0x0034 #define EP93XXFB_VLINE_STEP 0x0038 #define EP93XXFB_LINE_CARRY 0x003c /* SW locked */ #define EP93XXFB_EOL_OFFSET 0x0230 /* Other Video Registers */ #define EP93XXFB_BRIGHTNESS 0x0020 #define EP93XXFB_ATTRIBS 0x0024 /* SW locked */ #define EP93XXFB_SWLOCK 0x007c /* SW locked */ #define EP93XXFB_AC_RATE 0x0214 #define EP93XXFB_FIFO_LEVEL 0x0234 #define EP93XXFB_PIXELMODE 0x0054 #define EP93XXFB_PIXELMODE_32BPP (0x7 << 0) #define EP93XXFB_PIXELMODE_24BPP (0x6 << 0) #define EP93XXFB_PIXELMODE_16BPP (0x4 << 0) #define EP93XXFB_PIXELMODE_8BPP (0x2 << 0) #define 
EP93XXFB_PIXELMODE_SHIFT_1P_24B (0x0 << 3) #define EP93XXFB_PIXELMODE_SHIFT_1P_18B (0x1 << 3) #define EP93XXFB_PIXELMODE_COLOR_LUT (0x0 << 10) #define EP93XXFB_PIXELMODE_COLOR_888 (0x4 << 10) #define EP93XXFB_PIXELMODE_COLOR_555 (0x5 << 10) #define EP93XXFB_PARL_IF_OUT 0x0058 #define EP93XXFB_PARL_IF_IN 0x005c /* Blink Control Registers */ #define EP93XXFB_BLINK_RATE 0x0040 #define EP93XXFB_BLINK_MASK 0x0044 #define EP93XXFB_BLINK_PATTRN 0x0048 #define EP93XXFB_PATTRN_MASK 0x004c #define EP93XXFB_BKGRND_OFFSET 0x0050 /* Hardware Cursor Registers */ #define EP93XXFB_CURSOR_ADR_START 0x0060 #define EP93XXFB_CURSOR_ADR_RESET 0x0064 #define EP93XXFB_CURSOR_SIZE 0x0068 #define EP93XXFB_CURSOR_COLOR1 0x006c #define EP93XXFB_CURSOR_COLOR2 0x0070 #define EP93XXFB_CURSOR_BLINK_COLOR1 0x021c #define EP93XXFB_CURSOR_BLINK_COLOR2 0x0220 #define EP93XXFB_CURSOR_XY_LOC 0x0074 #define EP93XXFB_CURSOR_DSCAN_HY_LOC 0x0078 #define EP93XXFB_CURSOR_BLINK_RATE_CTRL 0x0224 /* LUT Registers */ #define EP93XXFB_GRY_SCL_LUTR 0x0080 #define EP93XXFB_GRY_SCL_LUTG 0x0280 #define EP93XXFB_GRY_SCL_LUTB 0x0300 #define EP93XXFB_LUT_SW_CONTROL 0x0218 #define EP93XXFB_LUT_SW_CONTROL_SWTCH (1 << 0) #define EP93XXFB_LUT_SW_CONTROL_SSTAT (1 << 1) #define EP93XXFB_COLOR_LUT 0x0400 /* Video Signature Registers */ #define EP93XXFB_VID_SIG_RSLT_VAL 0x0200 #define EP93XXFB_VID_SIG_CTRL 0x0204 #define EP93XXFB_VSIG 0x0208 #define EP93XXFB_HSIG 0x020c #define EP93XXFB_SIG_CLR_STR 0x0210 /* Minimum / Maximum resolutions supported */ #define EP93XXFB_MIN_XRES 64 #define EP93XXFB_MIN_YRES 64 #define EP93XXFB_MAX_XRES 1024 #define EP93XXFB_MAX_YRES 768 struct ep93xx_fbi { struct ep93xxfb_mach_info *mach_info; struct clk *clk; struct resource *res; void __iomem *mmio_base; unsigned int pseudo_palette[256]; }; static int check_screenpage_bug = 1; module_param(check_screenpage_bug, int, 0644); MODULE_PARM_DESC(check_screenpage_bug, "Check for bit 27 screen page bug. 
Default = 1"); static inline unsigned int ep93xxfb_readl(struct ep93xx_fbi *fbi, unsigned int off) { return __raw_readl(fbi->mmio_base + off); } static inline void ep93xxfb_writel(struct ep93xx_fbi *fbi, unsigned int val, unsigned int off) { __raw_writel(val, fbi->mmio_base + off); } /* * Write to one of the locked raster registers. */ static inline void ep93xxfb_out_locked(struct ep93xx_fbi *fbi, unsigned int val, unsigned int reg) { /* * We don't need a lock or delay here since the raster register * block will remain unlocked until the next access. */ ep93xxfb_writel(fbi, 0xaa, EP93XXFB_SWLOCK); ep93xxfb_writel(fbi, val, reg); } static void ep93xxfb_set_video_attribs(struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; unsigned int attribs; attribs = EP93XXFB_ENABLE; attribs |= fbi->mach_info->flags; ep93xxfb_out_locked(fbi, attribs, EP93XXFB_ATTRIBS); } static int ep93xxfb_set_pixelmode(struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; unsigned int val; info->var.transp.offset = 0; info->var.transp.length = 0; switch (info->var.bits_per_pixel) { case 8: val = EP93XXFB_PIXELMODE_8BPP | EP93XXFB_PIXELMODE_COLOR_LUT | EP93XXFB_PIXELMODE_SHIFT_1P_18B; info->var.red.offset = 0; info->var.red.length = 8; info->var.green.offset = 0; info->var.green.length = 8; info->var.blue.offset = 0; info->var.blue.length = 8; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; case 16: val = EP93XXFB_PIXELMODE_16BPP | EP93XXFB_PIXELMODE_COLOR_555 | EP93XXFB_PIXELMODE_SHIFT_1P_18B; info->var.red.offset = 11; info->var.red.length = 5; info->var.green.offset = 5; info->var.green.length = 6; info->var.blue.offset = 0; info->var.blue.length = 5; info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 24: val = EP93XXFB_PIXELMODE_24BPP | EP93XXFB_PIXELMODE_COLOR_888 | EP93XXFB_PIXELMODE_SHIFT_1P_24B; info->var.red.offset = 16; info->var.red.length = 8; info->var.green.offset = 8; info->var.green.length = 8; info->var.blue.offset = 0; info->var.blue.length = 8; info->fix.visual 
= FB_VISUAL_TRUECOLOR; break; case 32: val = EP93XXFB_PIXELMODE_32BPP | EP93XXFB_PIXELMODE_COLOR_888 | EP93XXFB_PIXELMODE_SHIFT_1P_24B; info->var.red.offset = 16; info->var.red.length = 8; info->var.green.offset = 8; info->var.green.length = 8; info->var.blue.offset = 0; info->var.blue.length = 8; info->fix.visual = FB_VISUAL_TRUECOLOR; break; default: return -EINVAL; } ep93xxfb_writel(fbi, val, EP93XXFB_PIXELMODE); return 0; } static void ep93xxfb_set_timing(struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; unsigned int vlines_total, hclks_total, start, stop; vlines_total = info->var.yres + info->var.upper_margin + info->var.lower_margin + info->var.vsync_len - 1; hclks_total = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len - 1; ep93xxfb_out_locked(fbi, vlines_total, EP93XXFB_VLINES_TOTAL); ep93xxfb_out_locked(fbi, hclks_total, EP93XXFB_HCLKS_TOTAL); start = vlines_total; stop = vlines_total - info->var.vsync_len; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VSYNC); start = vlines_total - info->var.vsync_len - info->var.upper_margin; stop = info->var.lower_margin - 1; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VBLANK); ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VACTIVE); start = vlines_total; stop = vlines_total + 1; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VCLK); start = hclks_total; stop = hclks_total - info->var.hsync_len; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HSYNC); start = hclks_total - info->var.hsync_len - info->var.left_margin; stop = info->var.right_margin - 1; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HBLANK); ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HACTIVE); start = hclks_total; stop = hclks_total; ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HCLK); ep93xxfb_out_locked(fbi, 0x0, EP93XXFB_LINE_CARRY); } static int ep93xxfb_set_par(struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; 
clk_set_rate(fbi->clk, 1000 * PICOS2KHZ(info->var.pixclock)); ep93xxfb_set_timing(info); info->fix.line_length = info->var.xres_virtual * info->var.bits_per_pixel / 8; ep93xxfb_writel(fbi, info->fix.smem_start, EP93XXFB_SCREEN_PAGE); ep93xxfb_writel(fbi, info->var.yres - 1, EP93XXFB_SCREEN_LINES); ep93xxfb_writel(fbi, ((info->var.xres * info->var.bits_per_pixel) / 32) - 1, EP93XXFB_LINE_LENGTH); ep93xxfb_writel(fbi, info->fix.line_length / 4, EP93XXFB_VLINE_STEP); ep93xxfb_set_video_attribs(info); return 0; } static int ep93xxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err; err = ep93xxfb_set_pixelmode(info); if (err) return err; var->xres = max_t(unsigned int, var->xres, EP93XXFB_MIN_XRES); var->xres = min_t(unsigned int, var->xres, EP93XXFB_MAX_XRES); var->xres_virtual = max(var->xres_virtual, var->xres); var->yres = max_t(unsigned int, var->yres, EP93XXFB_MIN_YRES); var->yres = min_t(unsigned int, var->yres, EP93XXFB_MAX_YRES); var->yres_virtual = max(var->yres_virtual, var->yres); return 0; } static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned int offset = vma->vm_pgoff << PAGE_SHIFT; if (offset < info->fix.smem_len) { return dma_mmap_writecombine(info->dev, vma, info->screen_base, info->fix.smem_start, info->fix.smem_len); } return -EINVAL; } static int ep93xxfb_blank(int blank_mode, struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; unsigned int attribs = ep93xxfb_readl(fbi, EP93XXFB_ATTRIBS); if (blank_mode) { if (fbi->mach_info->blank) fbi->mach_info->blank(blank_mode, info); ep93xxfb_out_locked(fbi, attribs & ~EP93XXFB_ENABLE, EP93XXFB_ATTRIBS); clk_disable(fbi->clk); } else { clk_enable(fbi->clk); ep93xxfb_out_locked(fbi, attribs | EP93XXFB_ENABLE, EP93XXFB_ATTRIBS); if (fbi->mach_info->blank) fbi->mach_info->blank(blank_mode, info); } return 0; } static inline int ep93xxfb_convert_color(int val, int width) { return ((val << width) + 0x7fff - val) >> 16; } static int 
ep93xxfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; unsigned int *pal = info->pseudo_palette; unsigned int ctrl, i, rgb, lut_current, lut_stat; switch (info->fix.visual) { case FB_VISUAL_PSEUDOCOLOR: if (regno > 255) return 1; rgb = ((red & 0xff00) << 8) | (green & 0xff00) | ((blue & 0xff00) >> 8); pal[regno] = rgb; ep93xxfb_writel(fbi, rgb, (EP93XXFB_COLOR_LUT + (regno << 2))); ctrl = ep93xxfb_readl(fbi, EP93XXFB_LUT_SW_CONTROL); lut_stat = !!(ctrl & EP93XXFB_LUT_SW_CONTROL_SSTAT); lut_current = !!(ctrl & EP93XXFB_LUT_SW_CONTROL_SWTCH); if (lut_stat == lut_current) { for (i = 0; i < 256; i++) { ep93xxfb_writel(fbi, pal[i], EP93XXFB_COLOR_LUT + (i << 2)); } ep93xxfb_writel(fbi, ctrl ^ EP93XXFB_LUT_SW_CONTROL_SWTCH, EP93XXFB_LUT_SW_CONTROL); } break; case FB_VISUAL_TRUECOLOR: if (regno > 16) return 1; red = ep93xxfb_convert_color(red, info->var.red.length); green = ep93xxfb_convert_color(green, info->var.green.length); blue = ep93xxfb_convert_color(blue, info->var.blue.length); transp = ep93xxfb_convert_color(transp, info->var.transp.length); pal[regno] = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); break; default: return 1; } return 0; } static struct fb_ops ep93xxfb_ops = { .owner = THIS_MODULE, .fb_check_var = ep93xxfb_check_var, .fb_set_par = ep93xxfb_set_par, .fb_blank = ep93xxfb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_setcolreg = ep93xxfb_setcolreg, .fb_mmap = ep93xxfb_mmap, }; static int __init ep93xxfb_calc_fbsize(struct ep93xxfb_mach_info *mach_info) { int i, fb_size = 0; if (mach_info->num_modes == EP93XXFB_USE_MODEDB) { fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * mach_info->bpp / 8; } else { for (i = 0; i < mach_info->num_modes; i++) { const struct fb_videomode *mode; int 
size; mode = &mach_info->modes[i]; size = mode->xres * mode->yres * mach_info->bpp / 8; if (size > fb_size) fb_size = size; } } return fb_size; } static int __init ep93xxfb_alloc_videomem(struct fb_info *info) { struct ep93xx_fbi *fbi = info->par; char __iomem *virt_addr; dma_addr_t phys_addr; unsigned int fb_size; fb_size = ep93xxfb_calc_fbsize(fbi->mach_info); virt_addr = dma_alloc_writecombine(info->dev, fb_size, &phys_addr, GFP_KERNEL); if (!virt_addr) return -ENOMEM; /* * There is a bug in the ep93xx framebuffer which causes problems * if bit 27 of the physical address is set. * See: http://marc.info/?l=linux-arm-kernel&m=110061245502000&w=2 * There does not seem to be any offical errata for this, but I * have confirmed the problem exists on my hardware (ep9315) at * least. */ if (check_screenpage_bug && phys_addr & (1 << 27)) { dev_err(info->dev, "ep93xx framebuffer bug. phys addr (0x%x) " "has bit 27 set: cannot init framebuffer\n", phys_addr); dma_free_coherent(info->dev, fb_size, virt_addr, phys_addr); return -ENOMEM; } info->fix.smem_start = phys_addr; info->fix.smem_len = fb_size; info->screen_base = virt_addr; return 0; } static void ep93xxfb_dealloc_videomem(struct fb_info *info) { if (info->screen_base) dma_free_coherent(info->dev, info->fix.smem_len, info->screen_base, info->fix.smem_start); } static int __init ep93xxfb_probe(struct platform_device *pdev) { struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data; struct fb_info *info; struct ep93xx_fbi *fbi; struct resource *res; char *video_mode; int err; if (!mach_info) return -EINVAL; info = framebuffer_alloc(sizeof(struct ep93xx_fbi), &pdev->dev); if (!info) return -ENOMEM; info->dev = &pdev->dev; platform_set_drvdata(pdev, info); fbi = info->par; fbi->mach_info = mach_info; err = fb_alloc_cmap(&info->cmap, 256, 0); if (err) goto failed; err = ep93xxfb_alloc_videomem(info); if (err) goto failed; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { err = -ENXIO; goto failed; } 
res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { err = -EBUSY; goto failed; } fbi->res = res; fbi->mmio_base = ioremap(res->start, resource_size(res)); if (!fbi->mmio_base) { err = -ENXIO; goto failed; } strcpy(info->fix.id, pdev->name); info->fbops = &ep93xxfb_ops; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.accel = FB_ACCEL_NONE; info->var.activate = FB_ACTIVATE_NOW; info->var.vmode = FB_VMODE_NONINTERLACED; info->flags = FBINFO_DEFAULT; info->node = -1; info->state = FBINFO_STATE_RUNNING; info->pseudo_palette = &fbi->pseudo_palette; fb_get_options("ep93xx-fb", &video_mode); err = fb_find_mode(&info->var, info, video_mode, fbi->mach_info->modes, fbi->mach_info->num_modes, fbi->mach_info->default_mode, fbi->mach_info->bpp); if (err == 0) { dev_err(info->dev, "No suitable video mode found\n"); err = -EINVAL; goto failed; } if (mach_info->setup) { err = mach_info->setup(pdev); if (err) return err; } err = ep93xxfb_check_var(&info->var, info); if (err) goto failed; fbi->clk = clk_get(info->dev, NULL); if (IS_ERR(fbi->clk)) { err = PTR_ERR(fbi->clk); fbi->clk = NULL; goto failed; } ep93xxfb_set_par(info); clk_enable(fbi->clk); err = register_framebuffer(info); if (err) goto failed; dev_info(info->dev, "registered. 
Mode = %dx%d-%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); return 0; failed: if (fbi->clk) clk_put(fbi->clk); if (fbi->mmio_base) iounmap(fbi->mmio_base); if (fbi->res) release_mem_region(fbi->res->start, resource_size(fbi->res)); ep93xxfb_dealloc_videomem(info); if (&info->cmap) fb_dealloc_cmap(&info->cmap); if (fbi->mach_info->teardown) fbi->mach_info->teardown(pdev); kfree(info); platform_set_drvdata(pdev, NULL); return err; } static int ep93xxfb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); struct ep93xx_fbi *fbi = info->par; unregister_framebuffer(info); clk_disable(fbi->clk); clk_put(fbi->clk); iounmap(fbi->mmio_base); release_mem_region(fbi->res->start, resource_size(fbi->res)); ep93xxfb_dealloc_videomem(info); fb_dealloc_cmap(&info->cmap); if (fbi->mach_info->teardown) fbi->mach_info->teardown(pdev); kfree(info); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver ep93xxfb_driver = { .probe = ep93xxfb_probe, .remove = ep93xxfb_remove, .driver = { .name = "ep93xx-fb", .owner = THIS_MODULE, }, }; static int __devinit ep93xxfb_init(void) { return platform_driver_register(&ep93xxfb_driver); } static void __exit ep93xxfb_exit(void) { platform_driver_unregister(&ep93xxfb_driver); } module_init(ep93xxfb_init); module_exit(ep93xxfb_exit); MODULE_DESCRIPTION("EP93XX Framebuffer Driver"); MODULE_ALIAS("platform:ep93xx-fb"); MODULE_AUTHOR("Ryan Mallon <ryan&bluewatersys.com>, " "H Hartley Sweeten <hsweeten@visionengravers.com"); MODULE_LICENSE("GPL");
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_jflte
drivers/media/video/msm-bayer/mt9t013_reg.c
1757
7302
/* Copyright (c) 2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "mt9t013.h" #include <linux/kernel.h> struct reg_struct const mt9t013_reg_pat[2] = { { /* Preview 2x2 binning 20fps, pclk MHz, MCLK 24MHz */ /* vt_pix_clk_div:REG=0x0300 update get_snapshot_fps * if this change */ 8, /* vt_sys_clk_div: REG=0x0302 update get_snapshot_fps * if this change */ 1, /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps * if this change */ 2, /* pll_multiplier REG=0x0306 60 for 30fps preview, 40 * for 20fps preview * 46 for 30fps preview, try 47/48 to increase further */ 46, /* op_pix_clk_div REG=0x0308 */ 8, /* op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2053, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1541, /* read_mode REG=0x3040 */ 0x046C, /* x_output_size REG=0x034C */ 1024, /* y_output_size REG=0x034E */ 768, /* line_length_pck REG=0x300C */ 2616, /* frame_length_lines REG=0x300A */ 916, /* coarse_int_time REG=0x3012 */ 16, /* fine_int_time REG=0x3014 */ 1461 }, { /*Snapshot */ /* vt_pix_clk_div REG=0x0300 update get_snapshot_fps * if this change */ 8, /* vt_sys_clk_div REG=0x0302 update get_snapshot_fps * if this change */ 1, /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps * if this change */ 2, /* pll_multiplier REG=0x0306 50 for 15fps snapshot, * 40 for 10fps snapshot * 46 for 30fps snapshot, try 47/48 to increase further */ 46, /* op_pix_clk_div REG=0x0308 */ 8, /* 
op_sys_clk_div REG=0x030A */ 1, /* scale_m REG=0x0404 */ 16, /* row_speed REG=0x3016 */ 0x0111, /* x_addr_start REG=0x3004 */ 8, /* x_addr_end REG=0x3008 */ 2071, /* y_addr_start REG=0x3002 */ 8, /* y_addr_end REG=0x3006 */ 1551, /* read_mode REG=0x3040 */ 0x0024, /* x_output_size REG=0x034C */ 2064, /* y_output_size REG=0x034E */ 1544, /* line_length_pck REG=0x300C */ 2952, /* frame_length_lines REG=0x300A */ 1629, /* coarse_int_time REG=0x3012 */ 16, /* fine_int_time REG=0x3014 */ 733 } }; struct mt9t013_i2c_reg_conf const mt9t013_test_tbl[] = { { 0x3044, 0x0544 & 0xFBFF }, { 0x30CA, 0x0004 | 0x0001 }, { 0x30D4, 0x9020 & 0x7FFF }, { 0x31E0, 0x0003 & 0xFFFE }, { 0x3180, 0x91FF & 0x7FFF }, { 0x301A, (0x10CC | 0x8000) & 0xFFF7 }, { 0x301E, 0x0000 }, { 0x3780, 0x0000 }, }; /* [Lens shading 85 Percent TL84] */ struct mt9t013_i2c_reg_conf const mt9t013_lc_tbl[] = { { 0x360A, 0x0290 }, /* P_RD_P0Q0 */ { 0x360C, 0xC92D }, /* P_RD_P0Q1 */ { 0x360E, 0x0771 }, /* P_RD_P0Q2 */ { 0x3610, 0xE38C }, /* P_RD_P0Q3 */ { 0x3612, 0xD74F }, /* P_RD_P0Q4 */ { 0x364A, 0x168C }, /* P_RD_P1Q0 */ { 0x364C, 0xCACB }, /* P_RD_P1Q1 */ { 0x364E, 0x8C4C }, /* P_RD_P1Q2 */ { 0x3650, 0x0BEA }, /* P_RD_P1Q3 */ { 0x3652, 0xDC0F }, /* P_RD_P1Q4 */ { 0x368A, 0x70B0 }, /* P_RD_P2Q0 */ { 0x368C, 0x200B }, /* P_RD_P2Q1 */ { 0x368E, 0x30B2 }, /* P_RD_P2Q2 */ { 0x3690, 0xD04F }, /* P_RD_P2Q3 */ { 0x3692, 0xACF5 }, /* P_RD_P2Q4 */ { 0x36CA, 0xF7C9 }, /* P_RD_P3Q0 */ { 0x36CC, 0x2AED }, /* P_RD_P3Q1 */ { 0x36CE, 0xA652 }, /* P_RD_P3Q2 */ { 0x36D0, 0x8192 }, /* P_RD_P3Q3 */ { 0x36D2, 0x3A15 }, /* P_RD_P3Q4 */ { 0x370A, 0xDA30 }, /* P_RD_P4Q0 */ { 0x370C, 0x2E2F }, /* P_RD_P4Q1 */ { 0x370E, 0xBB56 }, /* P_RD_P4Q2 */ { 0x3710, 0x8195 }, /* P_RD_P4Q3 */ { 0x3712, 0x02F9 }, /* P_RD_P4Q4 */ { 0x3600, 0x0230 }, /* P_GR_P0Q0 */ { 0x3602, 0x58AD }, /* P_GR_P0Q1 */ { 0x3604, 0x18D1 }, /* P_GR_P0Q2 */ { 0x3606, 0x260D }, /* P_GR_P0Q3 */ { 0x3608, 0xF530 }, /* P_GR_P0Q4 */ { 0x3640, 0x17EB }, /* P_GR_P1Q0 */ { 0x3642, 
0x3CAB }, /* P_GR_P1Q1 */ { 0x3644, 0x87CE }, /* P_GR_P1Q2 */ { 0x3646, 0xC02E }, /* P_GR_P1Q3 */ { 0x3648, 0xF48F }, /* P_GR_P1Q4 */ { 0x3680, 0x5350 }, /* P_GR_P2Q0 */ { 0x3682, 0x7EAF }, /* P_GR_P2Q1 */ { 0x3684, 0x4312 }, /* P_GR_P2Q2 */ { 0x3686, 0xC652 }, /* P_GR_P2Q3 */ { 0x3688, 0xBC15 }, /* P_GR_P2Q4 */ { 0x36C0, 0xB8AD }, /* P_GR_P3Q0 */ { 0x36C2, 0xBDCD }, /* P_GR_P3Q1 */ { 0x36C4, 0xE4B2 }, /* P_GR_P3Q2 */ { 0x36C6, 0xB50F }, /* P_GR_P3Q3 */ { 0x36C8, 0x5B95 }, /* P_GR_P3Q4 */ { 0x3700, 0xFC90 }, /* P_GR_P4Q0 */ { 0x3702, 0x8C51 }, /* P_GR_P4Q1 */ { 0x3704, 0xCED6 }, /* P_GR_P4Q2 */ { 0x3706, 0xB594 }, /* P_GR_P4Q3 */ { 0x3708, 0x0A39 }, /* P_GR_P4Q4 */ { 0x3614, 0x0230 }, /* P_BL_P0Q0 */ { 0x3616, 0x160D }, /* P_BL_P0Q1 */ { 0x3618, 0x08D1 }, /* P_BL_P0Q2 */ { 0x361A, 0x98AB }, /* P_BL_P0Q3 */ { 0x361C, 0xEA50 }, /* P_BL_P0Q4 */ { 0x3654, 0xB4EA }, /* P_BL_P1Q0 */ { 0x3656, 0xEA6C }, /* P_BL_P1Q1 */ { 0x3658, 0xFE08 }, /* P_BL_P1Q2 */ { 0x365A, 0x2C6E }, /* P_BL_P1Q3 */ { 0x365C, 0xEB0E }, /* P_BL_P1Q4 */ { 0x3694, 0x6DF0 }, /* P_BL_P2Q0 */ { 0x3696, 0x3ACF }, /* P_BL_P2Q1 */ { 0x3698, 0x3E0F }, /* P_BL_P2Q2 */ { 0x369A, 0xB2B1 }, /* P_BL_P2Q3 */ { 0x369C, 0xC374 }, /* P_BL_P2Q4 */ { 0x36D4, 0xF2AA }, /* P_BL_P3Q0 */ { 0x36D6, 0x8CCC }, /* P_BL_P3Q1 */ { 0x36D8, 0xDEF2 }, /* P_BL_P3Q2 */ { 0x36DA, 0xFA11 }, /* P_BL_P3Q3 */ { 0x36DC, 0x42F5 }, /* P_BL_P3Q4 */ { 0x3714, 0xF4F1 }, /* P_BL_P4Q0 */ { 0x3716, 0xF6F0 }, /* P_BL_P4Q1 */ { 0x3718, 0x8FD6 }, /* P_BL_P4Q2 */ { 0x371A, 0xEA14 }, /* P_BL_P4Q3 */ { 0x371C, 0x6338 }, /* P_BL_P4Q4 */ { 0x361E, 0x0350 }, /* P_GB_P0Q0 */ { 0x3620, 0x91AE }, /* P_GB_P0Q1 */ { 0x3622, 0x0571 }, /* P_GB_P0Q2 */ { 0x3624, 0x100D }, /* P_GB_P0Q3 */ { 0x3626, 0xCA70 }, /* P_GB_P0Q4 */ { 0x365E, 0xE6CB }, /* P_GB_P1Q0 */ { 0x3660, 0x50ED }, /* P_GB_P1Q1 */ { 0x3662, 0x3DAE }, /* P_GB_P1Q2 */ { 0x3664, 0xAA4F }, /* P_GB_P1Q3 */ { 0x3666, 0xDC50 }, /* P_GB_P1Q4 */ { 0x369E, 0x5470 }, /* P_GB_P2Q0 */ { 0x36A0, 0x1F6E }, /* 
P_GB_P2Q1 */ { 0x36A2, 0x6671 }, /* P_GB_P2Q2 */ { 0x36A4, 0xC010 }, /* P_GB_P2Q3 */ { 0x36A6, 0x8DF5 }, /* P_GB_P2Q4 */ { 0x36DE, 0x0B0C }, /* P_GB_P3Q0 */ { 0x36E0, 0x84CE }, /* P_GB_P3Q1 */ { 0x36E2, 0x8493 }, /* P_GB_P3Q2 */ { 0x36E4, 0xA610 }, /* P_GB_P3Q3 */ { 0x36E6, 0x50B5 }, /* P_GB_P3Q4 */ { 0x371E, 0x9651 }, /* P_GB_P4Q0 */ { 0x3720, 0x1EAB }, /* P_GB_P4Q1 */ { 0x3722, 0xAF76 }, /* P_GB_P4Q2 */ { 0x3724, 0xE4F4 }, /* P_GB_P4Q3 */ { 0x3726, 0x79F8 }, /* P_GB_P4Q4 */ { 0x3782, 0x0410 }, /* POLY_ORIGIN_C */ { 0x3784, 0x0320 }, /* POLY_ORIGIN_R */ { 0x3780, 0x8000 } /* POLY_SC_ENABLE */ }; struct mt9t013_reg mt9t013_regs = { .reg_pat = &mt9t013_reg_pat[0], .reg_pat_size = ARRAY_SIZE(mt9t013_reg_pat), .ttbl = &mt9t013_test_tbl[0], .ttbl_size = ARRAY_SIZE(mt9t013_test_tbl), .lctbl = &mt9t013_lc_tbl[0], .lctbl_size = ARRAY_SIZE(mt9t013_lc_tbl), .rftbl = &mt9t013_lc_tbl[0], /* &mt9t013_rolloff_tbl[0], */ .rftbl_size = ARRAY_SIZE(mt9t013_lc_tbl) };
gpl-2.0