repo_name
string
path
string
copies
string
size
string
content
string
license
string
Ninpo/ninphetamine3
drivers/net/arm/ether3.c
3084
24089
/* * linux/drivers/acorn/net/ether3.c * * Copyright (C) 1995-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card * for Acorn machines * * By Russell King, with some suggestions from borris@ant.co.uk * * Changelog: * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet * address up to the higher levels - they're * silently ignored. I/F can now be put into * multicast mode. Receiver routine optimised. * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of * the kernel rather than when a module. * 1.06 RMK 02/03/1996 Various code cleanups * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit * routines. * 1.08 RMK 14/10/1996 Fixed problem with too many packets, * prevented the kernel message about dropped * packets appearing too many times a second. * Now does not disable all IRQs, only the IRQ * used by this card. * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low, * but we still service the TX queue if we get a * RX interrupt. * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004. * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A. * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1. * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h. * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets. * Chip seems to have a bug in, whereby if the * packet starts two bytes from the end of the * buffer, it corrupts the receiver chain, and * never updates the transmit status correctly. * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing. * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy * hardware. 
* 1.16 RMK 10/02/2000 Updated for 2.3.43 * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/system.h> #include <asm/ecard.h> #include <asm/io.h> static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; #include "ether3.h" static unsigned int net_debug = NET_DEBUG; static void ether3_setmulticastlist(struct net_device *dev); static int ether3_rx(struct net_device *dev, unsigned int maxcnt); static void ether3_tx(struct net_device *dev); static int ether3_open (struct net_device *dev); static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); static irqreturn_t ether3_interrupt (int irq, void *dev_id); static int ether3_close (struct net_device *dev); static void ether3_setmulticastlist (struct net_device *dev); static void ether3_timeout(struct net_device *dev); #define BUS_16 2 #define BUS_8 1 #define BUS_UNKNOWN 0 /* --------------------------------------------------------------------------- */ typedef enum { buffer_write, buffer_read } buffer_rw_t; /* * ether3 read/write. Slow things down a bit... * The SEEQ8005 doesn't like us writing to its registers * too quickly. 
*/ static inline void ether3_outb(int v, const void __iomem *r) { writeb(v, r); udelay(1); } static inline void ether3_outw(int v, const void __iomem *r) { writew(v, r); udelay(1); } #define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; }) #define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; }) static int ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) { int timeout = 1000; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) { if (!timeout--) { printk("%s: setbuffer broken\n", dev->name); priv(dev)->broken = 1; return 1; } udelay(1); } if (read == buffer_read) { ether3_outw(start, REG_DMAADDR); ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND); } else { ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); ether3_outw(start, REG_DMAADDR); } return 0; } /* * write data to the buffer memory */ #define ether3_writebuffer(dev,data,length) \ writesw(REG_BUFWIN, (data), (length) >> 1) #define ether3_writeword(dev,data) \ writew((data), REG_BUFWIN) #define ether3_writelong(dev,data) { \ void __iomem *reg_bufwin = REG_BUFWIN; \ writew((data), reg_bufwin); \ writew((data) >> 16, reg_bufwin); \ } /* * read data from the buffer memory */ #define ether3_readbuffer(dev,data,length) \ readsw(REG_BUFWIN, (data), (length) >> 1) #define ether3_readword(dev) \ readw(REG_BUFWIN) #define ether3_readlong(dev) \ readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16) /* * Switch LED off... */ static void ether3_ledoff(unsigned long data) { struct net_device *dev = (struct net_device *)data; ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); } /* * switch LED on... 
*/ static inline void ether3_ledon(struct net_device *dev) { del_timer(&priv(dev)->timer); priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ priv(dev)->timer.data = (unsigned long)dev; priv(dev)->timer.function = ether3_ledoff; add_timer(&priv(dev)->timer); if (priv(dev)->regs.config2 & CFG2_CTRLO) ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2); } /* * Read the ethernet address string from the on board rom. * This is an ascii string!!! */ static int __devinit ether3_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) { int i; for (i = 0; i<6; i++) { addr[i] = simple_strtoul(s + 1, &s, 0x10); if (*s != (i==5?')' : ':' )) break; } if (i == 6) return 0; } /* I wonder if we should even let the user continue in this case * - no, it would be better to disable the device */ printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n"); return -ENODEV; } /* --------------------------------------------------------------------------- */ static int __devinit ether3_ramtest(struct net_device *dev, unsigned char byte) { unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL); int i,ret = 0; int max_errors = 4; int bad = -1; if (!buffer) return 1; memset(buffer, byte, RX_END); ether3_setbuffer(dev, buffer_write, 0); ether3_writebuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_write, RX_START); ether3_writebuffer(dev, buffer + RX_START, RX_LEN); memset(buffer, byte ^ 0xff, RX_END); ether3_setbuffer(dev, buffer_read, 0); ether3_readbuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_read, RX_START); ether3_readbuffer(dev, buffer + RX_START, RX_LEN); for (i = 0; i < RX_END; i++) { if (buffer[i] != byte) { if (max_errors > 0 && bad != buffer[i]) { printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X", dev->name, buffer[i], byte, i); ret = 2; max_errors--; bad = i; } } else { if (bad != -1) { if (bad != i - 
1) printk(" - 0x%04X\n", i - 1); printk("\n"); bad = -1; } } } if (bad != -1) printk(" - 0xffff\n"); kfree(buffer); return ret; } /* ------------------------------------------------------------------------------- */ static int __devinit ether3_init_2(struct net_device *dev) { int i; priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8; priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC; priv(dev)->regs.command = 0; /* * Set up our hardware address */ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); if (dev->flags & IFF_PROMISC) priv(dev)->regs.config1 |= CFG1_RECVPROMISC; else if (dev->flags & IFF_MULTICAST) priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; /* * There is a problem with the NQ8005 in that it occasionally loses the * last two bytes. To get round this problem, we receive the CRC as * well. That way, if we do lose the last two, then it doesn't matter. 
*/ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command, REG_COMMAND); i = ether3_ramtest(dev, 0x5A); if(i) return i; i = ether3_ramtest(dev, 0x1E); if(i) return i; ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); return 0; } static void ether3_init_for_open(struct net_device *dev) { int i; /* Reset the chip */ ether3_outw(CFG2_RESET, REG_CONFIG2); udelay(4); priv(dev)->regs.command = 0; ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); priv(dev)->tx_head = 0; priv(dev)->tx_tail = 0; priv(dev)->regs.config2 |= CFG2_CTRLO; priv(dev)->rx_head = RX_START; ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX; ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } static inline int ether3_probe_bus_8(struct net_device *dev, int val) { int write_low, write_high, read_low, read_high; write_low = val & 255; write_high = val >> 8; printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low); ether3_outb(write_low, REG_RECVPTR); ether3_outb(write_high, REG_RECVPTR + 4); read_low = 
ether3_inb(REG_RECVPTR); read_high = ether3_inb(REG_RECVPTR + 4); printk(", read8 [%02X:%02X]\n", read_high, read_low); return read_low == write_low && read_high == write_high; } static inline int ether3_probe_bus_16(struct net_device *dev, int val) { int read_val; ether3_outw(val, REG_RECVPTR); read_val = ether3_inw(REG_RECVPTR); printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val); return read_val == val; } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ static int ether3_open(struct net_device *dev) { if (!is_valid_ether_addr(dev->dev_addr)) { printk(KERN_WARNING "%s: invalid ethernet MAC address\n", dev->name); return -EINVAL; } if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev)) return -EAGAIN; ether3_init_for_open(dev); netif_start_queue(dev); return 0; } /* * The inverse routine to ether3_open(). */ static int ether3_close(struct net_device *dev) { netif_stop_queue(dev); disable_irq(dev->irq); ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); priv(dev)->regs.command = 0; while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outb(0x80, REG_CONFIG2 + 4); ether3_outw(0, REG_COMMAND); free_irq(dev->irq, dev); return 0; } /* * Set or clear promiscuous/multicast mode filter for this adaptor. * * We don't attempt any packet filtering. The card may have a SEEQ 8004 * in which does not have the other ethernet address registers present... 
*/ static void ether3_setmulticastlist(struct net_device *dev) { priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC; if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ priv(dev)->regs.config1 |= CFG1_RECVPROMISC; } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; } else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); } static void ether3_timeout(struct net_device *dev) { unsigned long flags; del_timer(&priv(dev)->timer); local_irq_save(flags); printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name, ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2)); printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name, ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR)); printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name, priv(dev)->tx_head, priv(dev)->tx_tail); ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail); printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev)); local_irq_restore(flags); priv(dev)->regs.config2 |= CFG2_CTRLO; dev->stats.tx_errors += 1; ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); priv(dev)->tx_head = priv(dev)->tx_tail = 0; netif_wake_queue(dev); } /* * Transmit a packet */ static int ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; unsigned int length = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; unsigned int ptr, next_ptr; if (priv(dev)->broken) { dev_kfree_skb(skb); dev->stats.tx_dropped++; netif_start_queue(dev); return NETDEV_TX_OK; } length = (length + 1) & ~1; if (length != skb->len) { if (skb_padto(skb, length)) goto out; } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_save(flags); if (priv(dev)->tx_tail == next_ptr) { local_irq_restore(flags); return NETDEV_TX_BUSY; /* unable to queue */ } ptr = 0x600 * priv(dev)->tx_head; priv(dev)->tx_head = next_ptr; next_ptr *= 0x600; #define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS) ether3_setbuffer(dev, buffer_write, next_ptr); ether3_writelong(dev, 0); ether3_setbuffer(dev, buffer_write, ptr); ether3_writelong(dev, 0); ether3_writebuffer(dev, skb->data, length); ether3_writeword(dev, htons(next_ptr)); ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16); ether3_setbuffer(dev, buffer_write, ptr); ether3_writeword(dev, htons((ptr + length + 4))); ether3_writeword(dev, TXHDR_FLAGS >> 16); ether3_ledon(dev); if (!(ether3_inw(REG_STATUS) & STAT_TXON)) { ether3_outw(ptr, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND); } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_restore(flags); dev_kfree_skb(skb); if (priv(dev)->tx_tail == next_ptr) netif_stop_queue(dev); out: return NETDEV_TX_OK; } static irqreturn_t ether3_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; unsigned int status, handled = IRQ_NONE; #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("eth3irq: %d ", irq); #endif status = ether3_inw(REG_STATUS); if (status & STAT_INTRX) { ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND); ether3_rx(dev, 12); handled = IRQ_HANDLED; } if (status & STAT_INTTX) { ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND); ether3_tx(dev); handled = IRQ_HANDLED; } #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("done\n"); #endif return handled; } /* * If we 
have a good packet(s), get it/them out of the buffers. */ static int ether3_rx(struct net_device *dev, unsigned int maxcnt) { unsigned int next_ptr = priv(dev)->rx_head, received = 0; ether3_ledon(dev); do { unsigned int this_ptr, status; unsigned char addrs[16]; /* * read the first 16 bytes from the buffer. * This contains the status bytes etc and ethernet addresses, * and we also check the source ethernet address to see if * it originated from us. */ { unsigned int temp_ptr; ether3_setbuffer(dev, buffer_read, next_ptr); temp_ptr = ether3_readword(dev); status = ether3_readword(dev); if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) != (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr) break; this_ptr = next_ptr + 4; next_ptr = ntohs(temp_ptr); } ether3_setbuffer(dev, buffer_read, this_ptr); ether3_readbuffer(dev, addrs+2, 12); if (next_ptr < RX_START || next_ptr >= RX_END) { int i; printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head); printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8); for (i = 2; i < 14; i++) printk("%02X ", addrs[i]); printk("\n"); next_ptr = priv(dev)->rx_head; break; } /* * ignore our own packets... 
*/ if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) && !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) { maxcnt ++; /* compensate for loopedback packet */ ether3_outw(next_ptr >> 8, REG_RECVEND); } else if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) { unsigned int length = next_ptr - this_ptr; struct sk_buff *skb; if (next_ptr <= this_ptr) length += RX_END - RX_START; skb = dev_alloc_skb(length + 2); if (skb) { unsigned char *buf; skb_reserve(skb, 2); buf = skb_put(skb, length); ether3_readbuffer(dev, buf + 12, length - 12); ether3_outw(next_ptr >> 8, REG_RECVEND); *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2); *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4); *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8); *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); received ++; } else goto dropping; } else { struct net_device_stats *stats = &dev->stats; ether3_outw(next_ptr >> 8, REG_RECVEND); if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++; if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++; if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++; if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++; stats->rx_errors++; } } while (-- maxcnt); done: dev->stats.rx_packets += received; priv(dev)->rx_head = next_ptr; /* * If rx went off line, then that means that the buffer may be full. We * have dropped at least one packet. */ if (!(ether3_inw(REG_STATUS) & STAT_RXON)) { dev->stats.rx_dropped++; ether3_outw(next_ptr, REG_RECVPTR); ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } return maxcnt; dropping:{ static unsigned long last_warned; ether3_outw(next_ptr >> 8, REG_RECVEND); /* * Don't print this message too many times... 
*/ if (time_after(jiffies, last_warned + 10 * HZ)) { last_warned = jiffies; printk("%s: memory squeeze, dropping packet.\n", dev->name); } dev->stats.rx_dropped++; goto done; } } /* * Update stats for the transmitted packet(s) */ static void ether3_tx(struct net_device *dev) { unsigned int tx_tail = priv(dev)->tx_tail; int max_work = 14; do { unsigned long status; /* * Read the packet header */ ether3_setbuffer(dev, buffer_read, tx_tail * 0x600); status = ether3_readlong(dev); /* * Check to see if this packet has been transmitted */ if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) != (TXSTAT_DONE | TXHDR_TRANSMIT)) break; /* * Update errors */ if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS))) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (status & TXSTAT_16COLLISIONS) dev->stats.collisions += 16; if (status & TXSTAT_BABBLED) dev->stats.tx_fifo_errors++; } tx_tail = (tx_tail + 1) & 15; } while (--max_work); if (priv(dev)->tx_tail != tx_tail) { priv(dev)->tx_tail = tx_tail; netif_wake_queue(dev); } } static void __devinit ether3_banner(void) { static unsigned version_printed = 0; if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", version); } static const struct net_device_ops ether3_netdev_ops = { .ndo_open = ether3_open, .ndo_stop = ether3_close, .ndo_start_xmit = ether3_sendpacket, .ndo_set_multicast_list = ether3_setmulticastlist, .ndo_tx_timeout = ether3_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static int __devinit ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct ether3_data *data = id->data; struct net_device *dev; int bus_type, ret; ether3_banner(); ret = ecard_request_resources(ec); if (ret) goto out; dev = alloc_etherdev(sizeof(struct dev_priv)); if (!dev) { ret = -ENOMEM; goto release; } SET_NETDEV_DEV(dev, &ec->dev); priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!priv(dev)->base) { ret = -ENOMEM; 
goto free; } ec->irqaddr = priv(dev)->base + data->base_offset; ec->irqmask = 0xf0; priv(dev)->seeq = priv(dev)->base + data->base_offset; dev->irq = ec->irq; ether3_addr(dev->dev_addr, ec); init_timer(&priv(dev)->timer); /* Reset card... */ ether3_outb(0x80, REG_CONFIG2 + 4); bus_type = BUS_UNKNOWN; udelay(4); /* Test using Receive Pointer (16-bit register) to find out * how the ether3 is connected to the bus... */ if (ether3_probe_bus_8(dev, 0x100) && ether3_probe_bus_8(dev, 0x201)) bus_type = BUS_8; if (bus_type == BUS_UNKNOWN && ether3_probe_bus_16(dev, 0x101) && ether3_probe_bus_16(dev, 0x201)) bus_type = BUS_16; switch (bus_type) { case BUS_UNKNOWN: printk(KERN_ERR "%s: unable to identify bus width\n", dev->name); ret = -ENODEV; goto free; case BUS_8: printk(KERN_ERR "%s: %s found, but is an unsupported " "8-bit card\n", dev->name, data->name); ret = -ENODEV; goto free; default: break; } if (ether3_init_2(dev)) { ret = -ENODEV; goto free; } dev->netdev_ops = &ether3_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); if (ret) goto free; printk("%s: %s in slot %d, %pM\n", dev->name, data->name, ec->slot_no, dev->dev_addr); ecard_set_drvdata(ec, dev); return 0; free: free_netdev(dev); release: ecard_release_resources(ec); out: return ret; } static void __devexit ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec); ecard_set_drvdata(ec, NULL); unregister_netdev(dev); free_netdev(dev); ecard_release_resources(ec); } static struct ether3_data ether3 = { .name = "ether3", .base_offset = 0, }; static struct ether3_data etherb = { .name = "etherb", .base_offset = 0x800, }; static const struct ecard_id ether3_ids[] = { { MANU_ANT2, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHERB, &etherb }, { 0xffff, 0xffff } }; static struct ecard_driver ether3_driver = { .probe = ether3_probe, .remove = __devexit_p(ether3_remove), .id_table = ether3_ids, .drv = { .name = 
"ether3", }, }; static int __init ether3_init(void) { return ecard_register_driver(&ether3_driver); } static void __exit ether3_exit(void) { ecard_remove_driver(&ether3_driver); } module_init(ether3_init); module_exit(ether3_exit); MODULE_LICENSE("GPL");
gpl-2.0
ownhere/samsung-kernel-sgs3-ownhere
drivers/net/atp.c
3084
29747
/* atp.c: Attached (pocket) ethernet adapter driver for linux. */ /* This is a driver for commonly OEM pocket (parallel port) ethernet adapters based on the Realtek RTL8002 and RTL8012 chips. Written 1993-2000 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. Copyright 1993 United States Government as represented by the Director, National Security Agency. Copyright 1994-2000 retained by the original author, Donald Becker. The timer-based reset code was supplied in 1995 by Bill Carlson, wwc@super.org. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support information and updates available at http://www.scyld.com/network/atp.html Modular support/softnet added by Alan Cox. _bit abuse fixed up by Alan Cox */ static const char version[] = "atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n"; /* The user-configurable values. These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ #define net_debug debug /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 15; #define NUM_UNITS 2 /* The standard set of ISA module parameters. */ static int io[NUM_UNITS]; static int irq[NUM_UNITS]; static int xcvr[NUM_UNITS]; /* The data transfer mode. */ /* Operational parameters that are set at compile time. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (400*HZ/1000) /* This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket ethernet adapter. 
This is a common low-cost OEM pocket ethernet adapter, sold under many names. Sources: This driver was written from the packet driver assembly code provided by Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated device works just from the assembly code? It ain't pretty. The following description is written based on guesses and writing lots of special-purpose code to test my theorized operation. In 1997 Realtek made available the documentation for the second generation RTL8012 chip, which has lead to several driver improvements. http://www.realtek.com.tw/ Theory of Operation The RTL8002 adapter seems to be built around a custom spin of the SEEQ controller core. It probably has a 16K or 64K internal packet buffer, of which the first 4K is devoted to transmit and the rest to receive. The controller maintains the queue of received packet and the packet buffer access pointer internally, with only 'reset to beginning' and 'skip to next packet' commands visible. The transmit packet queue holds two (or more?) packets: both 'retransmit this packet' (due to collision) and 'transmit next packet' commands must be started by hand. The station address is stored in a standard bit-serial EEPROM which must be read (ughh) by the device driver. (Provisions have been made for substituting a 74S288 PROM, but I haven't gotten reports of any models using it.) Unlike built-in devices, a pocket adapter can temporarily lose power without indication to the device driver. The major effect is that the station address, receive filter (promiscuous, etc.) and transceiver must be reset. The controller itself has 16 registers, some of which use only the lower bits. The registers are read and written 4 bits at a time. The four bit register address is presented on the data lines along with a few additional timing and control bits. The data is then read from status port or written to the data port. Correction: the controller has two banks of 16 registers. 
The second bank contains only the multicast filter table (now used) and the EEPROM access registers. Since the bulk data transfer of the actual packets through the slow parallel port dominates the driver's running time, four distinct data (non-register) transfer modes are provided by the adapter, two in each direction. In the first mode timing for the nibble transfers is provided through the data port. In the second mode the same timing is provided through the control port. In either case the data is read from the status port and written to the data port, just as it is accessing registers. In addition to the basic data transfer methods, several more are modes are created by adding some delay by doing multiple reads of the data to allow it to stabilize. This delay seems to be needed on most machines. The data transfer mode is stored in the 'dev->if_port' field. Its default value is '4'. It may be overridden at boot-time using the third parameter to the "ether=..." initialization. The header file <atp.h> provides inline functions that encapsulate the register and data access methods. These functions are hand-tuned to generate reasonable object code. This header file also documents my interpretations of the device registers. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include "atp.h" MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver"); MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(xcvr, int, NULL, 0); MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "ATP debug level (0-7)"); MODULE_PARM_DESC(io, "ATP I/O base address(es)"); MODULE_PARM_DESC(irq, "ATP IRQ number(s)"); MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)"); /* The number of low I/O ports used by the ethercard. */ #define ETHERCARD_TOTAL_SIZE 3 /* Sequence to switch an 8012 from printer mux to ethernet mode. */ static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,}; struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. */ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */ pac_cnt_in_tx_buf, chip_type; }; /* This code, written by wwc@super.org, resets the adapter every TIMED_CHECKER ticks. This recovers from an unknown error which hangs the device. 
*/ #define TIMED_CHECKER (HZ/4) #ifdef TIMED_CHECKER #include <linux/timer.h> static void atp_timed_checker(unsigned long ignored); #endif /* Index to functions, as function prototypes. */ static int atp_probe1(long ioaddr); static void get_node_ID(struct net_device *dev); static unsigned short eeprom_op(long ioaddr, unsigned int cmd); static int net_open(struct net_device *dev); static void hardware_init(struct net_device *dev); static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode); static void trigger_send(long ioaddr, int length); static netdev_tx_t atp_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t atp_interrupt(int irq, void *dev_id); static void net_rx(struct net_device *dev); static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); static int net_close(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void tx_timeout(struct net_device *dev); /* A list of all installed ATP devices, for removing the driver module. */ static struct net_device *root_atp_dev; /* Check for a network adapter of this type, and return '0' iff one exists. If dev->base_addr == 0, probe all likely locations. If dev->base_addr == 1, always return failure. If dev->base_addr == 2, allocate space for the device and return success (detachable devices only). FIXME: we should use the parport layer for this */ static int __init atp_init(void) { int *port, ports[] = {0x378, 0x278, 0x3bc, 0}; int base_addr = io[0]; if (base_addr > 0x1ff) /* Check a single specified location. */ return atp_probe1(base_addr); else if (base_addr == 1) /* Don't probe at all. 
*/ return -ENXIO; for (port = ports; *port; port++) { long ioaddr = *port; outb(0x57, ioaddr + PAR_DATA); if (inb(ioaddr + PAR_DATA) != 0x57) continue; if (atp_probe1(ioaddr) == 0) return 0; } return -ENODEV; } static const struct net_device_ops atp_netdev_ops = { .ndo_open = net_open, .ndo_stop = net_close, .ndo_start_xmit = atp_send_packet, .ndo_set_multicast_list = set_rx_mode, .ndo_tx_timeout = tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __init atp_probe1(long ioaddr) { struct net_device *dev = NULL; struct net_local *lp; int saved_ctrl_reg, status, i; int res; outb(0xff, ioaddr + PAR_DATA); /* Save the original value of the Control register, in case we guessed wrong. */ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL); if (net_debug > 3) printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg); /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */ outb(0x04, ioaddr + PAR_CONTROL); #ifndef final_version if (net_debug > 3) { /* Turn off the printer multiplexer on the 8012. */ for (i = 0; i < 8; i++) outb(mux_8012[i], ioaddr + PAR_DATA); write_reg(ioaddr, MODSEL, 0x00); printk("atp: Registers are "); for (i = 0; i < 32; i++) printk(" %2.2x", read_nibble(ioaddr, i)); printk(".\n"); } #endif /* Turn off the printer multiplexer on the 8012. */ for (i = 0; i < 8; i++) outb(mux_8012[i], ioaddr + PAR_DATA); write_reg_high(ioaddr, CMR1, CMR1h_RESET); /* udelay() here? */ status = read_nibble(ioaddr, CMR1); if (net_debug > 3) { printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status); for (i = 0; i < 32; i++) printk(" %2.2x", read_nibble(ioaddr, i)); printk("\n"); } if ((status & 0x78) != 0x08) { /* The pocket adapter probe failed, restore the control register. 
*/ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL); return -ENODEV; } status = read_nibble(ioaddr, CMR2_h); if ((status & 0x78) != 0x10) { outb(saved_ctrl_reg, ioaddr + PAR_CONTROL); return -ENODEV; } dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; /* Find the IRQ used by triggering an interrupt. */ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */ if (irq[0]) dev->irq = irq[0]; else if (ioaddr == 0x378) dev->irq = 7; else dev->irq = 5; write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */ write_reg(ioaddr, CMR2, CMR2_NULL); dev->base_addr = ioaddr; /* Read the station address PROM. */ get_node_ID(dev); #ifndef MODULE if (net_debug) printk(KERN_INFO "%s", version); #endif printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, " "SAPROM %pM.\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr); /* Reset the ethernet hardware and activate the printer pass-through. */ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); lp = netdev_priv(dev); lp->chip_type = RTL8002; lp->addr_mode = CMR2h_Normal; spin_lock_init(&lp->lock); /* For the ATP adapter the "if_port" is really the data transfer mode. */ if (xcvr[0]) dev->if_port = xcvr[0]; else dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4; if (dev->mem_end & 0xf) net_debug = dev->mem_end & 7; dev->netdev_ops = &atp_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; res = register_netdev(dev); if (res) { free_netdev(dev); return res; } lp->next_module = root_atp_dev; root_atp_dev = dev; return 0; } /* Read the station address PROM, usually a word-wide EEPROM. */ static void __init get_node_ID(struct net_device *dev) { long ioaddr = dev->base_addr; int sa_offset = 0; int i; write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. 
*/ /* Some adapters have the station address at offset 15 instead of offset zero. Check for it, and fix it if needed. */ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff) sa_offset = 15; for (i = 0; i < 3; i++) ((__be16 *)dev->dev_addr)[i] = cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i))); write_reg(ioaddr, CMR2, CMR2_NULL); } /* An EEPROM read command starts by shifting out 0x60+address, and then shifting in the serial data. See the NatSemi databook for details. * ________________ * CS : __| * ___ ___ * CLK: ______| |___| | * __ _______ _______ * DI : __X_______X_______X * DO : _________X_______X */ static unsigned short __init eeprom_op(long ioaddr, u32 cmd) { unsigned eedata_out = 0; int num_bits = EE_CMD_SIZE; while (--num_bits >= 0) { char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0; write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW); write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH); eedata_out <<= 1; if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ) eedata_out++; } write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS); return eedata_out; } /* Open/initialize the board. This is called (in the current kernel) sometime after booting when the 'ifconfig' program is run. This routine sets everything up anew at each open, even registers that "should" only need to be set once at boot, so that there is non-reboot way to recover if something goes wrong. This is an attachable device: if there is no private entry then it wasn't probed for at boot-time, and we need to probe for it again. */ static int net_open(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); int ret; /* The interrupt line is turned off (tri-stated) when the device isn't in use. That's especially important for "attached" interfaces where the port or interrupt may be shared. 
*/ ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev); if (ret) return ret; hardware_init(dev); init_timer(&lp->timer); lp->timer.expires = jiffies + TIMED_CHECKER; lp->timer.data = (unsigned long)dev; lp->timer.function = atp_timed_checker; /* timer handler */ add_timer(&lp->timer); netif_start_queue(dev); return 0; } /* This routine resets the hardware. We initialize everything, assuming that the hardware may have been temporarily detached. */ static void hardware_init(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; int i; /* Turn off the printer multiplexer on the 8012. */ for (i = 0; i < 8; i++) outb(mux_8012[i], ioaddr + PAR_DATA); write_reg_high(ioaddr, CMR1, CMR1h_RESET); for (i = 0; i < 6; i++) write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); write_reg_high(ioaddr, CMR2, lp->addr_mode); if (net_debug > 2) { printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name, (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f); } write_reg(ioaddr, CMR2, CMR2_IRQOUT); write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable the interrupt line from the serial port. */ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); /* Unmask the interesting interrupts. */ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); write_reg_high(ioaddr, IMR, ISRh_RxErr); lp->tx_unit_busy = 0; lp->pac_cnt_in_tx_buf = 0; lp->saved_tx_size = 0; } static void trigger_send(long ioaddr, int length) { write_reg_byte(ioaddr, TxCNT0, length & 0xff); write_reg(ioaddr, TxCNT1, length >> 8); write_reg(ioaddr, CMR1, CMR1_Xmit); } static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode) { if (length & 1) { length++; pad_len++; } outb(EOC+MAR, ioaddr + PAR_DATA); if ((data_mode & 1) == 0) { /* Write the packet out, starting with the write addr. 
*/ outb(WrAddr+MAR, ioaddr + PAR_DATA); do { write_byte_mode0(ioaddr, *packet++); } while (--length > pad_len) ; do { write_byte_mode0(ioaddr, 0); } while (--length > 0) ; } else { /* Write the packet out in slow mode. */ unsigned char outbyte = *packet++; outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL); outb(WrAddr+MAR, ioaddr + PAR_DATA); outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA); outb(outbyte & 0x0f, ioaddr + PAR_DATA); outbyte >>= 4; outb(outbyte & 0x0f, ioaddr + PAR_DATA); outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL); while (--length > pad_len) write_byte_mode1(ioaddr, *packet++); while (--length > 0) write_byte_mode1(ioaddr, 0); } /* Terminate the Tx frame. End of write: ECB. */ outb(0xff, ioaddr + PAR_DATA); outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL); } static void tx_timeout(struct net_device *dev) { long ioaddr = dev->base_addr; printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" : "IRQ conflict"); dev->stats.tx_errors++; /* Try to restart the adapter. */ hardware_init(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); dev->stats.tx_errors++; } static netdev_tx_t atp_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; int length; unsigned long flags; length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; netif_stop_queue(dev); /* Disable interrupts by writing 0x00 to the Interrupt Mask Register. This sequence must not be interrupted by an incoming packet. 
*/ spin_lock_irqsave(&lp->lock, flags); write_reg(ioaddr, IMR, 0); write_reg_high(ioaddr, IMR, 0); spin_unlock_irqrestore(&lp->lock, flags); write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port); lp->pac_cnt_in_tx_buf++; if (lp->tx_unit_busy == 0) { trigger_send(ioaddr, length); lp->saved_tx_size = 0; /* Redundant */ lp->re_tx = 0; lp->tx_unit_busy = 1; } else lp->saved_tx_size = length; /* Re-enable the LPT interrupts. */ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); write_reg_high(ioaddr, IMR, ISRh_RxErr); dev_kfree_skb (skb); return NETDEV_TX_OK; } /* The typical workload of the driver: Handle the network interface interrupts. */ static irqreturn_t atp_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct net_local *lp; long ioaddr; static int num_tx_since_rx; int boguscount = max_interrupt_work; int handled = 0; ioaddr = dev->base_addr; lp = netdev_priv(dev); spin_lock(&lp->lock); /* Disable additional spurious interrupts. */ outb(Ctrl_SelData, ioaddr + PAR_CONTROL); /* The adapter's output is currently the IRQ line, switch it to data. */ write_reg(ioaddr, CMR2, CMR2_NULL); write_reg(ioaddr, IMR, 0); if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name); while (--boguscount > 0) { int status = read_nibble(ioaddr, ISR); if (net_debug > 5) printk("loop status %02x..", status); if (status & (ISR_RxOK<<3)) { handled = 1; write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */ do { int read_status = read_nibble(ioaddr, CMR1); if (net_debug > 6) printk("handling Rx packet %02x..", read_status); /* We acknowledged the normal Rx interrupt, so if the interrupt is still outstanding we must have a Rx error. */ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ dev->stats.rx_over_errors++; /* Set to no-accept mode long enough to remove a packet. */ write_reg_high(ioaddr, CMR2, CMR2h_OFF); net_rx(dev); /* Clear the interrupt and return to normal Rx mode. 
*/ write_reg_high(ioaddr, ISR, ISRh_RxErr); write_reg_high(ioaddr, CMR2, lp->addr_mode); } else if ((read_status & (CMR1_BufEnb << 3)) == 0) { net_rx(dev); num_tx_since_rx = 0; } else break; } while (--boguscount > 0); } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) { handled = 1; if (net_debug > 6) printk("handling Tx done.."); /* Clear the Tx interrupt. We should check for too many failures and reinitialize the adapter. */ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); if (status & (ISR_TxErr<<3)) { dev->stats.collisions++; if (++lp->re_tx > 15) { dev->stats.tx_aborted_errors++; hardware_init(dev); break; } /* Attempt to retransmit. */ if (net_debug > 6) printk("attempting to ReTx"); write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); } else { /* Finish up the transmit. */ dev->stats.tx_packets++; lp->pac_cnt_in_tx_buf--; if ( lp->saved_tx_size) { trigger_send(ioaddr, lp->saved_tx_size); lp->saved_tx_size = 0; lp->re_tx = 0; } else lp->tx_unit_busy = 0; netif_wake_queue(dev); /* Inform upper layers. */ } num_tx_since_rx++; } else if (num_tx_since_rx > 8 && time_after(jiffies, dev->last_rx + HZ)) { if (net_debug > 2) printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and " "%ld jiffies status %02x CMR1 %02x.\n", dev->name, num_tx_since_rx, jiffies - dev->last_rx, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); dev->stats.rx_missed_errors++; hardware_init(dev); num_tx_since_rx = 0; break; } else break; } /* This following code fixes a rare (and very difficult to track down) problem where the adapter forgets its ethernet address. */ { int i; for (i = 0; i < 6; i++) write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); #if 0 && defined(TIMED_CHECKER) mod_timer(&lp->timer, jiffies + TIMED_CHECKER); #endif } /* Tell the adapter that it can go back to using the output line as IRQ. */ write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Enable the physical interrupt line, which is sure to be low until.. */ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); /* .. 
we enable the interrupt sources. */ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */ spin_unlock(&lp->lock); if (net_debug > 5) printk("exiting interrupt.\n"); return IRQ_RETVAL(handled); } #ifdef TIMED_CHECKER /* This following code fixes a rare (and very difficult to track down) problem where the adapter forgets its ethernet address. */ static void atp_timed_checker(unsigned long data) { struct net_device *dev = (struct net_device *)data; long ioaddr = dev->base_addr; struct net_local *lp = netdev_priv(dev); int tickssofar = jiffies - lp->last_rx_time; int i; spin_lock(&lp->lock); if (tickssofar > 2*HZ) { #if 1 for (i = 0; i < 6; i++) write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); lp->last_rx_time = jiffies; #else for (i = 0; i < 6; i++) if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i]) { struct net_local *lp = netdev_priv(atp_timed_dev); write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); if (i == 2) dev->stats.tx_errors++; else if (i == 3) dev->stats.tx_dropped++; else if (i == 4) dev->stats.collisions++; else dev->stats.rx_errors++; } #endif } spin_unlock(&lp->lock); lp->timer.expires = jiffies + TIMED_CHECKER; add_timer(&lp->timer); } #endif /* We have a good packet(s), get it/them out of the buffers. */ static void net_rx(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; struct rx_header rx_head; /* Process the received packet. 
*/ outb(EOC+MAR, ioaddr + PAR_DATA); read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port); if (net_debug > 5) printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); if ((rx_head.rx_status & 0x77) != 0x01) { dev->stats.rx_errors++; if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++; else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++; if (net_debug > 3) printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", dev->name, rx_head.rx_status); if (rx_head.rx_status & 0x0020) { dev->stats.rx_fifo_errors++; write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); } else if (rx_head.rx_status & 0x0050) hardware_init(dev); return; } else { /* Malloc up new buffer. The "-4" omits the FCS (CRC). */ int pkt_len = (rx_head.rx_count & 0x7ff) - 4; struct sk_buff *skb; skb = dev_alloc_skb(pkt_len + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; goto done; } skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } done: write_reg(ioaddr, CMR1, CMR1_NextPkt); lp->last_rx_time = jiffies; } static void read_block(long ioaddr, int length, unsigned char *p, int data_mode) { if (data_mode <= 3) { /* Mode 0 or 1 */ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); outb(length == 8 ? 
RdAddr | HNib | MAR : RdAddr | MAR, ioaddr + PAR_DATA); if (data_mode <= 1) { /* Mode 0 or 1 */ do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0); } else { /* Mode 2 or 3 */ do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0); } } else if (data_mode <= 5) { do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0); } else { do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0); } outb(EOC+HNib+MAR, ioaddr + PAR_DATA); outb(Ctrl_SelData, ioaddr + PAR_CONTROL); } /* The inverse routine to net_open(). */ static int net_close(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; netif_stop_queue(dev); del_timer_sync(&lp->timer); /* Flush the Tx and disable Rx here. */ lp->addr_mode = CMR2h_OFF; write_reg_high(ioaddr, CMR2, CMR2h_OFF); /* Free the IRQ line. */ outb(0x00, ioaddr + PAR_CONTROL); free_irq(dev->irq, dev); /* Reset the ethernet hardware and activate the printer pass-through. */ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); return 0; } /* * Set or clear the multicast filter for this adapter. */ static void set_rx_mode_8002(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) lp->addr_mode = CMR2h_PROMISC; else lp->addr_mode = CMR2h_Normal; write_reg_high(ioaddr, CMR2, lp->addr_mode); } static void set_rx_mode_8012(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */ int i; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ new_mode = CMR2h_PROMISC; } else if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. 
*/ memset(mc_filter, 0xff, sizeof(mc_filter)); new_mode = CMR2h_Normal; } else { struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); } new_mode = CMR2h_Normal; } lp->addr_mode = new_mode; write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */ for (i = 0; i < 8; i++) write_reg_byte(ioaddr, i, mc_filter[i]); if (net_debug > 2 || 1) { lp->addr_mode = 1; printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to", dev->name, lp->addr_mode); for (i = 0; i < 8; i++) printk(" %2.2x", mc_filter[i]); printk(".\n"); } write_reg_high(ioaddr, CMR2, lp->addr_mode); write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */ } static void set_rx_mode(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); if (lp->chip_type == RTL8002) return set_rx_mode_8002(dev); else return set_rx_mode_8012(dev); } static int __init atp_init_module(void) { if (debug) /* Emit version even if no cards detected. */ printk(KERN_INFO "%s", version); return atp_init(); } static void __exit atp_cleanup_module(void) { struct net_device *next_dev; while (root_atp_dev) { struct net_local *atp_local = netdev_priv(root_atp_dev); next_dev = atp_local->next_module; unregister_netdev(root_atp_dev); /* No need to release_region(), since we never snarf it. */ free_netdev(root_atp_dev); root_atp_dev = next_dev; } } module_init(atp_init_module); module_exit(atp_cleanup_module);
gpl-2.0
MoKee/android_kernel_zte_x9180
arch/um/os-Linux/time.c
4876
4180
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stddef.h> #include <errno.h> #include <signal.h> #include <time.h> #include <sys/time.h> #include "kern_util.h" #include "os.h" #include "internal.h" int set_interval(void) { int usec = UM_USEC_PER_SEC / UM_HZ; struct itimerval interval = ((struct itimerval) { { 0, usec }, { 0, usec } }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } int timer_one_shot(int ticks) { unsigned long usec = ticks * UM_USEC_PER_SEC / UM_HZ; unsigned long sec = usec / UM_USEC_PER_SEC; struct itimerval interval; usec %= UM_USEC_PER_SEC; interval = ((struct itimerval) { { 0, 0 }, { sec, usec } }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } /** * timeval_to_ns - Convert timeval to nanoseconds * @ts: pointer to the timeval variable to be converted * * Returns the scalar nanosecond representation of the timeval * parameter. * * Ripped from linux/time.h because it's a kernel header, and thus * unusable from here. 
*/ static inline long long timeval_to_ns(const struct timeval *tv) { return ((long long) tv->tv_sec * UM_NSEC_PER_SEC) + tv->tv_usec * UM_NSEC_PER_USEC; } long long disable_timer(void) { struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } }); long long remain, max = UM_NSEC_PER_SEC / UM_HZ; if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0) printk(UM_KERN_ERR "disable_timer - setitimer failed, " "errno = %d\n", errno); remain = timeval_to_ns(&time.it_value); if (remain > max) remain = max; return remain; } long long os_nsecs(void) { struct timeval tv; gettimeofday(&tv, NULL); return timeval_to_ns(&tv); } #ifdef UML_CONFIG_NO_HZ static int after_sleep_interval(struct timespec *ts) { return 0; } static void deliver_alarm(void) { alarm_handler(SIGVTALRM, NULL); } static unsigned long long sleep_time(unsigned long long nsecs) { return nsecs; } #else unsigned long long last_tick; unsigned long long skew; static void deliver_alarm(void) { unsigned long long this_tick = os_nsecs(); int one_tick = UM_NSEC_PER_SEC / UM_HZ; /* Protection against the host's time going backwards */ if ((last_tick != 0) && (this_tick < last_tick)) this_tick = last_tick; if (last_tick == 0) last_tick = this_tick - one_tick; skew += this_tick - last_tick; while (skew >= one_tick) { alarm_handler(SIGVTALRM, NULL); skew -= one_tick; } last_tick = this_tick; } static unsigned long long sleep_time(unsigned long long nsecs) { return nsecs > skew ? nsecs - skew : 0; } static inline long long timespec_to_us(const struct timespec *ts) { return ((long long) ts->tv_sec * UM_USEC_PER_SEC) + ts->tv_nsec / UM_NSEC_PER_USEC; } static int after_sleep_interval(struct timespec *ts) { int usec = UM_USEC_PER_SEC / UM_HZ; long long start_usecs = timespec_to_us(ts); struct timeval tv; struct itimerval interval; /* * It seems that rounding can increase the value returned from * setitimer to larger than the one passed in. Over time, * this will cause the remaining time to be greater than the * tick interval. 
If this happens, then just reduce the first * tick to the interval value. */ if (start_usecs > usec) start_usecs = usec; start_usecs -= skew / UM_NSEC_PER_USEC; if (start_usecs < 0) start_usecs = 0; tv = ((struct timeval) { .tv_sec = start_usecs / UM_USEC_PER_SEC, .tv_usec = start_usecs % UM_USEC_PER_SEC }); interval = ((struct itimerval) { { 0, usec }, tv }); if (setitimer(ITIMER_VIRTUAL, &interval, NULL) == -1) return -errno; return 0; } #endif void idle_sleep(unsigned long long nsecs) { struct timespec ts; /* * nsecs can come in as zero, in which case, this starts a * busy loop. To prevent this, reset nsecs to the tick * interval if it is zero. */ if (nsecs == 0) nsecs = UM_NSEC_PER_SEC / UM_HZ; nsecs = sleep_time(nsecs); ts = ((struct timespec) { .tv_sec = nsecs / UM_NSEC_PER_SEC, .tv_nsec = nsecs % UM_NSEC_PER_SEC }); if (nanosleep(&ts, &ts) == 0) deliver_alarm(); after_sleep_interval(&ts); }
gpl-2.0
luckasfb/android_kernel_iocean_x7
drivers/char/agp/generic.c
4876
37494
/* * AGPGART driver. * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2005 Dave Jones. * Copyright (C) 1999 Jeff Hartmann. * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * TODO: * - Allocate more than order 0 pages to avoid too much linear map splitting. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/miscdevice.h> #include <linux/pm.h> #include <linux/agp_backend.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include "agp.h" __u32 *agp_gatt_table; int agp_memory_reserved; /* * Needed by the Nforce GART driver for the time being. Would be * nice to do this some other way instead of needing this export. 
*/ EXPORT_SYMBOL_GPL(agp_memory_reserved); /* * Generic routines for handling agp_memory structures - * They use the basic page allocation routines to do the brunt of the work. */ void agp_free_key(int key) { if (key < 0) return; if (key < MAXKEY) clear_bit(key, agp_bridge->key_list); } EXPORT_SYMBOL(agp_free_key); static int agp_get_key(void) { int bit; bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY); if (bit < MAXKEY) { set_bit(bit, agp_bridge->key_list); return bit; } return -1; } /* * Use kmalloc if possible for the page list. Otherwise fall back to * vmalloc. This speeds things up and also saves memory for small AGP * regions. */ void agp_alloc_page_array(size_t size, struct agp_memory *mem) { mem->pages = NULL; if (size <= 2*PAGE_SIZE) mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); if (mem->pages == NULL) { mem->pages = vmalloc(size); } } EXPORT_SYMBOL(agp_alloc_page_array); void agp_free_page_array(struct agp_memory *mem) { if (is_vmalloc_addr(mem->pages)) { vfree(mem->pages); } else { kfree(mem->pages); } } EXPORT_SYMBOL(agp_free_page_array); static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) { struct agp_memory *new; unsigned long alloc_size = num_agp_pages*sizeof(struct page *); if (INT_MAX/sizeof(struct page *) < num_agp_pages) return NULL; new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; new->key = agp_get_key(); if (new->key < 0) { kfree(new); return NULL; } agp_alloc_page_array(alloc_size, new); if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; } new->num_scratch_pages = 0; return new; } struct agp_memory *agp_create_memory(int scratch_pages) { struct agp_memory *new; new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; new->key = agp_get_key(); if (new->key < 0) { kfree(new); return NULL; } agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; } 
new->num_scratch_pages = scratch_pages; new->type = AGP_NORMAL_MEMORY; return new; } EXPORT_SYMBOL(agp_create_memory); /** * agp_free_memory - free memory associated with an agp_memory pointer. * * @curr: agp_memory pointer to be freed. * * It is the only function that can be called when the backend is not owned * by the caller. (So it can free memory on client death.) */ void agp_free_memory(struct agp_memory *curr) { size_t i; if (curr == NULL) return; if (curr->is_bound) agp_unbind_memory(curr); if (curr->type >= AGP_USER_TYPES) { agp_generic_free_by_type(curr); return; } if (curr->type != 0) { curr->bridge->driver->free_by_type(curr); return; } if (curr->page_count != 0) { if (curr->bridge->driver->agp_destroy_pages) { curr->bridge->driver->agp_destroy_pages(curr); } else { for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page( curr->pages[i], AGP_PAGE_DESTROY_UNMAP); } for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page( curr->pages[i], AGP_PAGE_DESTROY_FREE); } } } agp_free_key(curr->key); agp_free_page_array(curr); kfree(curr); } EXPORT_SYMBOL(agp_free_memory); #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) /** * agp_allocate_memory - allocate a group of pages of a certain type. * * @page_count: size_t argument of the number of pages * @type: u32 argument of the type of memory to be allocated. * * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which * maps to physical ram. Any other type is device dependent. * * It returns NULL whenever memory is unavailable. 
*/ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, size_t page_count, u32 type) { int scratch_pages; struct agp_memory *new; size_t i; int cur_memory; if (!bridge) return NULL; cur_memory = atomic_read(&bridge->current_memory_agp); if ((cur_memory + page_count > bridge->max_memory_agp) || (cur_memory + page_count < page_count)) return NULL; if (type >= AGP_USER_TYPES) { new = agp_generic_alloc_user(page_count, type); if (new) new->bridge = bridge; return new; } if (type != 0) { new = bridge->driver->alloc_by_type(page_count, type); if (new) new->bridge = bridge; return new; } scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_memory(scratch_pages); if (new == NULL) return NULL; if (bridge->driver->agp_alloc_pages) { if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { agp_free_memory(new); return NULL; } new->bridge = bridge; return new; } for (i = 0; i < page_count; i++) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (page == NULL) { agp_free_memory(new); return NULL; } new->pages[i] = page; new->page_count++; } new->bridge = bridge; return new; } EXPORT_SYMBOL(agp_allocate_memory); /* End - Generic routines for handling agp_memory structures */ static int agp_return_size(void) { int current_size; void *temp; temp = agp_bridge->current_size; switch (agp_bridge->driver->size_type) { case U8_APER_SIZE: current_size = A_SIZE_8(temp)->size; break; case U16_APER_SIZE: current_size = A_SIZE_16(temp)->size; break; case U32_APER_SIZE: current_size = A_SIZE_32(temp)->size; break; case LVL2_APER_SIZE: current_size = A_SIZE_LVL2(temp)->size; break; case FIXED_APER_SIZE: current_size = A_SIZE_FIX(temp)->size; break; default: current_size = 0; break; } current_size -= (agp_memory_reserved / (1024*1024)); if (current_size <0) current_size = 0; return current_size; } int agp_num_entries(void) { int num_entries; void *temp; temp = agp_bridge->current_size; switch 
(agp_bridge->driver->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: num_entries = A_SIZE_32(temp)->num_entries; break; case LVL2_APER_SIZE: num_entries = A_SIZE_LVL2(temp)->num_entries; break; case FIXED_APER_SIZE: num_entries = A_SIZE_FIX(temp)->num_entries; break; default: num_entries = 0; break; } num_entries -= agp_memory_reserved>>PAGE_SHIFT; if (num_entries<0) num_entries = 0; return num_entries; } EXPORT_SYMBOL_GPL(agp_num_entries); /** * agp_copy_info - copy bridge state information * * @info: agp_kern_info pointer. The caller should insure that this pointer is valid. * * This function copies information about the agp bridge device and the state of * the agp backend into an agp_kern_info pointer. */ int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info) { memset(info, 0, sizeof(struct agp_kern_info)); if (!bridge) { info->chipset = NOT_SUPPORTED; return -EIO; } info->version.major = bridge->version->major; info->version.minor = bridge->version->minor; info->chipset = SUPPORTED; info->device = bridge->dev; if (bridge->mode & AGPSTAT_MODE_3_0) info->mode = bridge->mode & ~AGP3_RESERVED_MASK; else info->mode = bridge->mode & ~AGP2_RESERVED_MASK; info->aper_base = bridge->gart_bus_addr; info->aper_size = agp_return_size(); info->max_memory = bridge->max_memory_agp; info->current_memory = atomic_read(&bridge->current_memory_agp); info->cant_use_aperture = bridge->driver->cant_use_aperture; info->vm_ops = bridge->vm_ops; info->page_mask = ~0UL; return 0; } EXPORT_SYMBOL(agp_copy_info); /* End - Routine to copy over information structure */ /* * Routines for handling swapping of agp_memory into the GATT - * These routines take agp_memory and insert them into the GATT. * They call device specific routines to actually write to the GATT. */ /** * agp_bind_memory - Bind an agp_memory structure into the GATT. 
* * @curr: agp_memory pointer * @pg_start: an offset into the graphics aperture translation table * * It returns -EINVAL if the pointer == NULL. * It returns -EBUSY if the area of the table requested is already in use. */ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) { int ret_val; if (curr == NULL) return -EINVAL; if (curr->is_bound) { printk(KERN_INFO PFX "memory %p is already bound!\n", curr); return -EINVAL; } if (!curr->is_flushed) { curr->bridge->driver->cache_flush(); curr->is_flushed = true; } ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); if (ret_val != 0) return ret_val; curr->is_bound = true; curr->pg_start = pg_start; spin_lock(&agp_bridge->mapped_lock); list_add(&curr->mapped_list, &agp_bridge->mapped_list); spin_unlock(&agp_bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_bind_memory); /** * agp_unbind_memory - Removes an agp_memory structure from the GATT * * @curr: agp_memory pointer to be removed from the GATT. * * It returns -EINVAL if this piece of agp_memory is not currently bound to * the graphics aperture translation table or if the agp_memory pointer == NULL */ int agp_unbind_memory(struct agp_memory *curr) { int ret_val; if (curr == NULL) return -EINVAL; if (!curr->is_bound) { printk(KERN_INFO PFX "memory %p was not bound!\n", curr); return -EINVAL; } ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); if (ret_val != 0) return ret_val; curr->is_bound = false; curr->pg_start = 0; spin_lock(&curr->bridge->mapped_lock); list_del(&curr->mapped_list); spin_unlock(&curr->bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_unbind_memory); /* End - Routines for handling swapping of agp_memory into the GATT */ /* Generic Agp routines - Start */ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) { u32 tmp; if (*requested_mode & AGP2_RESERVED_MASK) { printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. 
Fixed.\n", *requested_mode & AGP2_RESERVED_MASK, *requested_mode); *requested_mode &= ~AGP2_RESERVED_MASK; } /* * Some dumb bridges are programmed to disobey the AGP2 spec. * This is likely a BIOS misprogramming rather than poweron default, or * it would be a lot more common. * https://bugs.freedesktop.org/show_bug.cgi?id=8816 * AGPv2 spec 6.1.9 states: * The RATE field indicates the data transfer rates supported by this * device. A.G.P. devices must report all that apply. * Fix them up as best we can. */ switch (*bridge_agpstat & 7) { case 4: *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. " "Fixing up support for x2 & x1\n"); break; case 2: *bridge_agpstat |= AGPSTAT2_1X; printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. " "Fixing up support for x1\n"); break; default: break; } /* Check the speed bits make sense. Only one should be set. */ tmp = *requested_mode & 7; switch (tmp) { case 0: printk(KERN_INFO PFX "%s tried to set rate=x0. 
Setting to x1 mode.\n", current->comm); *requested_mode |= AGPSTAT2_1X; break; case 1: case 2: break; case 3: *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */ break; case 4: break; case 5: case 6: case 7: *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/ break; } /* disable SBA if it's not supported */ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA))) *bridge_agpstat &= ~AGPSTAT_SBA; /* Set rate */ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X))) *bridge_agpstat &= ~AGPSTAT2_4X; if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X))) *bridge_agpstat &= ~AGPSTAT2_2X; if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X))) *bridge_agpstat &= ~AGPSTAT2_1X; /* Now we know what mode it should be, clear out the unwanted bits. */ if (*bridge_agpstat & AGPSTAT2_4X) *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */ if (*bridge_agpstat & AGPSTAT2_2X) *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */ if (*bridge_agpstat & AGPSTAT2_1X) *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */ /* Apply any errata. */ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) *bridge_agpstat &= ~AGPSTAT_FW; if (agp_bridge->flags & AGP_ERRATA_SBA) *bridge_agpstat &= ~AGPSTAT_SBA; if (agp_bridge->flags & AGP_ERRATA_1X) { *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); *bridge_agpstat |= AGPSTAT2_1X; } /* If we've dropped down to 1X, disable fast writes. */ if (*bridge_agpstat & AGPSTAT2_1X) *bridge_agpstat &= ~AGPSTAT_FW; } /* * requested_mode = Mode requested by (typically) X. * bridge_agpstat = PCI_AGP_STATUS from agp bridge. * vga_agpstat = PCI_AGP_STATUS from graphic card. 
*/ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) { u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat; u32 tmp; if (*requested_mode & AGP3_RESERVED_MASK) { printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", *requested_mode & AGP3_RESERVED_MASK, *requested_mode); *requested_mode &= ~AGP3_RESERVED_MASK; } /* Check the speed bits make sense. */ tmp = *requested_mode & 7; if (tmp == 0) { printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm); *requested_mode |= AGPSTAT3_4X; } if (tmp >= 3) { printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4); *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X; } /* ARQSZ - Set the value to the maximum one. * Don't allow the mode register to override values. */ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) | max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ))); /* Calibration cycle. * Don't allow the mode register to override values. */ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) | min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK))); /* SBA *must* be supported for AGP v3 */ *bridge_agpstat |= AGPSTAT_SBA; /* * Set speed. * Check for invalid speeds. This can happen when applications * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware */ if (*requested_mode & AGPSTAT_MODE_3_0) { /* * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode, * have been passed a 3.0 mode, but with 2.x speed bits set. * AGP2.x 4x -> AGP3.0 4x. */ if (*requested_mode & AGPSTAT2_4X) { printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n", current->comm, *requested_mode); *requested_mode &= ~AGPSTAT2_4X; *requested_mode |= AGPSTAT3_4X; } } else { /* * The caller doesn't know what they are doing. We are in 3.0 mode, * but have been passed an AGP 2.x mode. * Convert AGP 1x,2x,4x -> AGP 3.0 4x. 
*/ printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n", current->comm, *requested_mode); *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X); *requested_mode |= AGPSTAT3_4X; } if (*requested_mode & AGPSTAT3_8X) { if (!(*bridge_agpstat & AGPSTAT3_8X)) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm); return; } if (!(*vga_agpstat & AGPSTAT3_8X)) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm); return; } /* All set, bridge & device can do AGP x8*/ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); goto done; } else if (*requested_mode & AGPSTAT3_4X) { *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; goto done; } else { /* * If we didn't specify an AGP mode, we see if both * the graphics card, and the bridge can do x8, and use if so. * If not, we fall back to x4 mode. */ if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode " "supported by bridge & card (x8).\n"); *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); } else { printk(KERN_INFO PFX "Fell back to AGPx4 mode because "); if (!(*bridge_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge); *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *bridge_agpstat |= AGPSTAT3_4X; } if (!(*vga_agpstat & AGPSTAT3_8X)) { printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga); *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); *vga_agpstat |= AGPSTAT3_4X; } } } done: /* Apply any errata. 
*/ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) *bridge_agpstat &= ~AGPSTAT_FW; if (agp_bridge->flags & AGP_ERRATA_SBA) *bridge_agpstat &= ~AGPSTAT_SBA; if (agp_bridge->flags & AGP_ERRATA_1X) { *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); *bridge_agpstat |= AGPSTAT2_1X; } } /** * agp_collect_device_status - determine correct agp_cmd from various agp_stat's * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. * @requested_mode: requested agp_stat from userspace (Typically from X) * @bridge_agpstat: current agp_stat from AGP bridge. * * This function will hunt for an AGP graphics card, and try to match * the requested mode to the capabilities of both the bridge and the card. */ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat) { struct pci_dev *device = NULL; u32 vga_agpstat; u8 cap_ptr; for (;;) { device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device); if (!device) { printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); return 0; } cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); if (cap_ptr) break; } /* * Ok, here we have a AGP device. Disable impossible * settings, and adjust the readqueue to the minimum. 
*/ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat); /* adjust RQ depth */ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) | min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH), min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH)))); /* disable FW if it's not supported */ if (!((bridge_agpstat & AGPSTAT_FW) && (vga_agpstat & AGPSTAT_FW) && (requested_mode & AGPSTAT_FW))) bridge_agpstat &= ~AGPSTAT_FW; /* Check to see if we are operating in 3.0 mode */ if (agp_bridge->mode & AGPSTAT_MODE_3_0) agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); else agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); pci_dev_put(device); return bridge_agpstat; } EXPORT_SYMBOL(agp_collect_device_status); void agp_device_command(u32 bridge_agpstat, bool agp_v3) { struct pci_dev *device = NULL; int mode; mode = bridge_agpstat & 0x7; if (agp_v3) mode *= 4; for_each_pci_dev(device) { u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); if (!agp) continue; dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", agp_v3 ? 3 : 2, mode); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); } } EXPORT_SYMBOL(agp_device_command); void get_agp_version(struct agp_bridge_data *bridge) { u32 ncapid; /* Exit early if already set by errata workarounds. 
*/ if (bridge->major_version != 0) return; pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid); bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf; } EXPORT_SYMBOL(get_agp_version); void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) { u32 bridge_agpstat, temp; get_agp_version(agp_bridge); dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", agp_bridge->major_version, agp_bridge->minor_version); pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat); if (bridge_agpstat == 0) /* Something bad happened. FIXME: Return error code? */ return; bridge_agpstat |= AGPSTAT_AGP_ENABLE; /* Do AGP version specific frobbing. */ if (bridge->major_version >= 3) { if (bridge->mode & AGPSTAT_MODE_3_0) { /* If we have 3.5, we can do the isoch stuff. */ if (bridge->minor_version >= 5) agp_3_5_enable(bridge); agp_device_command(bridge_agpstat, true); return; } else { /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/ bridge_agpstat &= ~(7<<10) ; pci_read_config_dword(bridge->dev, bridge->capndx+AGPCTRL, &temp); temp |= (1<<9); pci_write_config_dword(bridge->dev, bridge->capndx+AGPCTRL, temp); dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); } } /* AGP v<3 */ agp_device_command(bridge_agpstat, false); } EXPORT_SYMBOL(agp_generic_enable); int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) { char *table; char *table_end; int size; int page_order; int num_entries; int i; void *temp; struct page *page; /* The generic routines can't handle 2 level gatt's */ if (bridge->driver->size_type == LVL2_APER_SIZE) return -EINVAL; table = NULL; i = bridge->aperture_size_idx; temp = bridge->current_size; size = page_order = num_entries = 0; if (bridge->driver->size_type != FIXED_APER_SIZE) { 
do { switch (bridge->driver->size_type) { case U8_APER_SIZE: size = A_SIZE_8(temp)->size; page_order = A_SIZE_8(temp)->page_order; num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: size = A_SIZE_16(temp)->size; page_order = A_SIZE_16(temp)->page_order; num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: size = A_SIZE_32(temp)->size; page_order = A_SIZE_32(temp)->page_order; num_entries = A_SIZE_32(temp)->num_entries; break; /* This case will never really happen. */ case FIXED_APER_SIZE: case LVL2_APER_SIZE: default: size = page_order = num_entries = 0; break; } table = alloc_gatt_pages(page_order); if (table == NULL) { i++; switch (bridge->driver->size_type) { case U8_APER_SIZE: bridge->current_size = A_IDX8(bridge); break; case U16_APER_SIZE: bridge->current_size = A_IDX16(bridge); break; case U32_APER_SIZE: bridge->current_size = A_IDX32(bridge); break; /* These cases will never really happen. */ case FIXED_APER_SIZE: case LVL2_APER_SIZE: default: break; } temp = bridge->current_size; } else { bridge->aperture_size_idx = i; } } while (!table && (i < bridge->driver->num_aperture_sizes)); } else { size = ((struct aper_size_info_fixed *) temp)->size; page_order = ((struct aper_size_info_fixed *) temp)->page_order; num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; table = alloc_gatt_pages(page_order); } if (table == NULL) return -ENOMEM; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); bridge->gatt_table_real = (u32 *) table; agp_gatt_table = (void *)table; bridge->driver->cache_flush(); #ifdef CONFIG_X86 if (set_memory_uc((unsigned long)table, 1 << page_order)) printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); bridge->gatt_table = (void *)table; #else bridge->gatt_table = ioremap_nocache(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); bridge->driver->cache_flush(); #endif if 
(bridge->gatt_table == NULL) { for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); free_gatt_pages(table, page_order); return -ENOMEM; } bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); /* AK: bogus, should encode addresses > 4GB */ for (i = 0; i < num_entries; i++) { writel(bridge->scratch_page, bridge->gatt_table+i); readl(bridge->gatt_table+i); /* PCI Posting. */ } return 0; } EXPORT_SYMBOL(agp_generic_create_gatt_table); int agp_generic_free_gatt_table(struct agp_bridge_data *bridge) { int page_order; char *table, *table_end; void *temp; struct page *page; temp = bridge->current_size; switch (bridge->driver->size_type) { case U8_APER_SIZE: page_order = A_SIZE_8(temp)->page_order; break; case U16_APER_SIZE: page_order = A_SIZE_16(temp)->page_order; break; case U32_APER_SIZE: page_order = A_SIZE_32(temp)->page_order; break; case FIXED_APER_SIZE: page_order = A_SIZE_FIX(temp)->page_order; break; case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; break; default: page_order = 0; break; } /* Do not worry about freeing memory, because if this is * called, then all agp memory is deallocated and removed * from the table. 
*/ #ifdef CONFIG_X86 set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order); #else iounmap(bridge->gatt_table); #endif table = (char *) bridge->gatt_table_real; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); free_gatt_pages(bridge->gatt_table_real, page_order); agp_gatt_table = NULL; bridge->gatt_table = NULL; bridge->gatt_table_real = NULL; bridge->gatt_bus_addr = 0; return 0; } EXPORT_SYMBOL(agp_generic_free_gatt_table); int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int num_entries; size_t i; off_t j; void *temp; struct agp_bridge_data *bridge; int mask_type; bridge = mem->bridge; if (!bridge) return -EINVAL; if (mem->page_count == 0) return 0; temp = bridge->current_size; switch (bridge->driver->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; case U16_APER_SIZE: num_entries = A_SIZE_16(temp)->num_entries; break; case U32_APER_SIZE: num_entries = A_SIZE_32(temp)->num_entries; break; case FIXED_APER_SIZE: num_entries = A_SIZE_FIX(temp)->num_entries; break; case LVL2_APER_SIZE: /* The generic routines can't deal with 2 level gatt's */ return -EINVAL; break; default: num_entries = 0; break; } num_entries -= agp_memory_reserved/PAGE_SIZE; if (num_entries < 0) num_entries = 0; if (type != mem->type) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ return -EINVAL; } if (((pg_start + mem->page_count) > num_entries) || ((pg_start + mem->page_count) < pg_start)) return -EINVAL; j = pg_start; while (j < (pg_start + mem->page_count)) { if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j))) return -EBUSY; j++; } if (!mem->is_flushed) { bridge->driver->cache_flush(); mem->is_flushed = true; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 
writel(bridge->driver->mask_memory(bridge, page_to_phys(mem->pages[i]), mask_type), bridge->gatt_table+j); } readl(bridge->gatt_table+j-1); /* PCI Posting. */ bridge->driver->tlb_flush(mem); return 0; } EXPORT_SYMBOL(agp_generic_insert_memory); int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; struct agp_bridge_data *bridge; int mask_type, num_entries; bridge = mem->bridge; if (!bridge) return -EINVAL; if (mem->page_count == 0) return 0; if (type != mem->type) return -EINVAL; num_entries = agp_num_entries(); if (((pg_start + mem->page_count) > num_entries) || ((pg_start + mem->page_count) < pg_start)) return -EINVAL; mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ return -EINVAL; } /* AK: bogus, should encode addresses > 4GB */ for (i = pg_start; i < (mem->page_count + pg_start); i++) { writel(bridge->scratch_page, bridge->gatt_table+i); } readl(bridge->gatt_table+i-1); /* PCI Posting. */ bridge->driver->tlb_flush(mem); return 0; } EXPORT_SYMBOL(agp_generic_remove_memory); struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) { return NULL; } EXPORT_SYMBOL(agp_generic_alloc_by_type); void agp_generic_free_by_type(struct agp_memory *curr) { agp_free_page_array(curr); agp_free_key(curr->key); kfree(curr); } EXPORT_SYMBOL(agp_generic_free_by_type); struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) { struct agp_memory *new; int i; int pages; pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_user_memory(page_count); if (new == NULL) return NULL; for (i = 0; i < page_count; i++) new->pages[i] = NULL; new->page_count = 0; new->type = type; new->num_scratch_pages = pages; return new; } EXPORT_SYMBOL(agp_generic_alloc_user); /* * Basic Page Allocation Routines - * These routines handle page allocation and by default they reserve the allocated * memory. 
They also handle incrementing the current_memory_agp value, Which is checked * against a maximum value. */ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages) { struct page * page; int i, ret = -ENOMEM; for (i = 0; i < num_pages; i++) { page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); /* agp_free_memory() needs gart address */ if (page == NULL) goto out; #ifndef CONFIG_X86 map_page_into_agp(page); #endif get_page(page); atomic_inc(&agp_bridge->current_memory_agp); mem->pages[i] = page; mem->page_count++; } #ifdef CONFIG_X86 set_pages_array_uc(mem->pages, num_pages); #endif ret = 0; out: return ret; } EXPORT_SYMBOL(agp_generic_alloc_pages); struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge) { struct page * page; page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return NULL; map_page_into_agp(page); get_page(page); atomic_inc(&agp_bridge->current_memory_agp); return page; } EXPORT_SYMBOL(agp_generic_alloc_page); void agp_generic_destroy_pages(struct agp_memory *mem) { int i; struct page *page; if (!mem) return; #ifdef CONFIG_X86 set_pages_array_wb(mem->pages, mem->page_count); #endif for (i = 0; i < mem->page_count; i++) { page = mem->pages[i]; #ifndef CONFIG_X86 unmap_page_from_agp(page); #endif put_page(page); __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); mem->pages[i] = NULL; } } EXPORT_SYMBOL(agp_generic_destroy_pages); void agp_generic_destroy_page(struct page *page, int flags) { if (page == NULL) return; if (flags & AGP_PAGE_DESTROY_UNMAP) unmap_page_from_agp(page); if (flags & AGP_PAGE_DESTROY_FREE) { put_page(page); __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); } } EXPORT_SYMBOL(agp_generic_destroy_page); /* End Basic Page Allocation Routines */ /** * agp_enable - initialise the agp point-to-point connection. * * @mode: agp mode register value to configure with. 
*/ void agp_enable(struct agp_bridge_data *bridge, u32 mode) { if (!bridge) return; bridge->driver->agp_enable(bridge, mode); } EXPORT_SYMBOL(agp_enable); /* When we remove the global variable agp_bridge from all drivers * then agp_alloc_bridge and agp_generic_find_bridge need to be updated */ struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev) { if (list_empty(&agp_bridges)) return NULL; return agp_bridge; } static void ipi_handler(void *null) { flush_agp_cache(); } void global_cache_flush(void) { if (on_each_cpu(ipi_handler, NULL, 1) != 0) panic(PFX "timed out waiting for the other CPUs!\n"); } EXPORT_SYMBOL(global_cache_flush); unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type) { /* memory type is ignored in the generic routine */ if (bridge->driver->masks) return addr | bridge->driver->masks[0].mask; else return addr; } EXPORT_SYMBOL(agp_generic_mask_memory); int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, int type) { if (type >= AGP_USER_TYPES) return 0; return type; } EXPORT_SYMBOL(agp_generic_type_to_mask_type); /* * These functions are implemented according to the AGPv3 spec, * which covers implementation details that had previously been * left open. 
*/ int agp3_generic_fetch_size(void) { u16 temp_size; int i; struct aper_size_info_16 *values; pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size); values = A_SIZE_16(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp_size == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } EXPORT_SYMBOL(agp3_generic_fetch_size); void agp3_generic_tlbflush(struct agp_memory *mem) { u32 ctrl; pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl); } EXPORT_SYMBOL(agp3_generic_tlbflush); int agp3_generic_configure(void) { u32 temp; struct aper_size_info_16 *current_size; current_size = A_SIZE_16(agp_bridge->current_size); pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* set aperture size */ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); /* set gart pointer */ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr); /* enable aperture and GTLB */ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN); return 0; } EXPORT_SYMBOL(agp3_generic_configure); void agp3_generic_cleanup(void) { u32 ctrl; pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB); } EXPORT_SYMBOL(agp3_generic_cleanup); const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] = { {4096, 1048576, 
10,0x000}, {2048, 524288, 9, 0x800}, {1024, 262144, 8, 0xc00}, { 512, 131072, 7, 0xe00}, { 256, 65536, 6, 0xf00}, { 128, 32768, 5, 0xf20}, { 64, 16384, 4, 0xf30}, { 32, 8192, 3, 0xf38}, { 16, 4096, 2, 0xf3c}, { 8, 2048, 1, 0xf3e}, { 4, 1024, 0, 0xf3f} }; EXPORT_SYMBOL(agp3_generic_sizes);
gpl-2.0
zhuoyang/kernel
arch/x86/platform/scx200/scx200_32.c
4876
3224
/* * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> * * National Semiconductor SCx200 support. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/scx200.h> #include <linux/scx200_gpio.h> /* Verify that the configuration block really is there */ #define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base)) MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 Driver"); MODULE_LICENSE("GPL"); unsigned scx200_gpio_base = 0; unsigned long scx200_gpio_shadow[2]; unsigned scx200_cb_base = 0; static struct pci_device_id scx200_tbl[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_XBUS) }, { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_XBUS) }, { }, }; MODULE_DEVICE_TABLE(pci,scx200_tbl); static int __devinit scx200_probe(struct pci_dev *, const struct pci_device_id *); static struct pci_driver scx200_pci_driver = { .name = "scx200", .id_table = scx200_tbl, .probe = scx200_probe, }; static DEFINE_MUTEX(scx200_gpio_config_lock); static void __devinit scx200_init_shadow(void) { int bank; /* read the current values driven on the GPIO signals */ for (bank = 0; bank < 2; ++bank) scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank); } static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned base; if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) { base = pci_resource_start(pdev, 0); pr_info("GPIO base 0x%x\n", base); if (!request_region(base, SCx200_GPIO_SIZE, "NatSemi SCx200 GPIO")) { pr_err("can't allocate I/O for GPIOs\n"); return -EBUSY; } scx200_gpio_base = base; scx200_init_shadow(); } else { /* find the base of the Configuration Block */ if (scx200_cb_probe(SCx200_CB_BASE_FIXED)) { 
scx200_cb_base = SCx200_CB_BASE_FIXED; } else { pci_read_config_dword(pdev, SCx200_CBA_SCRATCH, &base); if (scx200_cb_probe(base)) { scx200_cb_base = base; } else { pr_warn("Configuration Block not found\n"); return -ENODEV; } } pr_info("Configuration Block base 0x%x\n", scx200_cb_base); } return 0; } u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits) { u32 config, new_config; mutex_lock(&scx200_gpio_config_lock); outl(index, scx200_gpio_base + 0x20); config = inl(scx200_gpio_base + 0x24); new_config = (config & mask) | bits; outl(new_config, scx200_gpio_base + 0x24); mutex_unlock(&scx200_gpio_config_lock); return config; } static int __init scx200_init(void) { pr_info("NatSemi SCx200 Driver\n"); return pci_register_driver(&scx200_pci_driver); } static void __exit scx200_cleanup(void) { pci_unregister_driver(&scx200_pci_driver); release_region(scx200_gpio_base, SCx200_GPIO_SIZE); } module_init(scx200_init); module_exit(scx200_cleanup); EXPORT_SYMBOL(scx200_gpio_base); EXPORT_SYMBOL(scx200_gpio_shadow); EXPORT_SYMBOL(scx200_gpio_configure); EXPORT_SYMBOL(scx200_cb_base);
gpl-2.0
faux123/Galaxy_S5
arch/sparc/kernel/chmc.c
7436
20646
/* chmc.c: Driver for UltraSPARC-III memory controller. * * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/spitfire.h> #include <asm/chmctrl.h> #include <asm/cpudata.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/head.h> #include <asm/io.h> #include <asm/memctrl.h> #define DRV_MODULE_NAME "chmc" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.2" MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("UltraSPARC-III memory controller driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static int mc_type; #define MC_TYPE_SAFARI 1 #define MC_TYPE_JBUS 2 static dimm_printer_t us3mc_dimm_printer; #define CHMCTRL_NDGRPS 2 #define CHMCTRL_NDIMMS 4 #define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS) /* OBP memory-layout property format. */ struct chmc_obp_map { unsigned char dimm_map[144]; unsigned char pin_map[576]; }; #define DIMM_LABEL_SZ 8 struct chmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. */ char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. 
*/ char symmetric; struct chmc_obp_map map[2]; }; #define CHMCTRL_NBANKS 4 struct chmc_bank_info { struct chmc *p; int bank_id; u64 raw_reg; int valid; int uk; int um; int lk; int lm; int interleave; unsigned long base; unsigned long size; }; struct chmc { struct list_head list; int portid; struct chmc_obp_mem_layout layout_prop; int layout_size; void __iomem *regs; u64 timing_control1; u64 timing_control2; u64 timing_control3; u64 timing_control4; u64 memaddr_control; struct chmc_bank_info logical_banks[CHMCTRL_NBANKS]; }; #define JBUSMC_REGS_SIZE 8 #define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL #define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL #define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL #define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL #define JB_MC_REG1_XOR 0x0000010000000000UL #define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL #define JB_MC_REG1_ADDR_GEN_2_SHIFT 37 #define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL #define JB_MC_REG1_ADDR_GEN_1_SHIFT 34 #define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL #define JB_MC_REG1_INTERLEAVE_SHIFT 23 #define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL #define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21 #define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL #define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20 #define PART_TYPE_X8 0 #define PART_TYPE_X4 1 #define INTERLEAVE_NONE 0 #define INTERLEAVE_SAME 1 #define INTERLEAVE_INTERNAL 2 #define INTERLEAVE_BOTH 3 #define ADDR_GEN_128MB 0 #define ADDR_GEN_256MB 1 #define ADDR_GEN_512MB 2 #define ADDR_GEN_1GB 3 #define JB_NUM_DIMM_GROUPS 2 #define JB_NUM_DIMMS_PER_GROUP 2 #define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP) struct jbusmc_obp_map { unsigned char dimm_map[18]; unsigned char pin_map[144]; }; struct jbusmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. 
*/ char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. */ char symmetric; struct jbusmc_obp_map map; char _pad; }; struct jbusmc_dimm_group { struct jbusmc *controller; int index; u64 base_addr; u64 size; }; struct jbusmc { void __iomem *regs; u64 mc_reg_1; u32 portid; struct jbusmc_obp_mem_layout layout; int layout_len; int num_dimm_groups; struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS]; struct list_head list; }; static DEFINE_SPINLOCK(mctrl_list_lock); static LIST_HEAD(mctrl_list); static void mc_list_add(struct list_head *list) { spin_lock(&mctrl_list_lock); list_add(list, &mctrl_list); spin_unlock(&mctrl_list_lock); } static void mc_list_del(struct list_head *list) { spin_lock(&mctrl_list_lock); list_del_init(list); spin_unlock(&mctrl_list_lock); } #define SYNDROME_MIN -1 #define SYNDROME_MAX 144 /* Covert syndrome code into the way the bits are positioned * on the bus. */ static int syndrome_to_qword_code(int syndrome_code) { if (syndrome_code < 128) syndrome_code += 16; else if (syndrome_code < 128 + 9) syndrome_code -= (128 - 7); else if (syndrome_code < (128 + 9 + 3)) syndrome_code -= (128 + 9 - 4); else syndrome_code -= (128 + 9 + 3); return syndrome_code; } /* All this magic has to do with how a cache line comes over the wire * on Safari and JBUS. A 64-bit line comes over in 1 or more quadword * cycles, each of which transmit ECC/MTAG info as well as the actual * data. 
*/ #define L2_LINE_SIZE 64 #define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1) #define QW_PER_LINE 4 #define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE) #define QW_BITS 144 #define SAFARI_LAST_BIT (576 - 1) #define JBUS_LAST_BIT (144 - 1) static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr, int *pin_p, char **dimm_str_p, void *_prop, int base_dimm_offset) { int qword_code = syndrome_to_qword_code(syndrome_code); int cache_line_offset; int offset_inverse; int dimm_map_index; int map_val; if (mc_type == MC_TYPE_JBUS) { struct jbusmc_obp_mem_layout *p = _prop; /* JBUS */ cache_line_offset = qword_code; offset_inverse = (JBUS_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse / 8; map_val = p->map.dimm_map[dimm_map_index]; map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = p->map.pin_map[cache_line_offset]; } else { struct chmc_obp_mem_layout *p = _prop; struct chmc_obp_map *mp; int qword; /* Safari */ if (p->symmetric) mp = &p->map[0]; else mp = &p->map[1]; qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES; cache_line_offset = ((3 - qword) * QW_BITS) + qword_code; offset_inverse = (SAFARI_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse >> 2; map_val = mp->dimm_map[dimm_map_index]; map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = mp->pin_map[cache_line_offset]; } } static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) { struct jbusmc *p; list_for_each_entry(p, &mctrl_list, list) { int i; for (i = 0; i < p->num_dimm_groups; i++) { struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; if (phys_addr < dp->base_addr || (dp->base_addr + dp->size) <= phys_addr) continue; return dp; } } return NULL; } static int jbusmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct jbusmc_obp_mem_layout *prop; struct jbusmc_dimm_group *dp; 
struct jbusmc *p; int first_dimm; dp = jbusmc_find_dimm_group(phys_addr); if (dp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } p = dp->controller; prop = &p->layout; first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this dimm group. */ for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } static u64 __devinit jbusmc_dimm_group_size(u64 base, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { u64 max = base + (8UL * 1024 * 1024 * 1024); u64 max_seen = base; int i; for (i = 0; i < num_mem_regs; i++) { const struct linux_prom64_registers *ent; u64 this_base; u64 this_end; ent = &mem_regs[i]; this_base = ent->phys_addr; this_end = this_base + ent->reg_size; if (base < this_base || base >= this_end) continue; if (this_end > max) this_end = max; if (this_end > max_seen) max_seen = this_end; } return max_seen - base; } static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, unsigned long index, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; dp->controller = p; dp->index = index; dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); } static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { jbusmc_construct_one_dimm_group(p, 0, mem_regs, 
num_mem_regs); p->num_dimm_groups++; } if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); p->num_dimm_groups++; } } static int __devinit jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; int err, len, num_mem_regs; struct jbusmc *p; const u32 *prop; const void *ml; err = -ENODEV; mem_node = of_find_node_by_path("/memory"); if (!mem_node) { printk(KERN_ERR PFX "Cannot find /memory node.\n"); goto out; } mem_regs = of_get_property(mem_node, "reg", &len); if (!mem_regs) { printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n"); goto out; } num_mem_regs = len / sizeof(*mem_regs); err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n"); goto out; } INIT_LIST_HEAD(&p->list); err = -ENODEV; prop = of_get_property(op->dev.of_node, "portid", &len); if (!prop || len != 4) { printk(KERN_ERR PFX "Cannot find portid.\n"); goto out_free; } p->portid = *prop; prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len); if (!prop || len != 8) { printk(KERN_ERR PFX "Cannot get memory control register 1.\n"); goto out_free; } p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1]; err = -ENOMEM; p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc"); if (!p->regs) { printk(KERN_ERR PFX "Cannot map jbusmc regs.\n"); goto out_free; } err = -ENODEV; ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len); if (!ml) { printk(KERN_ERR PFX "Cannot get memory layout property.\n"); goto out_iounmap; } if (p->layout_len > sizeof(p->layout)) { printk(KERN_ERR PFX "Unexpected memory-layout size %d\n", p->layout_len); goto out_iounmap; } memcpy(&p->layout, ml, p->layout_len); jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n", op->dev.of_node->full_name); 
dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_iounmap: of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); out_free: kfree(p); goto out; } /* Does BANK decode PHYS_ADDR? */ static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) { unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; /* Bank must be enabled to match. */ if (bp->valid == 0) return 0; /* Would BANK match upper bits? */ upper_bits ^= bp->um; /* What bits are different? */ upper_bits = ~upper_bits; /* Invert. */ upper_bits |= bp->uk; /* What bits don't matter for matching? */ upper_bits = ~upper_bits; /* Invert. */ if (upper_bits) return 0; /* Would BANK match lower bits? */ lower_bits ^= bp->lm; /* What bits are different? */ lower_bits = ~lower_bits; /* Invert. */ lower_bits |= bp->lk; /* What bits don't matter for matching? */ lower_bits = ~lower_bits; /* Invert. */ if (lower_bits) return 0; /* I always knew you'd be the one. */ return 1; } /* Given PHYS_ADDR, search memory controller banks for a match. */ static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) { struct chmc *p; list_for_each_entry(p, &mctrl_list, list) { int bank_no; for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { struct chmc_bank_info *bp; bp = &p->logical_banks[bank_no]; if (chmc_bank_match(bp, phys_addr)) return bp; } } return NULL; } /* This is the main purpose of this driver. 
*/ static int chmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct chmc_bank_info *bp; struct chmc_obp_mem_layout *prop; int bank_in_controller, first_dimm; bp = chmc_find_bank(phys_addr); if (bp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } prop = &bp->p->layout_prop; bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1)); first_dimm *= CHMCTRL_NDIMMS; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this bank. */ for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } /* Accessing the registers is slightly complicated. If you want * to get at the memory controller which is on the same processor * the code is executing, you must use special ASI load/store else * you go through the global mapping. 
*/ static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset) { unsigned long ret, this_cpu; preempt_disable(); this_cpu = real_hard_smp_processor_id(); if (p->portid == this_cpu) { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } preempt_enable(); return ret; } #if 0 /* currently unused */ static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val) { if (p->portid == smp_processor_id()) { __asm__ __volatile__("stxa %0, [%1] %2" : : "r" (val), "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa %0, [%1] %2" : : "r" (val), "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } } #endif static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) { struct chmc_bank_info *bp = &p->logical_banks[which_bank]; bp->p = p; bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; bp->raw_reg = val; bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; bp->base = (bp->um); bp->base &= ~(bp->uk); bp->base <<= PA_UPPER_BITS_SHIFT; switch(bp->lk) { case 0xf: default: bp->interleave = 1; break; case 0xe: bp->interleave = 2; break; case 0xc: bp->interleave = 4; break; case 0x8: bp->interleave = 8; break; case 0x0: bp->interleave = 16; break; } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. 
*/ bp->size = (((unsigned long)bp->uk & ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; bp->size /= bp->interleave; } static void chmc_fetch_decode_regs(struct chmc *p) { if (p->layout_size == 0) return; chmc_interpret_one_decode_reg(p, 0, chmc_read_mcreg(p, CHMCTRL_DECODE1)); chmc_interpret_one_decode_reg(p, 1, chmc_read_mcreg(p, CHMCTRL_DECODE2)); chmc_interpret_one_decode_reg(p, 2, chmc_read_mcreg(p, CHMCTRL_DECODE3)); chmc_interpret_one_decode_reg(p, 3, chmc_read_mcreg(p, CHMCTRL_DECODE4)); } static int __devinit chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; const void *pval; int len, portid; struct chmc *p; int err; err = -ENODEV; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) goto out; portid = of_getintprop_default(dp, "portid", -1); if (portid == -1) goto out; pval = of_get_property(dp, "memory-layout", &len); if (pval && len > sizeof(p->layout_prop)) { printk(KERN_ERR PFX "Unexpected memory-layout property " "size %d.\n", len); goto out; } err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Could not allocate struct chmc.\n"); goto out; } p->portid = portid; p->layout_size = len; if (!pval) p->layout_size = 0; else memcpy(&p->layout_prop, pval, len); p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc"); if (!p->regs) { printk(KERN_ERR PFX "Could not map registers.\n"); goto out_free; } if (p->layout_size != 0UL) { p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1); p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2); p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3); p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4); p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL); } chmc_fetch_decode_regs(p); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n", dp->full_name, (p->layout_size ? 
"ACTIVE" : "INACTIVE")); dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_free: kfree(p); goto out; } static int __devinit us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) return jbusmc_probe(op); return -ENODEV; } static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p) { list_del(&p->list); of_iounmap(&op->resource[0], p->regs, 0x48); kfree(p); } static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p) { mc_list_del(&p->list); of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); kfree(p); } static int __devexit us3mc_remove(struct platform_device *op) { void *p = dev_get_drvdata(&op->dev); if (p) { if (mc_type == MC_TYPE_SAFARI) chmc_destroy(op, p); else if (mc_type == MC_TYPE_JBUS) jbusmc_destroy(op, p); } return 0; } static const struct of_device_id us3mc_match[] = { { .name = "memory-controller", }, {}, }; MODULE_DEVICE_TABLE(of, us3mc_match); static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, .remove = __devexit_p(us3mc_remove), }; static inline bool us3mc_platform(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) return true; return false; } static int __init us3mc_init(void) { unsigned long ver; int ret; if (!us3mc_platform()) return -ENODEV; __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) { mc_type = MC_TYPE_JBUS; us3mc_dimm_printer = jbusmc_print_dimm; } else { mc_type = MC_TYPE_SAFARI; us3mc_dimm_printer = chmc_print_dimm; } ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } return ret; } static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); 
platform_driver_unregister(&us3mc_driver); } } module_init(us3mc_init); module_exit(us3mc_cleanup);
gpl-2.0
CyanideL/android_kernel_samsung_jf
arch/sh/kernel/cpu/sh3/clock-sh7710.c
9228
1904
/* * arch/sh/kernel/cpu/sh3/clock-sh7710.c * * SH7710 support for the clock framework * * Copyright (C) 2005 Paul Mundt * * FRQCR parsing hacked out of arch/sh/kernel/time.c * * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2002, 2003, 2004 Paul Mundt * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 }; static void master_clk_init(struct clk *clk) { clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007]; } static struct sh_clk_ops sh7710_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0007); return clk->parent->rate / md_table[idx]; } static struct sh_clk_ops sh7710_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0700) >> 8; return clk->parent->rate / md_table[idx]; } static struct sh_clk_ops sh7710_bus_clk_ops = { .recalc = bus_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int idx = (__raw_readw(FRQCR) & 0x0070) >> 4; return clk->parent->rate / md_table[idx]; } static struct sh_clk_ops sh7710_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct sh_clk_ops *sh7710_clk_ops[] = { &sh7710_master_clk_ops, &sh7710_module_clk_ops, &sh7710_bus_clk_ops, &sh7710_cpu_clk_ops, }; void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7710_clk_ops)) *ops = sh7710_clk_ops[idx]; }
gpl-2.0
markfasheh/linux-4.1-dedupe_fixes
drivers/isdn/hardware/eicon/s_4bri.c
9740
15335
/* * Copyright (c) Eicon Networks, 2002. * This source file is supplied for the use with Eicon Networks range of DIVA Server Adapters. * Eicon File Revision : 2.1 * This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "platform.h" #include "di_defs.h" #include "pc.h" #include "pr_pc.h" #include "di.h" #include "mi_pc.h" #include "pc_maint.h" #include "divasync.h" #include "pc_init.h" #include "io.h" #include "helpers.h" #include "dsrv4bri.h" #include "dsp_defs.h" #include "sdp_hdr.h" /*****************************************************************************/ #define MAX_XLOG_SIZE (64 * 1024) /* -------------------------------------------------------------------------- Recovery XLOG from QBRI Card -------------------------------------------------------------------------- */ static void qBri_cpu_trapped(PISDN_ADAPTER IoAdapter) { byte __iomem *base; word *Xlog; dword regs[4], TrapID, offset, size; Xdesc xlogDesc; int factor = (IoAdapter->tasks == 1) ? 
1 : 2; /* * check for trapped MIPS 46xx CPU, dump exception frame */ base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter); offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor); TrapID = READ_DWORD(&base[0x80]); if ((TrapID == 0x99999999) || (TrapID == 0x99999901)) { dump_trap_frame(IoAdapter, &base[0x90]); IoAdapter->trapped = 1; } regs[0] = READ_DWORD((base + offset) + 0x70); regs[1] = READ_DWORD((base + offset) + 0x74); regs[2] = READ_DWORD((base + offset) + 0x78); regs[3] = READ_DWORD((base + offset) + 0x7c); regs[0] &= IoAdapter->MemorySize - 1; if ((regs[0] >= offset) && (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1)) { if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) { DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base); return; } size = offset + (IoAdapter->MemorySize >> factor) - regs[0]; if (size > MAX_XLOG_SIZE) size = MAX_XLOG_SIZE; memcpy_fromio(Xlog, &base[regs[0]], size); xlogDesc.buf = Xlog; xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]); xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]); dump_xlog_buffer(IoAdapter, &xlogDesc); diva_os_free(0, Xlog); IoAdapter->trapped = 2; } DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base); } /* -------------------------------------------------------------------------- Reset QBRI Hardware -------------------------------------------------------------------------- */ static void reset_qBri_hardware(PISDN_ADAPTER IoAdapter) { word volatile __iomem *qBriReset; byte volatile __iomem *qBriCntrl; byte volatile __iomem *p; qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter); WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM); diva_os_wait(1); WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM); diva_os_wait(1); DIVA_OS_MEM_DETACH_PROM(IoAdapter, 
qBriReset); qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)]; WRITE_DWORD(p, 0); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl); DBG_TRC(("resetted board @ reset addr 0x%08lx", qBriReset)) DBG_TRC(("resetted board @ cntrl addr 0x%08lx", p)) } /* -------------------------------------------------------------------------- Start Card CPU -------------------------------------------------------------------------- */ void start_qBri_hardware(PISDN_ADAPTER IoAdapter) { byte volatile __iomem *qBriReset; byte volatile __iomem *p; p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)]; WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK); diva_os_wait(2); WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK); diva_os_wait(10); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); DBG_TRC(("started processor @ addr 0x%08lx", qBriReset)) } /* -------------------------------------------------------------------------- Stop Card CPU -------------------------------------------------------------------------- */ static void stop_qBri_hardware(PISDN_ADAPTER IoAdapter) { byte volatile __iomem *p; dword volatile __iomem *qBriReset; dword volatile __iomem *qBriIrq; dword volatile __iomem *qBriIsacDspReset; int rev2 = DIVA_4BRI_REVISION(IoAdapter); int reset_offset = rev2 ? (MQ2_BREG_RISC) : (MQ_BREG_RISC); int irq_offset = rev2 ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST); int hw_offset = rev2 ? 
(MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET); if (IoAdapter->ControllerNumber > 0) return; p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriReset = (dword volatile __iomem *)&p[reset_offset]; qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset]; /* * clear interrupt line (reset Local Interrupt Test Register) */ WRITE_DWORD(qBriReset, 0); WRITE_DWORD(qBriIsacDspReset, 0); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */ DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriIrq = (dword volatile __iomem *)&p[irq_offset]; WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset)) } /* -------------------------------------------------------------------------- FPGA download -------------------------------------------------------------------------- */ #define FPGA_NAME_OFFSET 0x10 static byte *qBri_check_FPGAsrc(PISDN_ADAPTER IoAdapter, char *FileName, dword *Length, dword *code) { byte *File; char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime; dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i; if (!(File = (byte *)xdiLoadFile(FileName, Length, 0))) { return (NULL); } /* * scan file until FF and put id string into buffer */ for (i = 0; File[i] != 0xff;) { if (++i >= *Length) { DBG_FTL(("FPGA download: start of data header not found")) xdiFreeFile(File); return (NULL); } } *code = i++; if ((File[i] & 0xF0) != 0x20) { DBG_FTL(("FPGA download: data header corrupted")) xdiFreeFile(File); return (NULL); } fpgaFlen = (dword)File[FPGA_NAME_OFFSET - 1]; if (fpgaFlen == 0) fpgaFlen = 12; fpgaFile = (char *)&File[FPGA_NAME_OFFSET]; fpgaTlen = (dword)fpgaFile[fpgaFlen + 2]; if (fpgaTlen == 0) fpgaTlen = 10; fpgaType = (char *)&fpgaFile[fpgaFlen + 3]; fpgaDlen = (dword) fpgaType[fpgaTlen + 2]; if (fpgaDlen == 0) fpgaDlen = 11; fpgaDate = (char *)&fpgaType[fpgaTlen + 3]; fpgaTime = 
(char *)&fpgaDate[fpgaDlen + 3]; cnt = (dword)(((File[i] & 0x0F) << 20) + (File[i + 1] << 12) + (File[i + 2] << 4) + (File[i + 3] >> 4)); if ((dword)(i + (cnt / 8)) > *Length) { DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)", FileName, *Length, code + ((cnt + 7) / 8))) xdiFreeFile(File); return (NULL); } i = 0; do { while ((fpgaDate[i] != '\0') && ((fpgaDate[i] < '0') || (fpgaDate[i] > '9'))) { i++; } year = 0; while ((fpgaDate[i] >= '0') && (fpgaDate[i] <= '9')) year = year * 10 + (fpgaDate[i++] - '0'); } while ((year < 2000) && (fpgaDate[i] != '\0')); switch (IoAdapter->cardType) { case CARDTYPE_DIVASRV_B_2F_PCI: break; default: if (year >= 2001) { IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED; } } DBG_LOG(("FPGA[%s] file %s (%s %s) len %d", fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt)) return (File); } /******************************************************************************/ #define FPGA_PROG 0x0001 /* PROG enable low */ #define FPGA_BUSY 0x0002 /* BUSY high, DONE low */ #define FPGA_CS 0x000C /* Enable I/O pins */ #define FPGA_CCLK 0x0100 #define FPGA_DOUT 0x0400 #define FPGA_DIN FPGA_DOUT /* bidirectional I/O */ int qBri_FPGA_download(PISDN_ADAPTER IoAdapter) { int bit; byte *File; dword code, FileLength; word volatile __iomem *addr = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter); word val, baseval = FPGA_CS | FPGA_PROG; if (DIVA_4BRI_REVISION(IoAdapter)) { char *name; switch (IoAdapter->cardType) { case CARDTYPE_DIVASRV_B_2F_PCI: name = "dsbri2f.bit"; break; case CARDTYPE_DIVASRV_B_2M_V2_PCI: case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI: name = "dsbri2m.bit"; break; default: name = "ds4bri2.bit"; } File = qBri_check_FPGAsrc(IoAdapter, name, &FileLength, &code); } else { File = qBri_check_FPGAsrc(IoAdapter, "ds4bri.bit", &FileLength, &code); } if (!File) { DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr); return (0); } /* * prepare download, pulse PROGRAM pin down. 
*/ WRITE_WORD(addr, baseval & ~FPGA_PROG); /* PROGRAM low pulse */ WRITE_WORD(addr, baseval); /* release */ diva_os_wait(50); /* wait until FPGA finished internal memory clear */ /* * check done pin, must be low */ if (READ_WORD(addr) & FPGA_BUSY) { DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing")) xdiFreeFile(File); DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr); return (0); } /* * put data onto the FPGA */ while (code < FileLength) { val = ((word)File[code++]) << 3; for (bit = 8; bit-- > 0; val <<= 1) /* put byte onto FPGA */ { baseval &= ~FPGA_DOUT; /* clr data bit */ baseval |= (val & FPGA_DOUT); /* copy data bit */ WRITE_WORD(addr, baseval); WRITE_WORD(addr, baseval | FPGA_CCLK); /* set CCLK hi */ WRITE_WORD(addr, baseval | FPGA_CCLK); /* set CCLK hi */ WRITE_WORD(addr, baseval); /* set CCLK lo */ } } xdiFreeFile(File); diva_os_wait(100); val = READ_WORD(addr); DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr); if (!(val & FPGA_BUSY)) { DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val)) return (0); } return (1); } static int load_qBri_hardware(PISDN_ADAPTER IoAdapter) { return (0); } /* -------------------------------------------------------------------------- Card ISR -------------------------------------------------------------------------- */ static int qBri_ISR(struct _ISDN_ADAPTER *IoAdapter) { dword volatile __iomem *qBriIrq; PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList; word i; int serviced = 0; byte __iomem *p; p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); if (!(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80)) { DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); return (0); } DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); /* * clear interrupt line (reset Local Interrupt Test Register) */ p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? 
(MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]); WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); for (i = 0; i < IoAdapter->tasks; ++i) { IoAdapter = QuadroList->QuadroAdapter[i]; if (IoAdapter && IoAdapter->Initialized && IoAdapter->tst_irq(&IoAdapter->a)) { IoAdapter->IrqCount++; serviced = 1; diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr); } } return (serviced); } /* -------------------------------------------------------------------------- Does disable the interrupt on the card -------------------------------------------------------------------------- */ static void disable_qBri_interrupt(PISDN_ADAPTER IoAdapter) { dword volatile __iomem *qBriIrq; byte __iomem *p; if (IoAdapter->ControllerNumber > 0) return; /* * clear interrupt line (reset Local Interrupt Test Register) */ p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */ DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? 
(MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]); WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); } /* -------------------------------------------------------------------------- Install Adapter Entry Points -------------------------------------------------------------------------- */ static void set_common_qBri_functions(PISDN_ADAPTER IoAdapter) { ADAPTER *a; a = &IoAdapter->a; a->ram_in = mem_in; a->ram_inw = mem_inw; a->ram_in_buffer = mem_in_buffer; a->ram_look_ahead = mem_look_ahead; a->ram_out = mem_out; a->ram_outw = mem_outw; a->ram_out_buffer = mem_out_buffer; a->ram_inc = mem_inc; IoAdapter->out = pr_out; IoAdapter->dpc = pr_dpc; IoAdapter->tst_irq = scom_test_int; IoAdapter->clr_irq = scom_clear_int; IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS; IoAdapter->load = load_qBri_hardware; IoAdapter->disIrq = disable_qBri_interrupt; IoAdapter->rstFnc = reset_qBri_hardware; IoAdapter->stop = stop_qBri_hardware; IoAdapter->trapFnc = qBri_cpu_trapped; IoAdapter->diva_isr_handler = qBri_ISR; IoAdapter->a.io = (void *)IoAdapter; } static void set_qBri_functions(PISDN_ADAPTER IoAdapter) { if (!IoAdapter->tasks) { IoAdapter->tasks = MQ_INSTANCE_COUNT; } IoAdapter->MemorySize = MQ_MEMORY_SIZE; set_common_qBri_functions(IoAdapter); diva_os_set_qBri_functions(IoAdapter); } static void set_qBri2_functions(PISDN_ADAPTER IoAdapter) { if (!IoAdapter->tasks) { IoAdapter->tasks = MQ_INSTANCE_COUNT; } IoAdapter->MemorySize = (IoAdapter->tasks == 1) ? 
BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE; set_common_qBri_functions(IoAdapter); diva_os_set_qBri2_functions(IoAdapter); } /******************************************************************************/ void prepare_qBri_functions(PISDN_ADAPTER IoAdapter) { set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[0]); set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[1]); set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[2]); set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[3]); } void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter) { if (!IoAdapter->tasks) { IoAdapter->tasks = MQ_INSTANCE_COUNT; } set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[0]); if (IoAdapter->tasks > 1) { set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[1]); set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[2]); set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[3]); } } /* -------------------------------------------------------------------------- */
gpl-2.0
tjstyle/android_kernel_samsung_msm
drivers/scsi/aic7xxx_old/aic7xxx_proc.c
10252
12527
/*+M************************************************************************* * Adaptec AIC7xxx device driver proc support for Linux. * * Copyright (c) 1995, 1996 Dean W. Gehnert * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * ---------------------------------------------------------------- * o Modified from the EATA-DMA /proc support. * o Additional support for device block statistics provided by * Matthew Jacob. * o Correction of overflow by Heinz Mauelshagen * o Adittional corrections by Doug Ledford * * Dean W. Gehnert, deang@teleport.com, 05/01/96 * * $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $ *-M*************************************************************************/ #define BLS (&aic7xxx_buffer[size]) #define HDRB \ " 0 - 4K 4 - 16K 16 - 64K 64 - 256K 256K - 1M 1M+" #ifdef PROC_DEBUG extern int vsprintf(char *, const char *, va_list); static void proc_debug(const char *fmt, ...) { va_list ap; char buf[256]; va_start(ap, fmt); vsprintf(buf, fmt, ap); printk(buf); va_end(ap); } #else /* PROC_DEBUG */ # define proc_debug(fmt, args...) #endif /* PROC_DEBUG */ static int aic7xxx_buffer_size = 0; static char *aic7xxx_buffer = NULL; /*+F************************************************************************* * Function: * aic7xxx_set_info * * Description: * Set parameters for the driver from the /proc filesystem. 
*-F*************************************************************************/
/*
 * Write handler for the driver's /proc entry.  Runtime configuration
 * through /proc is not implemented, so every write fails with -ENOSYS.
 */
static int
aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
{
  proc_debug("aic7xxx_set_info(): %s\n", buffer);
  return (-ENOSYS);  /* Currently this is a no-op */
}

/*+F*************************************************************************
 * Function:
 *   aic7xxx_proc_info
 *
 * Description:
 *   Return information to handle /proc support for the driver.
 *
 *   HBAptr - host the /proc access refers to
 *   buffer - page supplied by the proc layer to copy output into
 *   start  - out: where in 'buffer' the returned data begins
 *   offset - read offset into the full (virtual) report
 *   length - maximum number of bytes to return
 *   inout  - TRUE for a write to the file, FALSE for a read
 *
 *   Renders the whole report into the file-scope 'aic7xxx_buffer' and
 *   copies out the slice [offset, offset+length).  The scratch buffer is
 *   freed once a read past the end is seen (end of a sequential read).
 *
 *   NOTE(review): 'aic7xxx_buffer' is shared global state with no
 *   locking; concurrent /proc readers would race -- confirm callers
 *   serialize access.
 *-F*************************************************************************/
int
aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start,
                    off_t offset, int length, int inout)
{
  struct aic7xxx_host *p;
  struct aic_dev_data *aic_dev;
  struct scsi_device *sdptr;
  int    size = 0;
  unsigned char i;
  unsigned char tindex;

  /* Locate our driver instance for this Scsi_Host on the global list. */
  for(p=first_aic7xxx; p && p->host != HBAptr; p=p->next)
    ;

  if (!p)
  {
    size += sprintf(buffer, "Can't find adapter for host number %d\n",
        HBAptr->host_no);
    if (size > length)
    {
      return (size);
    }
    else
    {
      return (length);
    }
  }

  if (inout == TRUE) /* Has data been written to the file? */
  {
    return (aic7xxx_set_info(buffer, length, HBAptr));
  }

  p = (struct aic7xxx_host *) HBAptr->hostdata;

  /*
   * It takes roughly 1K of space to hold all relevant card info, not
   * counting any proc stats, so we start out with a 1.5k buffer size and
   * if proc_stats is defined, then we sweep the stats structure to see
   * how many drives we will be printing out for and add 384 bytes per
   * device with active stats.
   *
   * Hmmmm...that 1.5k seems to keep growing as items get added so they
   * can be easily viewed for debugging purposes.  So, we bumped that
   * 1.5k to 4k so we can quit having to bump it all the time.
   */
  size = 4096;
  list_for_each_entry(aic_dev, &p->aic_devs, list)
    size += 512;
  /* (Re)allocate the scratch buffer whenever the required size changed. */
  if (aic7xxx_buffer_size != size)
  {
    if (aic7xxx_buffer != NULL)
    {
      kfree(aic7xxx_buffer);
      aic7xxx_buffer_size = 0;
    }
    aic7xxx_buffer = kmalloc(size, GFP_KERNEL);
  }
  if (aic7xxx_buffer == NULL)
  {
    size = sprintf(buffer, "AIC7xxx - kmalloc error at line %d\n",
        __LINE__);
    return size;
  }
  aic7xxx_buffer_size = size;

  /* From here on, BLS expands to &aic7xxx_buffer[size]; 'size' tracks
   * how much of the scratch buffer has been filled. */
  size = 0;
  size += sprintf(BLS, "Adaptec AIC7xxx driver version: ");
  size += sprintf(BLS, "%s/", AIC7XXX_C_VERSION);
  size += sprintf(BLS, "%s", AIC7XXX_H_VERSION);
  size += sprintf(BLS, "\n");
  size += sprintf(BLS, "Adapter Configuration:\n");
  size += sprintf(BLS, " SCSI Adapter: %s\n",
      board_names[p->board_name_index]);
  if (p->flags & AHC_TWIN)
    size += sprintf(BLS, " Twin Channel Controller ");
  else
  {
    /* Build the "Ultra-x Wide Controller Channel y" description. */
    char *channel = "";
    char *ultra = "";
    char *wide = "Narrow ";

    if (p->flags & AHC_MULTI_CHANNEL)
    {
      channel = " Channel A";
      if (p->flags & (AHC_CHNLB|AHC_CHNLC))
        channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C";
    }
    if (p->features & AHC_WIDE)
      wide = "Wide ";
    if (p->features & AHC_ULTRA3)
    {
      switch(p->chip & AHC_CHIPID_MASK)
      {
        case AHC_AIC7892:
        case AHC_AIC7899:
          ultra = "Ultra-160/m LVD/SE ";
          break;
        default:
          ultra = "Ultra-3 LVD/SE ";
          break;
      }
    }
    else if (p->features & AHC_ULTRA2)
      ultra = "Ultra-2 LVD/SE ";
    else if (p->features & AHC_ULTRA)
      ultra = "Ultra ";
    size += sprintf(BLS, " %s%sController%s ", ultra, wide, channel);
  }
  /* Bus attachment: VLB and EISA reuse pci_device_fn as the slot number. */
  switch(p->chip & ~AHC_CHIPID_MASK)
  {
    case AHC_VL:
      size += sprintf(BLS, "at VLB slot %d\n", p->pci_device_fn);
      break;
    case AHC_EISA:
      size += sprintf(BLS, "at EISA slot %d\n", p->pci_device_fn);
      break;
    default:
      size += sprintf(BLS, "at PCI %d/%d/%d\n", p->pci_bus,
          PCI_SLOT(p->pci_device_fn), PCI_FUNC(p->pci_device_fn));
      break;
  }
  if( !(p->maddr) )
  {
    size += sprintf(BLS, " Programmed I/O Base: %lx\n", p->base);
  }
  else
  {
    size += sprintf(BLS, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
  }
  if( (p->chip & (AHC_VL | AHC_EISA)) )
  {
    size += sprintf(BLS, " BIOS Memory Address: 0x%08x\n", p->bios_address);
  }
  size += sprintf(BLS, " Adapter SEEPROM Config: %s\n",
      (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." :
      ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." :
       "SEEPROM not found, using leftover BIOS values.") );
  size += sprintf(BLS, " Adaptec SCSI BIOS: %s\n",
      (p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled");
  size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
  size += sprintf(BLS, " SCBs: Active %d, Max Active %d,\n",
      p->activescbs, p->max_activescbs);
  size += sprintf(BLS, " Allocated %d, HW %d, " "Page %d\n",
      p->scb_data->numscbs, p->scb_data->maxhscbs, p->scb_data->maxscbs);
  if (p->flags & AHC_EXTERNAL_SRAM)
    size += sprintf(BLS, " Using External SCB SRAM\n");
  size += sprintf(BLS, " Interrupts: %ld", p->isr_count);
  if (p->chip & AHC_EISA)
  {
    size += sprintf(BLS, " %s\n",
        (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
  }
  else
  {
    size += sprintf(BLS, "\n");
  }
  size += sprintf(BLS, " BIOS Control Word: 0x%04x\n",
      p->bios_control);
  size += sprintf(BLS, " Adapter Control Word: 0x%04x\n",
      p->adapter_control);
  size += sprintf(BLS, " Extended Translation: %sabled\n",
      (p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis");
  size += sprintf(BLS, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
  if (p->features & (AHC_ULTRA | AHC_ULTRA2))
  {
    size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
  }
  size += sprintf(BLS, "Default Tag Queue Depth: %d\n",
      aic7xxx_default_queue_depth);
  size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
      "instance %d:\n", p->instance);
  size += sprintf(BLS, " {");
  /* Print MAX_TARGETS depths; the last one (i == MAX_TARGETS-1 after the
   * loop) is printed separately so it gets "}" instead of ",". */
  for(i=0; i < (MAX_TARGETS - 1); i++)
    size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
  size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);

  size += sprintf(BLS, "\n");
  size += sprintf(BLS, "Statistics:\n\n");
  /* Per-device section: negotiation state, transfer settings, and
   * read/write histograms for every attached scsi device. */
  list_for_each_entry(aic_dev, &p->aic_devs, list)
  {
    sdptr = aic_dev->SDptr;
    tindex = sdptr->channel << 3 | sdptr->id;
    size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
        p->host_no, sdptr->channel, sdptr->id, sdptr->lun);
    size += sprintf(BLS, " Device using %s/%s",
          (aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ?
          "Wide" : "Narrow",
          (aic_dev->cur.offset != 0) ?
          "Sync transfers at " : "Async transfers.\n" );
    if (aic_dev->cur.offset != 0)
    {
      struct aic7xxx_syncrate *sync_rate;
      unsigned char options = aic_dev->cur.options;
      int period = aic_dev->cur.period;
      int rate = (aic_dev->cur.width ==
                  MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;

      sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options);
      if (sync_rate != NULL)
      {
        size += sprintf(BLS, "%s MByte/sec, offset %d\n",
                        sync_rate->rate[rate],
                        aic_dev->cur.offset );
      }
      else
      {
        size += sprintf(BLS, "3.3 MByte/sec, offset %d\n",
                        aic_dev->cur.offset );
      }
    }
    size += sprintf(BLS, " Transinfo settings: ");
    size += sprintf(BLS, "current(%d/%d/%d/%d), ",
        aic_dev->cur.period,
        aic_dev->cur.offset,
        aic_dev->cur.width,
        aic_dev->cur.options);
    size += sprintf(BLS, "goal(%d/%d/%d/%d), ",
        aic_dev->goal.period,
        aic_dev->goal.offset,
        aic_dev->goal.width,
        aic_dev->goal.options);
    size += sprintf(BLS, "user(%d/%d/%d/%d)\n",
        p->user[tindex].period,
        p->user[tindex].offset,
        p->user[tindex].width,
        p->user[tindex].options);
    if(sdptr->simple_tags)
    {
      size += sprintf(BLS, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth);
    }
    if(aic_dev->barrier_total)
      size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n",
        aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total,
        aic_dev->barrier_total, aic_dev->ordered_total);
    else
      size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld reads/writes)\n",
        aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total);
    size += sprintf(BLS, "%s\n", HDRB);
    size += sprintf(BLS, " Reads:");
    for (i = 0; i < ARRAY_SIZE(aic_dev->r_bins); i++)
    {
      size += sprintf(BLS, " %10ld", aic_dev->r_bins[i]);
    }
    size += sprintf(BLS, "\n");
    size += sprintf(BLS, " Writes:");
    for (i = 0; i < ARRAY_SIZE(aic_dev->w_bins); i++)
    {
      size += sprintf(BLS, " %10ld", aic_dev->w_bins[i]);
    }
    size += sprintf(BLS, "\n");
    size += sprintf(BLS, "\n\n");
  }
  /* Detect (but do not recover from) overrun of the scratch buffer. */
  if (size >= aic7xxx_buffer_size)
  {
    printk(KERN_WARNING "aic7xxx: Overflow in aic7xxx_proc.c\n");
  }

  if (offset > size - 1)
  {
    /* Read past the end of the report: tear down the scratch buffer and
     * return 0 bytes to signal EOF to the proc layer. */
    kfree(aic7xxx_buffer);
    aic7xxx_buffer = NULL;
    aic7xxx_buffer_size = length = 0;
    *start = NULL;
  }
  else
  {
    *start = buffer;
    length = min_t(int, length, size - offset);
    memcpy(buffer, &aic7xxx_buffer[offset], length);
  }

  return (length);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-indent-level: 2
 * c-brace-imaginary-offset: 0
 * c-brace-offset: -2
 * c-argdecl-indent: 2
 * c-label-offset: -2
 * c-continued-statement-offset: 2
 * c-continued-brace-offset: 0
 * indent-tabs-mode: nil
 * tab-width: 8
 * End:
 */
gpl-2.0
RaymanFX/kernel_samsung_lt03wifi
arch/ia64/kvm/mmio.c
10764
8856
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * Copyright (c) 2007 Intel Corporation KVM support.
 *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kvm_host.h>

#include "vcpu.h"

/* Latch the guest's XTP (external task priority) byte into the vLSAPIC. */
static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}

/*
 * LSAPIC OFFSET
 */
/* True when the offset falls in the lower half of the PIB (bit 20 clear). */
#define PIB_LOW_HALF(ofst)	!(ofst & (1 << 20))
#define PIB_OFST_INTA		0x1E0000
#define PIB_OFST_XTP		0x1E0008

/*
 * execute write IPI op.
 *
 * Records the IPI target address and payload in the vcpu's exit_data and
 * transitions to the VMM to deliver it.  Interrupts are disabled across
 * the transition so the exit_data block cannot be clobbered mid-flight.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
		uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;

	vmm_transition(current_vcpu);

	local_irq_restore(psr);

}

/*
 * Handle a guest store into the processor interrupt block.
 * Only a 1-byte XTP write and lower-half IPI writes (8 bytes) are legal;
 * everything else panics the VM.
 */
void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
			unsigned long length, unsigned long val)
{
	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		panic_vm(v, "Undefined write on PIB INTA\n");
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			vlsapic_write_xtp(v, val);
		} else {
			panic_vm(v, "Undefined write on PIB XTP\n");
		}
		break;
	default:
		if (PIB_LOW_HALF(addr)) {
			/*Lower half */
			if (length != 8)
				panic_vm(v, "Can't LHF write with size %ld!\n",
						length);
			else
				vlsapic_write_ipi(v, addr, val);
		} else {
			/*Upper half */
			panic_vm(v, "IPI-UHF write %lx\n", addr);
		}
		break;
	}
}

/*
 * Handle a guest load from the processor interrupt block.
 * INTA reads of 1 byte return 0 (no i8259 is modelled); XTP reads of
 * 1 byte return the latched XTP value; anything else panics the VM.
 */
unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length)
{
	uint64_t result = 0;

	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		if (length == 1)
			/* 1 byte load */
			; /* There is no i8259, there is no INTA access*/
		else
			panic_vm(v, "Undefined read on PIB INTA\n");

		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			result = VLSAPIC_XTP(v);
		} else {
			panic_vm(v, "Undefined read on PIB XTP\n");
		}
		break;
	default:
		panic_vm(v, "Undefined addr access for lsapic!\n");
		break;
	}
	return result;
}

/*
 * Perform one emulated MMIO access of 's' bytes at guest physical address
 * src_pa.  PIB-range accesses are handled locally via the vLSAPIC; all
 * others are packaged as an ioreq and handed to the VMM.  For reads the
 * result is zero-extended into *dest.  dir is IOREQ_READ/IOREQ_WRITE;
 * 'ma' is the memory attribute of the access (passed through, unused here).
 * Runs with local interrupts disabled around the VMM transition.
 */
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
					u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/*Intercept the access for PIB range*/
	if (iot == GPFN_PIB) {
		if (!dir)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}
	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			/* it's necessary to ensure zero extending */
			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
	} else
		panic_vm(vcpu, "Unhandled mmio access returned!\n");
out:
	local_irq_restore(psr);
	return ;
}

/*
   dir 1: read 0:write
   inst_type 0:integer 1:floating point
 */
#define SL_INTEGER	0	/* store/load interger*/
#define SL_FLOATING	1	/* store/load floating*/

/*
 * Emulate the load/store instruction that faulted on an MMIO address.
 *
 * Fetches the bundle at the guest IP, picks the slot indicated by
 * psr.ri, decodes the ia64 M-unit instruction format, performs any
 * base-register update the instruction encodes, issues the access via
 * mmio_access(), writes back the target register for loads, and finally
 * advances the guest IP.  Unrecognized encodings panic the VM.
 *
 * padr: faulting guest physical address; ma: memory attribute.
 * 'size' holds the log2 of the access size until the `size = 1 << size`
 * conversion near the end.
 */
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);

	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if fetch code fail, return and try again */
		return;
	}
	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		/* Slot 1 straddles the two 41-bit fields of the bundle. */
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;

	/* Integer Load/Store */
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);
		if ((inst.M1.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/*Integer Load/Store + Imm update*/
		inst_type = SL_INTEGER;
		size = (inst.M3.x6&0x3);
		if ((inst.M5.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			/* Reassemble the signed 9-bit immediate, then
			 * arithmetic-shift back down to sign-extend it. */
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
				&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill*/
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/* Write high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
			    ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write high word.FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
			    8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0]; /* Significand. */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/* lfetch family: just perform the register update and skip
		 * the (prefetch-only) memory access. */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm ldfp8 M12*/
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;     /*ldfd*/
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;	/* exponent for an 8-byte integer */
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu, "Unsupported MMIO access instruction! "
				"Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
				bundle.i64[0], bundle.i64[1]);
	}

	/* Convert log2 size to bytes and issue the primary access. */
	size = 1 << size;
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu, "Unsupported instruction type!\n");
	}
	vcpu_increment_iip(vcpu);
}
gpl-2.0
rmtew/MediaTek-HelioX10-Kernel
alps/kernel-3.10/sound/aoa/soundbus/i2sbus/control.c
12556
4585
/* * i2sbus driver -- bus control routines * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/macio.h> #include <asm/pmac_feature.h> #include <asm/pmac_pfunc.h> #include <asm/keylargo.h> #include "i2sbus.h" int i2sbus_control_init(struct macio_dev* dev, struct i2sbus_control **c) { *c = kzalloc(sizeof(struct i2sbus_control), GFP_KERNEL); if (!*c) return -ENOMEM; INIT_LIST_HEAD(&(*c)->list); (*c)->macio = dev->bus->chip; return 0; } void i2sbus_control_destroy(struct i2sbus_control *c) { kfree(c); } /* this is serialised externally */ int i2sbus_control_add_dev(struct i2sbus_control *c, struct i2sbus_dev *i2sdev) { struct device_node *np; np = i2sdev->sound.ofdev.dev.of_node; i2sdev->enable = pmf_find_function(np, "enable"); i2sdev->cell_enable = pmf_find_function(np, "cell-enable"); i2sdev->clock_enable = pmf_find_function(np, "clock-enable"); i2sdev->cell_disable = pmf_find_function(np, "cell-disable"); i2sdev->clock_disable = pmf_find_function(np, "clock-disable"); /* if the bus number is not 0 or 1 we absolutely need to use * the platform functions -- there's nothing in Darwin that * would allow seeing a system behind what the FCRs are then, * and I don't want to go parsing a bunch of platform functions * by hand to try finding a system... 
*/ if (i2sdev->bus_number != 0 && i2sdev->bus_number != 1 && (!i2sdev->enable || !i2sdev->cell_enable || !i2sdev->clock_enable || !i2sdev->cell_disable || !i2sdev->clock_disable)) { pmf_put_function(i2sdev->enable); pmf_put_function(i2sdev->cell_enable); pmf_put_function(i2sdev->clock_enable); pmf_put_function(i2sdev->cell_disable); pmf_put_function(i2sdev->clock_disable); return -ENODEV; } list_add(&i2sdev->item, &c->list); return 0; } void i2sbus_control_remove_dev(struct i2sbus_control *c, struct i2sbus_dev *i2sdev) { /* this is serialised externally */ list_del(&i2sdev->item); if (list_empty(&c->list)) i2sbus_control_destroy(c); } int i2sbus_control_enable(struct i2sbus_control *c, struct i2sbus_dev *i2sdev) { struct pmf_args args = { .count = 0 }; struct macio_chip *macio = c->macio; if (i2sdev->enable) return pmf_call_one(i2sdev->enable, &args); if (macio == NULL || macio->base == NULL) return -ENODEV; switch (i2sdev->bus_number) { case 0: /* these need to be locked or done through * newly created feature calls! 
*/ MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE); break; case 1: MACIO_BIS(KEYLARGO_FCR1, KL1_I2S1_ENABLE); break; default: return -ENODEV; } return 0; } int i2sbus_control_cell(struct i2sbus_control *c, struct i2sbus_dev *i2sdev, int enable) { struct pmf_args args = { .count = 0 }; struct macio_chip *macio = c->macio; switch (enable) { case 0: if (i2sdev->cell_disable) return pmf_call_one(i2sdev->cell_disable, &args); break; case 1: if (i2sdev->cell_enable) return pmf_call_one(i2sdev->cell_enable, &args); break; default: printk(KERN_ERR "i2sbus: INVALID CELL ENABLE VALUE\n"); return -ENODEV; } if (macio == NULL || macio->base == NULL) return -ENODEV; switch (i2sdev->bus_number) { case 0: if (enable) MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE); else MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE); break; case 1: if (enable) MACIO_BIS(KEYLARGO_FCR1, KL1_I2S1_CELL_ENABLE); else MACIO_BIC(KEYLARGO_FCR1, KL1_I2S1_CELL_ENABLE); break; default: return -ENODEV; } return 0; } int i2sbus_control_clock(struct i2sbus_control *c, struct i2sbus_dev *i2sdev, int enable) { struct pmf_args args = { .count = 0 }; struct macio_chip *macio = c->macio; switch (enable) { case 0: if (i2sdev->clock_disable) return pmf_call_one(i2sdev->clock_disable, &args); break; case 1: if (i2sdev->clock_enable) return pmf_call_one(i2sdev->clock_enable, &args); break; default: printk(KERN_ERR "i2sbus: INVALID CLOCK ENABLE VALUE\n"); return -ENODEV; } if (macio == NULL || macio->base == NULL) return -ENODEV; switch (i2sdev->bus_number) { case 0: if (enable) MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT); else MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT); break; case 1: if (enable) MACIO_BIS(KEYLARGO_FCR1, KL1_I2S1_CLK_ENABLE_BIT); else MACIO_BIC(KEYLARGO_FCR1, KL1_I2S1_CLK_ENABLE_BIT); break; default: return -ENODEV; } return 0; }
gpl-2.0
mmihail/trinitycore10353
src/server/scripts/EasternKingdoms/BlackwingLair/boss_broodlord_lashlayer.cpp
13
3426
/*
 * Copyright (C) 2008-2010 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: Boss_Broodlord_Lashlayer
SD%Complete: 100
SDComment:
SDCategory: Blackwing Lair
EndScriptData */

#include "ScriptPCH.h"

// Broadcast-text entries (negative ids reference the script_texts table).
#define SAY_AGGRO           -1469000
#define SAY_LEASH           -1469001

// Spell ids cast by the boss.
#define SPELL_CLEAVE        26350
#define SPELL_BLASTWAVE     23331
#define SPELL_MORTALSTRIKE  24573
#define SPELL_KNOCKBACK     25778

/*
 * Boss script for Broodlord Lashlayer (Blackwing Lair).
 * Simple timer-driven AI: rotates Cleave, Blast Wave, Mortal Strike and
 * a threat-dropping Knockback, and yells when evading out of its leash
 * area.
 */
class boss_broodlord : public CreatureScript
{
public:
    boss_broodlord() : CreatureScript("boss_broodlord") { }

    CreatureAI* GetAI(Creature* pCreature) const
    {
        return new boss_broodlordAI (pCreature);
    }

    struct boss_broodlordAI : public ScriptedAI
    {
        boss_broodlordAI(Creature *c) : ScriptedAI(c) {}

        // Remaining milliseconds until each ability is cast again.
        uint32 Cleave_Timer;
        uint32 BlastWave_Timer;
        uint32 MortalStrike_Timer;
        uint32 KnockBack_Timer;

        void Reset()
        {
            Cleave_Timer = 8000;                //These times are probably wrong
            BlastWave_Timer = 12000;
            MortalStrike_Timer = 20000;
            KnockBack_Timer = 30000;
        }

        void EnterCombat(Unit * /*who*/)
        {
            DoScriptText(SAY_AGGRO, me);
            DoZoneInCombat();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            //Cleave_Timer
            if (Cleave_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_CLEAVE);
                Cleave_Timer = 7000;
            } else Cleave_Timer -= diff;

            // BlastWave
            if (BlastWave_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_BLASTWAVE);
                BlastWave_Timer = urand(8000,16000);
            } else BlastWave_Timer -= diff;

            //MortalStrike_Timer
            if (MortalStrike_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_MORTALSTRIKE);
                MortalStrike_Timer = urand(25000,35000);
            } else MortalStrike_Timer -= diff;

            if (KnockBack_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_KNOCKBACK);
                //Drop 50% aggro
                if (DoGetThreat(me->getVictim()))
                    DoModifyThreatPercent(me->getVictim(),-50);

                KnockBack_Timer = urand(15000,30000);
            } else KnockBack_Timer -= diff;

            // Yell once when the leash check triggers an evade.
            if (EnterEvadeIfOutOfCombatArea(diff))
                DoScriptText(SAY_LEASH, me);

            DoMeleeAttackIfReady();
        }
    };
};

// Registers the script with the script manager at startup.
void AddSC_boss_broodlord()
{
    new boss_broodlord();
}
gpl-2.0
IBYoung/oceanbase
oceanbase_0.4/src/common/ob_regex.cpp
13
2592
////=================================================================== // // ob_regex.cpp common / Oceanbase // // Copyright (C) 2010 Taobao.com, Inc. // // Created on 2011-03-19 by Yubai (yubai.lk@taobao.com) // // ------------------------------------------------------------------- // // Description // // // ------------------------------------------------------------------- // // Change Log // ////==================================================================== #include <stdlib.h> #include "tblog.h" #include "ob_malloc.h" #include "ob_regex.h" namespace oceanbase { namespace common { ObRegex::ObRegex() { init_ = false; match_ = NULL; nmatch_ = 0; } ObRegex::~ObRegex() { if (true == init_) { destroy(); } } bool ObRegex::init(const char* pattern, int flags) { bool bret = false; if (init_) { TBSYS_LOG(WARN, "this=%p already inited", this); } else if (NULL == pattern) { TBSYS_LOG(WARN, "invalid param pattern=%p", pattern); } else { int tmp_ret = regcomp(&reg_, pattern, flags); if (0 != tmp_ret) { TBSYS_LOG(WARN, "regcomp fail ret=%d", tmp_ret); } else { nmatch_ = reg_.re_nsub + 1; match_ = (regmatch_t*)ob_malloc(sizeof(regmatch_t) * nmatch_, ObModIds::OB_REGEX); if (NULL == match_) { TBSYS_LOG(WARN, "create the regmatch object fail"); regfree(&reg_); } else { init_ = true; bret = true; } } } return bret; } bool ObRegex::match(const char* text, int flags) { bool bret = false; if (!init_) { TBSYS_LOG(WARN, "this=%p have not inited", this); } else if (NULL == text) { TBSYS_LOG(WARN, "invalid param text=%p", text); } else { int tmp_ret = regexec(&reg_, text, nmatch_, match_, flags); if (REG_NOMATCH == tmp_ret) { bret = false; } else if (tmp_ret != 0) { bret = false; } else { bret = true; } } return bret; } void ObRegex::destroy(void) { if (init_) { regfree(&reg_); if (NULL != match_) { ob_free(match_); match_ = NULL; } nmatch_ = 0; init_ = false; } } } }
gpl-2.0
MarvinCorro/linux-cmps107
drivers/staging/media/davinci_vpfe/vpfe_video.c
13
46480
/* * Copyright (C) 2012 Texas Instruments Inc * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Contributors: * Manjunath Hadli <manjunath.hadli@ti.com> * Prabhakar Lad <prabhakar.lad@ti.com> */ #include <linux/module.h> #include <linux/slab.h> #include <media/v4l2-ioctl.h> #include "vpfe.h" #include "vpfe_mc_capture.h" static int debug; /* get v4l2 subdev pointer to external subdev which is active */ static struct media_entity *vpfe_get_input_entity (struct vpfe_video_device *video) { struct vpfe_device *vpfe_dev = video->vpfe_dev; struct media_pad *remote; remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]); if (remote == NULL) { pr_err("Invalid media connection to isif/ccdc\n"); return NULL; } return remote->entity; } /* updates external subdev(sensor/decoder) which is active */ static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video) { struct vpfe_device *vpfe_dev = video->vpfe_dev; struct vpfe_config *vpfe_cfg; struct v4l2_subdev *subdev; struct media_pad *remote; int i; remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]); if (remote == NULL) { pr_err("Invalid media connection to isif/ccdc\n"); return -EINVAL; } subdev = media_entity_to_v4l2_subdev(remote->entity); vpfe_cfg = vpfe_dev->pdev->platform_data; for (i = 0; i < vpfe_cfg->num_subdevs; i++) { if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) { video->current_ext_subdev = 
&vpfe_cfg->sub_devs[i]; break; } } /* if user not linked decoder/sensor to isif/ccdc */ if (i == vpfe_cfg->num_subdevs) { pr_err("Invalid media chain connection to isif/ccdc\n"); return -EINVAL; } /* find the v4l2 subdev pointer */ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) { if (!strcmp(video->current_ext_subdev->module_name, vpfe_dev->sd[i]->name)) video->current_ext_subdev->subdev = vpfe_dev->sd[i]; } return 0; } /* get the subdev which is connected to the output video node */ static struct v4l2_subdev * vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad) { struct media_pad *remote = media_entity_remote_pad(&video->pad); if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) return NULL; if (pad) *pad = remote->index; return media_entity_to_v4l2_subdev(remote->entity); } /* get the format set at output pad of the adjacent subdev */ static int __vpfe_video_get_format(struct vpfe_video_device *video, struct v4l2_format *format) { struct v4l2_subdev_format fmt; struct v4l2_subdev *subdev; struct media_pad *remote; u32 pad; int ret; subdev = vpfe_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; remote = media_entity_remote_pad(&video->pad); fmt.pad = remote->index; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); if (ret == -ENOIOCTLCMD) return -EINVAL; format->type = video->type; /* convert mbus_format to v4l2_format */ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format); mbus_to_pix(&fmt.format, &format->fmt.pix); return 0; } /* make a note of pipeline details */ static int vpfe_prepare_pipeline(struct vpfe_video_device *video) { struct media_entity_graph graph; struct media_entity *entity = &video->video_dev.entity; struct media_device *mdev = entity->graph_obj.mdev; struct vpfe_pipeline *pipe = &video->pipe; struct vpfe_video_device *far_end = NULL; int ret; pipe->input_num = 0; pipe->output_num = 0; if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 
		pipe->inputs[pipe->input_num++] = video;
	else
		pipe->outputs[pipe->output_num++] = video;

	mutex_lock(&mdev->graph_mutex);
	ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return -ENOMEM;
	}
	media_entity_graph_walk_start(&graph, entity);
	while ((entity = media_entity_graph_walk_next(&graph))) {
		/* Skip the video node the walk started from. */
		if (entity == &video->video_dev.entity)
			continue;
		if (!is_media_entity_v4l2_io(entity))
			continue;
		far_end = to_vpfe_video(media_entity_to_video_device(entity));
		/* OUTPUT-type far ends feed the pipe; everything else drains it. */
		if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			pipe->inputs[pipe->input_num++] = far_end;
		else
			pipe->outputs[pipe->output_num++] = far_end;
	}
	media_entity_graph_walk_cleanup(&graph);
	mutex_unlock(&mdev->graph_mutex);
	return 0;
}

/*
 * update pipe state selected by user
 *
 * Prepares the pipeline, then derives the streaming mode: no input video
 * nodes means a continuous (external-subdev fed) pipeline, otherwise
 * single-shot.  Marks the video node initialized on success.
 */
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
	struct vpfe_pipeline *pipe = &video->pipe;
	int ret;

	ret = vpfe_prepare_pipeline(video);
	if (ret)
		return ret;

	/*
	 * Find out if there is any input video if yes, it is single shot.
	 */
	if (pipe->input_num == 0) {
		pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
		ret = vpfe_update_current_ext_subdev(video);
		if (ret) {
			pr_err("Invalid external subdev\n");
			return ret;
		}
	} else {
		pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
	}
	video->initialized = 1;
	video->skip_frame_count = 1;
	video->skip_frame_count_init = 1;
	return 0;
}

/*
 * checks whether pipeline is ready for enabling: every input and output
 * video node must be started and have a buffer queued to hardware.
 */
int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
{
	int i;

	for (i = 0; i < pipe->input_num; i++)
		if (!pipe->inputs[i]->started ||
		    pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
			return 0;
	for (i = 0; i < pipe->output_num; i++)
		if (!pipe->outputs[i]->started ||
		    pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
			return 0;
	return 1;
}

/**
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends.
 */
static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
{
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct v4l2_subdev *subdev;
	struct media_pad *pad;
	int ret;

	/*
	 * Should not matter if it is output[0] or 1 as
	 * the general ideas is to traverse backwards and
	 * the fact that the out video node always has the
	 * format of the connected pad.
	 */
	subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
	if (subdev == NULL)
		return -EPIPE;

	/* Walk upstream, comparing each link's sink format to its source. */
	while (1) {
		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt_sink.pad = pad->index;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Retrieve the source format */
		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;
		subdev = media_entity_to_v4l2_subdev(pad->entity);
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt_source.pad = pad->index;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.code != fmt_sink.format.code ||
		    fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;
	}
	return 0;
}

/*
 * vpfe_pipeline_enable() - Enable streaming on a pipeline
 * @vpfe_dev: vpfe device
 * @pipe: vpfe pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * all modules in the chain in the given mode.
 *
 * Return 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
{
	struct media_entity *entity;
	struct v4l2_subdev *subdev;
	struct media_device *mdev;
	int ret;

	if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
		entity = vpfe_get_input_entity(pipe->outputs[0]);
	else
		entity = &pipe->inputs[0]->video_dev.entity;

	mdev = entity->graph_obj.mdev;
	mutex_lock(&mdev->graph_mutex);
	ret = media_entity_graph_walk_init(&pipe->graph,
					   entity->graph_obj.mdev);
	if (ret)
		goto out;
	media_entity_graph_walk_start(&pipe->graph, entity);
	while ((entity = media_entity_graph_walk_next(&pipe->graph))) {
		if (!is_media_entity_v4l2_subdev(entity))
			continue;
		subdev = media_entity_to_v4l2_subdev(entity);
		ret = v4l2_subdev_call(subdev, video, s_stream, 1);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			break;
	}
out:
	/*
	 * On success the walk state stays allocated; vpfe_pipeline_disable()
	 * cleans it up after the stop walk.
	 */
	if (ret)
		media_entity_graph_walk_cleanup(&pipe->graph);
	mutex_unlock(&mdev->graph_mutex);
	return ret;
}

/*
 * vpfe_pipeline_disable() - Disable streaming on a pipeline
 * @vpfe_dev: vpfe device
 * @pipe: VPFE pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and stop
 * all modules in the chain.
 *
 * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a
 * module can't be stopped.
 */
static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
{
	struct media_entity *entity;
	struct v4l2_subdev *subdev;
	struct media_device *mdev;
	int ret = 0;

	if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
		entity = vpfe_get_input_entity(pipe->outputs[0]);
	else
		entity = &pipe->inputs[0]->video_dev.entity;

	mdev = entity->graph_obj.mdev;
	mutex_lock(&mdev->graph_mutex);
	/* Reuses the walk state allocated by vpfe_pipeline_enable(). */
	media_entity_graph_walk_start(&pipe->graph, entity);

	while ((entity = media_entity_graph_walk_next(&pipe->graph))) {
		if (!is_media_entity_v4l2_subdev(entity))
			continue;
		subdev = media_entity_to_v4l2_subdev(entity);
		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			break;
	}
	mutex_unlock(&mdev->graph_mutex);

	media_entity_graph_walk_cleanup(&pipe->graph);
	return ret ?
-ETIMEDOUT : 0; } /* * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline * @vpfe_dev: VPFE device * @pipe: VPFE pipeline * @state: Stream state (stopped or active) * * Set the pipeline to the given stream state. * * Return 0 if successful, or the return value of the failed video::s_stream * operation otherwise. */ static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe, enum vpfe_pipeline_stream_state state) { if (state == VPFE_PIPELINE_STREAM_STOPPED) return vpfe_pipeline_disable(pipe); return vpfe_pipeline_enable(pipe); } static int all_videos_stopped(struct vpfe_video_device *video) { struct vpfe_pipeline *pipe = &video->pipe; int i; for (i = 0; i < pipe->input_num; i++) if (pipe->inputs[i]->started) return 0; for (i = 0; i < pipe->output_num; i++) if (pipe->outputs[i]->started) return 0; return 1; } /* * vpfe_open() - open video device * @file: file pointer * * initialize media pipeline state, allocate memory for file handle * * Return 0 if successful, or the return -ENODEV otherwise. */ static int vpfe_open(struct file *file) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_fh *handle; /* Allocate memory for the file handle object */ handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL); if (handle == NULL) return -ENOMEM; v4l2_fh_init(&handle->vfh, &video->video_dev); v4l2_fh_add(&handle->vfh); mutex_lock(&video->lock); /* If decoder is not initialized. 
initialize it */ if (!video->initialized && vpfe_update_pipe_state(video)) { mutex_unlock(&video->lock); return -ENODEV; } /* Increment device users counter */ video->usrs++; /* Set io_allowed member to false */ handle->io_allowed = 0; handle->video = video; file->private_data = &handle->vfh; mutex_unlock(&video->lock); return 0; } /* get the next buffer available from dma queue */ static unsigned long vpfe_video_get_next_buffer(struct vpfe_video_device *video) { video->cur_frm = video->next_frm = list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list); list_del(&video->next_frm->list); video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE; return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0); } /* schedule the next buffer which is available on dma queue */ void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video) { struct vpfe_device *vpfe_dev = video->vpfe_dev; unsigned long addr; if (list_empty(&video->dma_queue)) return; video->next_frm = list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list); if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state) video->cur_frm = video->next_frm; list_del(&video->next_frm->list); video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE; addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0); video->ops->queue(vpfe_dev, addr); video->state = VPFE_VIDEO_BUFFER_QUEUED; } /* schedule the buffer for capturing bottom field */ void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video) { struct vpfe_device *vpfe_dev = video->vpfe_dev; unsigned long addr; addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0); addr += video->field_off; video->ops->queue(vpfe_dev, addr); } /* make buffer available for dequeue */ void vpfe_video_process_buffer_complete(struct vpfe_video_device *video) { struct vpfe_pipeline *pipe = &video->pipe; video->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&video->cur_frm->vb.vb2_buf, 
VB2_BUF_STATE_DONE); if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS) video->cur_frm = video->next_frm; } /* vpfe_stop_capture() - stop streaming */ static void vpfe_stop_capture(struct vpfe_video_device *video) { struct vpfe_pipeline *pipe = &video->pipe; video->started = 0; if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return; if (all_videos_stopped(video)) vpfe_pipeline_set_stream(pipe, VPFE_PIPELINE_STREAM_STOPPED); } /* * vpfe_release() - release video device * @file: file pointer * * deletes buffer queue, frees the buffers and the vpfe file handle * * Return 0 */ static int vpfe_release(struct file *file) { struct vpfe_video_device *video = video_drvdata(file); struct v4l2_fh *vfh = file->private_data; struct vpfe_device *vpfe_dev = video->vpfe_dev; struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh); v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n"); /* Get the device lock */ mutex_lock(&video->lock); /* if this instance is doing IO */ if (fh->io_allowed) { if (video->started) { vpfe_stop_capture(video); /* mark pipe state as stopped in vpfe_release(), as app might call streamon() after streamoff() in which case driver has to start streaming. 
*/ video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED; vb2_streamoff(&video->buffer_queue, video->buffer_queue.type); } video->io_usrs = 0; /* Free buffers allocated */ vb2_queue_release(&video->buffer_queue); vb2_dma_contig_cleanup_ctx(video->alloc_ctx); } /* Decrement device users counter */ video->usrs--; v4l2_fh_del(&fh->vfh); v4l2_fh_exit(&fh->vfh); /* If this is the last file handle */ if (!video->usrs) video->initialized = 0; mutex_unlock(&video->lock); file->private_data = NULL; /* Free memory allocated to file handle object */ v4l2_fh_del(vfh); kzfree(fh); return 0; } /* * vpfe_mmap() - It is used to map kernel space buffers * into user spaces */ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n"); return vb2_mmap(&video->buffer_queue, vma); } /* * vpfe_poll() - It is used for select/poll system call */ static unsigned int vpfe_poll(struct file *file, poll_table *wait) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n"); if (video->started) return vb2_poll(&video->buffer_queue, file, wait); return 0; } /* vpfe capture driver file operations */ static const struct v4l2_file_operations vpfe_fops = { .owner = THIS_MODULE, .open = vpfe_open, .release = vpfe_release, .unlocked_ioctl = video_ioctl2, .mmap = vpfe_mmap, .poll = vpfe_poll }; /* * vpfe_querycap() - query capabilities of video device * @file: file pointer * @priv: void pointer * @cap: pointer to v4l2_capability structure * * fills v4l2 capabilities structure * * Return 0 */ static int vpfe_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n"); if 
	    (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
	/* Device-level caps advertise both directions; per-node caps above. */
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
			    V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
	strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
	strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
	strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));

	return 0;
}

/*
 * vpfe_g_fmt() - get the format which is active on video device
 * @file: file pointer
 * @priv: void pointer
 * @fmt: pointer to v4l2_format structure
 *
 * fills v4l2 format structure with active format
 *
 * Return 0
 */
static int vpfe_g_fmt(struct file *file, void *priv,
		      struct v4l2_format *fmt)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
	/* Fill in the information about format */
	*fmt = video->fmt;
	return 0;
}

/*
 * vpfe_enum_fmt() - enum formats supported on media chain
 * @file: file pointer
 * @priv: void pointer
 * @fmt: pointer to v4l2_fmtdesc structure
 *
 * fills v4l2_fmtdesc structure with output format set on adjacent subdev,
 * only one format is enumerated as subdevs are already configured
 *
 * Return 0 if successful, error code otherwise
 */
static int vpfe_enum_fmt(struct file *file, void  *priv,
			 struct v4l2_fmtdesc *fmt)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_subdev_format sd_fmt;
	struct v4l2_mbus_framefmt mbus;
	struct v4l2_subdev *subdev;
	struct v4l2_format format;
	struct media_pad *remote;
	int ret;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");

	/*
	 * since already subdev pad format is set,
	 * only one pixel format is available
	 */
	if (fmt->index > 0) {
		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
		return -EINVAL;
	}
	/* get the remote pad */
	remote =
	media_entity_remote_pad(&video->pad);
	if (remote == NULL) {
		v4l2_err(&vpfe_dev->v4l2_dev,
			 "invalid remote pad for video node\n");
		return -EINVAL;
	}
	/* get the remote subdev */
	subdev = vpfe_video_remote_subdev(video, NULL);
	if (subdev == NULL) {
		v4l2_err(&vpfe_dev->v4l2_dev,
			 "invalid remote subdev for video node\n");
		return -EINVAL;
	}
	sd_fmt.pad = remote->index;
	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	/* get output format of remote subdev */
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
	if (ret) {
		v4l2_err(&vpfe_dev->v4l2_dev,
			 "invalid remote subdev for video node\n");
		return ret;
	}
	/* convert to pix format */
	mbus.code = sd_fmt.format.code;
	mbus_to_pix(&mbus, &format.fmt.pix);
	/* copy the result */
	fmt->pixelformat = format.fmt.pix.pixelformat;

	return 0;
}

/*
 * vpfe_s_fmt() - set the format on video device
 * @file: file pointer
 * @priv: void pointer
 * @fmt: pointer to v4l2_format structure
 *
 * validate and set the format on video device
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_s_fmt(struct file *file, void *priv,
		      struct v4l2_format *fmt)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_format format;
	int ret;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
	/* If streaming is started, return error */
	if (video->started) {
		v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
		return -EBUSY;
	}
	/* get adjacent subdev's output pad format */
	ret = __vpfe_video_get_format(video, &format);
	if (ret)
		return ret;
	/*
	 * NOTE(review): user-supplied fmt is overwritten with the subdev
	 * format rather than validated against it -- presumably intended;
	 * confirm against the subdev configuration model.
	 */
	*fmt = format;
	video->fmt = *fmt;
	return 0;
}

/*
 * vpfe_try_fmt() - try the format on video device
 * @file: file pointer
 * @priv: void pointer
 * @fmt: pointer to v4l2_format structure
 *
 * validate the format, update with correct format
 * based on output format set on adjacent subdev
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_try_fmt(struct file *file, void *priv,
			struct v4l2_format *fmt)
{
	struct vpfe_video_device *video =
	video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_format format;
	int ret;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
	/* get adjacent subdev's output pad format */
	ret = __vpfe_video_get_format(video, &format);
	if (ret)
		return ret;

	*fmt = format;
	return 0;
}

/*
 * vpfe_enum_input() - enum inputs supported on media chain
 * @file: file pointer
 * @priv: void pointer
 * @inp: pointer to v4l2_input structure
 *
 * fills v4l2_input structure with input available on media chain,
 * only one input is enumerated as media chain is setup by this time
 *
 * Return 0 if successful, -EINVAL is media chain is invalid
 */
static int vpfe_enum_input(struct file *file, void *priv,
			   struct v4l2_input *inp)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
	struct vpfe_device *vpfe_dev = video->vpfe_dev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
	/* enumerate from the subdev user has chosen through mc */
	if (inp->index < sdinfo->num_inputs) {
		memcpy(inp, &sdinfo->inputs[inp->index],
		       sizeof(struct v4l2_input));
		return 0;
	}
	return -EINVAL;
}

/*
 * vpfe_g_input() - get index of the input which is active
 * @file: file pointer
 * @priv: void pointer
 * @index: pointer to unsigned int
 *
 * set index with input index which is active
 */
static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");

	*index = video->current_input;
	return 0;
}

/*
 * vpfe_s_input() - set input which is pointed by input index
 * @file: file pointer
 * @priv: void pointer
 * @index: input index
 *
 * set input on external subdev
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct
	    vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_ext_subdev_info *sdinfo;
	struct vpfe_route *route;
	struct v4l2_input *inps;
	u32 output;
	u32 input;
	int ret;
	int i;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");

	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
	/*
	 * If streaming is started return device busy
	 * error
	 */
	if (video->started) {
		v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
		ret = -EBUSY;
		goto unlock_out;
	}

	sdinfo = video->current_ext_subdev;
	if (!sdinfo->registered) {
		ret = -EINVAL;
		goto unlock_out;
	}
	/* Optional board hook to route the chosen subdev's signal. */
	if (vpfe_dev->cfg->setup_input &&
	    vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
		ret = -EFAULT;
		v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
			 "couldn't setup input for %s\n",
			 sdinfo->module_name);
		goto unlock_out;
	}
	route = &sdinfo->routes[index];
	if (route && sdinfo->can_route) {
		input = route->input;
		output = route->output;
		ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
						 sdinfo->grp_id, video,
						 s_routing, input, output, 0);
		if (ret) {
			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
				 "s_input:error in setting input in decoder\n");
			ret = -EINVAL;
			goto unlock_out;
		}
	}
	/* set standards set by subdev in video device */
	for (i = 0; i < sdinfo->num_inputs; i++) {
		inps = &sdinfo->inputs[i];
		video->video_dev.tvnorms |= inps->std;
	}
	video->current_input = index;
unlock_out:
	mutex_unlock(&video->lock);
	return ret;
}

/*
 * vpfe_querystd() - query std which is being input on external subdev
 * @file: file pointer
 * @priv: void pointer
 * @std_id: pointer to v4l2_std_id structure
 *
 * call external subdev through v4l2_device_call_until_err to
 * get the std that is being active.
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_ext_subdev_info *sdinfo;
	int ret;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");

	ret = mutex_lock_interruptible(&video->lock);
	/*
	 * NOTE(review): sdinfo is read before the lock result is checked;
	 * harmless here (only returned on failure below), but cleaner to
	 * move it after the ret test.
	 */
	sdinfo = video->current_ext_subdev;
	if (ret)
		return ret;
	/* Call querystd function of decoder device */
	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
					 video, querystd, std_id);
	mutex_unlock(&video->lock);
	return ret;
}

/*
 * vpfe_s_std() - set std on external subdev
 * @file: file pointer
 * @priv: void pointer
 * @std_id: v4l2_std_id value
 *
 * set std pointed by std_id on external subdev by calling it using
 * v4l2_device_call_until_err
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_ext_subdev_info *sdinfo;
	int ret;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");

	/* Call decoder driver function to set the standard */
	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
	sdinfo = video->current_ext_subdev;
	/* If streaming is started, return device busy error */
	if (video->started) {
		v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
		ret = -EBUSY;
		goto unlock_out;
	}
	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
					 video, s_std, std_id);
	if (ret < 0) {
		v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
		/* cached standard becomes unknown on failure */
		video->stdid = V4L2_STD_UNKNOWN;
		goto unlock_out;
	}
	video->stdid = std_id;
unlock_out:
	mutex_unlock(&video->lock);
	return ret;
}

/* Report the standard cached by the last successful vpfe_s_std(). */
static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
	*tvnorm = video->stdid;
	return 0;
}

/*
 * vpfe_enum_dv_timings() - enumerate dv_timings which are supported by
 *			to external subdev
 * @file: file pointer
 * @fh: void pointer
 * @timings: pointer to v4l2_enum_dv_timings structure
 *
 * enum dv_timings's which are supported by external subdev through
 * v4l2_subdev_call
 *
 * Return 0 on success, error code otherwise
 */
static int
vpfe_enum_dv_timings(struct file *file, void *fh,
		     struct v4l2_enum_dv_timings *timings)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;

	timings->pad = 0;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
	return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
}

/*
 * vpfe_query_dv_timings() - query the dv_timings which is being input
 *			to external subdev
 * @file: file pointer
 * @fh: void pointer
 * @timings: pointer to v4l2_dv_timings structure
 *
 * get dv_timings which is being input on external subdev through
 * v4l2_subdev_call
 *
 * Return 0 on success, error code otherwise
 */
static int
vpfe_query_dv_timings(struct file *file, void *fh,
		      struct v4l2_dv_timings *timings)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
	return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
}

/*
 * vpfe_s_dv_timings() - set dv_timings on external subdev
 * @file: file pointer
 * @fh: void pointer
 * @timings: pointer to v4l2_dv_timings structure
 *
 * set dv_timings pointed by timings on external subdev through
 * v4l2_device_call_until_err, this configures amplifier also
 *
 * Return 0 on success, error code otherwise
 */
static int
vpfe_s_dv_timings(struct file *file, void *fh,
		  struct v4l2_dv_timings *timings)
{
	struct
	    vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");

	/* Setting DV timings invalidates any cached analog standard. */
	video->stdid = V4L2_STD_UNKNOWN;
	return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
					  video->current_ext_subdev->grp_id,
					  video, s_dv_timings, timings);
}

/*
 * vpfe_g_dv_timings() - get dv_timings which is set on external subdev
 * @file: file pointer
 * @fh: void pointer
 * @timings: pointer to v4l2_dv_timings structure
 *
 * get dv_timings which is set on external subdev through
 * v4l2_subdev_call
 *
 * Return 0 on success, error code otherwise
 */
static int
vpfe_g_dv_timings(struct file *file, void *fh,
		  struct v4l2_dv_timings *timings)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
	return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
}

/*
 *  Videobuf operations
 */

/*
 * vpfe_buffer_queue_setup : Callback function for buffer setup.
 * @vq: vb2_queue ptr
 * @fmt: v4l2 format
 * @nbuffers: ptr to number of buffers requested by application
 * @nplanes:: contains number of distinct video planes needed to hold a frame
 * @sizes[]: contains the size (in bytes) of each plane.
* @alloc_ctxs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer nbuffers and buffer size */ static int vpfe_buffer_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct vpfe_fh *fh = vb2_get_drv_priv(vq); struct vpfe_video_device *video = fh->video; struct vpfe_device *vpfe_dev = video->vpfe_dev; unsigned long size; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n"); size = video->fmt.fmt.pix.sizeimage; if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = video->alloc_ctx; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "nbuffers=%d, size=%lu\n", *nbuffers, size); return 0; } /* * vpfe_buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpfe_buffer_prepare(struct vb2_buffer *vb) { struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue); struct vpfe_video_device *video = fh->video; struct vpfe_device *vpfe_dev = video->vpfe_dev; unsigned long addr; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n"); if (vb->state != VB2_BUF_STATE_ACTIVE && vb->state != VB2_BUF_STATE_PREPARED) return 0; /* Initialize buffer */ vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage); if (vb2_plane_vaddr(vb, 0) && vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Make sure user addresses are aligned to 32 bytes */ if (!ALIGN(addr, 32)) return -EINVAL; return 0; } static void vpfe_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); /* Get the file handle object and device object */ struct vpfe_fh *fh = 
	    vb2_get_drv_priv(vb->vb2_queue);
	struct vpfe_video_device *video = fh->video;
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_pipeline *pipe = &video->pipe;
	struct vpfe_cap_buffer *buf = container_of(vbuf,
				struct vpfe_cap_buffer, vb);
	unsigned long flags;
	unsigned long empty;
	unsigned long addr;

	spin_lock_irqsave(&video->dma_queue_lock, flags);
	empty = list_empty(&video->dma_queue);
	/* add the buffer to the DMA queue */
	list_add_tail(&buf->list, &video->dma_queue);
	spin_unlock_irqrestore(&video->dma_queue_lock, flags);
	/* this case happens in case of single shot */
	if (empty && video->started && pipe->state ==
		VPFE_PIPELINE_STREAM_SINGLESHOT &&
		video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
		spin_lock(&video->dma_queue_lock);
		addr = vpfe_video_get_next_buffer(video);
		video->ops->queue(vpfe_dev, addr);

		video->state = VPFE_VIDEO_BUFFER_QUEUED;
		spin_unlock(&video->dma_queue_lock);

		/* enable h/w each time in single shot */
		if (vpfe_video_is_pipe_ready(pipe))
			vpfe_pipeline_set_stream(pipe,
					VPFE_PIPELINE_STREAM_SINGLESHOT);
	}
}

/* vpfe_start_capture() - start streaming on all the subdevs */
static int vpfe_start_capture(struct vpfe_video_device *video)
{
	struct vpfe_pipeline *pipe = &video->pipe;
	int ret = 0;

	video->started = 1;
	/* Only kick the pipeline once every node of it is ready. */
	if (vpfe_video_is_pipe_ready(pipe))
		ret = vpfe_pipeline_set_stream(pipe, pipe->state);

	return ret;
}

/*
 * Start streaming on the video node: dequeue the first buffer, hand it to
 * the hardware and enable the pipeline; on failure all queued buffers are
 * returned to vb2 in QUEUED state.
 */
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vpfe_fh *fh = vb2_get_drv_priv(vq);
	struct vpfe_video_device *video = fh->video;
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	unsigned long addr;
	int ret;

	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		goto streamoff;

	/* Get the next frame from the buffer queue */
	video->cur_frm = video->next_frm =
		list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
	/* Remove buffer from the buffer queue */
	list_del(&video->cur_frm->list);
	/* Mark state of the current frame to active */
	video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
	/* Initialize field_id and started member */
	video->field_id = 0;
	addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
	video->ops->queue(vpfe_dev, addr);
	video->state = VPFE_VIDEO_BUFFER_QUEUED;

	ret = vpfe_start_capture(video);
	if (ret) {
		struct vpfe_cap_buffer *buf, *tmp;

		vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
				VB2_BUF_STATE_QUEUED);
		list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
		goto unlock_out;
	}

	mutex_unlock(&video->lock);

	return ret;
unlock_out:
	mutex_unlock(&video->lock);
streamoff:
	/*
	 * NOTE(review): vb2_streamoff()'s result is discarded and 0 is
	 * returned even on the error paths -- looks intentional (vb2
	 * handles the buffers), but verify against vb2 start_streaming
	 * contract which expects a non-zero return on failure.
	 */
	ret = vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
	return 0;
}

/* Initialize a freshly allocated vb2 buffer's driver-private list head. */
static int vpfe_buffer_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpfe_cap_buffer *buf = container_of(vbuf,
						   struct vpfe_cap_buffer, vb);

	INIT_LIST_HEAD(&buf->list);
	return 0;
}

/* abort streaming and wait for last buffer */
static void vpfe_stop_streaming(struct vb2_queue *vq)
{
	struct vpfe_fh *fh = vb2_get_drv_priv(vq);
	struct vpfe_video_device *video = fh->video;

	/* release all active buffers */
	if (video->cur_frm == video->next_frm) {
		vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	} else {
		if (video->cur_frm != NULL)
			vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
		if (video->next_frm != NULL)
			vb2_buffer_done(&video->next_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
	}

	while (!list_empty(&video->dma_queue)) {
		video->next_frm = list_entry(video->dma_queue.next,
					     struct vpfe_cap_buffer, list);
		list_del(&video->next_frm->list);
		vb2_buffer_done(&video->next_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	}
}

/* Drop a buffer from the driver's DMA list if it is still active there. */
static void vpfe_buf_cleanup(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
	struct vpfe_video_device *video = fh->video;
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_cap_buffer *buf = container_of(vbuf, struct
vpfe_cap_buffer, vb); v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n"); if (vb->state == VB2_BUF_STATE_ACTIVE) list_del_init(&buf->list); } static struct vb2_ops video_qops = { .queue_setup = vpfe_buffer_queue_setup, .buf_init = vpfe_buffer_init, .buf_prepare = vpfe_buffer_prepare, .start_streaming = vpfe_start_streaming, .stop_streaming = vpfe_stop_streaming, .buf_cleanup = vpfe_buf_cleanup, .buf_queue = vpfe_buffer_queue, }; /* * vpfe_reqbufs() - supported REQBUF only once opening * the device. */ static int vpfe_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *req_buf) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; struct vpfe_fh *fh = file->private_data; struct vb2_queue *q; int ret; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n"); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type && V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n"); return -EINVAL; } ret = mutex_lock_interruptible(&video->lock); if (ret) return ret; if (video->io_usrs != 0) { v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); ret = -EBUSY; goto unlock_out; } video->memory = req_buf->memory; /* Initialize videobuf2 queue as per the buffer type */ video->alloc_ctx = vb2_dma_contig_init_ctx(vpfe_dev->pdev); if (IS_ERR(video->alloc_ctx)) { v4l2_err(&vpfe_dev->v4l2_dev, "Failed to get the context\n"); return PTR_ERR(video->alloc_ctx); } q = &video->buffer_queue; q->type = req_buf->type; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = fh; q->min_buffers_needed = 1; q->ops = &video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpfe_cap_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(q); if (ret) { v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); return ret; } fh->io_allowed = 1; video->io_usrs = 1; 
INIT_LIST_HEAD(&video->dma_queue); ret = vb2_reqbufs(&video->buffer_queue, req_buf); unlock_out: mutex_unlock(&video->lock); return ret; } /* * vpfe_querybuf() - query buffers for exchange */ static int vpfe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n"); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type && V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } if (video->memory != V4L2_MEMORY_MMAP) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n"); return -EINVAL; } /* Call vb2_querybuf to get information */ return vb2_querybuf(&video->buffer_queue, buf); } /* * vpfe_qbuf() - queue buffers for capture or processing */ static int vpfe_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; struct vpfe_fh *fh = file->private_data; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n"); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type && V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } /* * If this file handle is not allowed to do IO, * return error */ if (!fh->io_allowed) { v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n"); return -EACCES; } return vb2_qbuf(&video->buffer_queue, p); } /* * vpfe_dqbuf() - deque buffer which is done with processing */ static int vpfe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n"); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type && V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } return 
	    vb2_dqbuf(&video->buffer_queue,
		      buf, (file->f_flags & O_NONBLOCK));
}

/*
 * vpfe_streamon() - start streaming
 * @file: file pointer
 * @priv: void pointer
 * @buf_type: enum v4l2_buf_type
 *
 * queue buffer onto hardware for capture/processing and
 * start all the subdevs which are in media chain
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_streamon(struct file *file, void *priv,
			 enum v4l2_buf_type buf_type)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_pipeline *pipe = &video->pipe;
	struct vpfe_fh *fh = file->private_data;
	struct vpfe_ext_subdev_info *sdinfo;
	int ret = -EINVAL;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");

	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
	    V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
		return ret;
	}
	/* If file handle is not allowed IO, return error */
	if (!fh->io_allowed) {
		v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
		return -EACCES;
	}
	/* NOTE(review): sdinfo is assigned but never used below. */
	sdinfo = video->current_ext_subdev;
	/* If buffer queue is empty, return error */
	if (list_empty(&video->buffer_queue.queued_list)) {
		v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
		return -EIO;
	}
	/* Validate the pipeline */
	if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
		ret = vpfe_video_validate_pipeline(pipe);
		if (ret < 0)
			return ret;
	}
	/* Call vb2_streamon to start streaming */
	return vb2_streamon(&video->buffer_queue, buf_type);
}

/*
 * vpfe_streamoff() - stop streaming
 * @file: file pointer
 * @priv: void pointer
 * @buf_type: enum v4l2_buf_type
 *
 * stop all the subdevs which are in media chain
 *
 * Return 0 on success, error code otherwise
 */
static int vpfe_streamoff(struct file *file, void *priv,
			  enum v4l2_buf_type buf_type)
{
	struct vpfe_video_device *video = video_drvdata(file);
	struct vpfe_device *vpfe_dev = video->vpfe_dev;
	struct vpfe_fh *fh = file->private_data;
	int ret = 0;

	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");

	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
		return -EINVAL;
	}

	/* If io is not allowed for this file handle, return error */
	if (!fh->io_allowed) {
		v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
		return -EACCES;
	}

	/* If streaming is not started, return error */
	if (!video->started) {
		v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;

	vpfe_stop_capture(video);
	ret = vb2_streamoff(&video->buffer_queue, buf_type);
	mutex_unlock(&video->lock);

	return ret;
}

/* vpfe capture ioctl operations */
static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
	.vidioc_querycap	 = vpfe_querycap,
	.vidioc_g_fmt_vid_cap    = vpfe_g_fmt,
	.vidioc_s_fmt_vid_cap    = vpfe_s_fmt,
	.vidioc_try_fmt_vid_cap  = vpfe_try_fmt,
	.vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
	.vidioc_g_fmt_vid_out    = vpfe_g_fmt,
	.vidioc_s_fmt_vid_out    = vpfe_s_fmt,
	.vidioc_try_fmt_vid_out  = vpfe_try_fmt,
	.vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
	.vidioc_enum_input	 = vpfe_enum_input,
	.vidioc_g_input		 = vpfe_g_input,
	.vidioc_s_input		 = vpfe_s_input,
	.vidioc_querystd	 = vpfe_querystd,
	.vidioc_s_std		 = vpfe_s_std,
	.vidioc_g_std		 = vpfe_g_std,
	.vidioc_enum_dv_timings	 = vpfe_enum_dv_timings,
	.vidioc_query_dv_timings = vpfe_query_dv_timings,
	.vidioc_s_dv_timings	 = vpfe_s_dv_timings,
	.vidioc_g_dv_timings	 = vpfe_g_dv_timings,
	.vidioc_reqbufs		 = vpfe_reqbufs,
	.vidioc_querybuf	 = vpfe_querybuf,
	.vidioc_qbuf		 = vpfe_qbuf,
	.vidioc_dqbuf		 = vpfe_dqbuf,
	.vidioc_streamon	 = vpfe_streamon,
	.vidioc_streamoff	 = vpfe_streamoff,
};

/* VPFE video init function */
int vpfe_video_init(struct vpfe_video_device *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
direction = "input"; video->pad.flags = MEDIA_PAD_FL_SOURCE; video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; break; default: return -EINVAL; } /* Initialize field of video device */ video->video_dev.release = video_device_release; video->video_dev.fops = &vpfe_fops; video->video_dev.ioctl_ops = &vpfe_ioctl_ops; video->video_dev.minor = -1; video->video_dev.tvnorms = 0; snprintf(video->video_dev.name, sizeof(video->video_dev.name), "DAVINCI VIDEO %s %s", name, direction); spin_lock_init(&video->irqlock); spin_lock_init(&video->dma_queue_lock); mutex_init(&video->lock); ret = media_entity_pads_init(&video->video_dev.entity, 1, &video->pad); if (ret < 0) return ret; video_set_drvdata(&video->video_dev, video); return 0; } /* vpfe video device register function */ int vpfe_video_register(struct vpfe_video_device *video, struct v4l2_device *vdev) { int ret; video->video_dev.v4l2_dev = vdev; ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1); if (ret < 0) pr_err("%s: could not register video device (%d)\n", __func__, ret); return ret; } /* vpfe video device unregister function */ void vpfe_video_unregister(struct vpfe_video_device *video) { if (video_is_registered(&video->video_dev)) { video_unregister_device(&video->video_dev); media_entity_cleanup(&video->video_dev.entity); } }
gpl-2.0
open-source-explore/openjdk
jdk/src/solaris/native/java/lang/java_props_md.c
13
15858
/* * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ #ifdef __linux__ #include <stdio.h> #include <ctype.h> #endif #include <pwd.h> #include <locale.h> #ifndef ARCHPROPNAME #error "The macro ARCHPROPNAME has not been defined" #endif #include <sys/utsname.h> /* For os_name and os_version */ #include <langinfo.h> /* For nl_langinfo */ #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <unistd.h> #include <sys/param.h> #include <time.h> #include <errno.h> #include "locale_str.h" #include "java_props.h" #ifdef __linux__ #ifndef CODESET #define CODESET _NL_CTYPE_CODESET_NAME #endif #else #ifdef ALT_CODESET_KEY #define CODESET ALT_CODESET_KEY #endif #endif #ifdef JAVASE_EMBEDDED #include <dlfcn.h> #include <sys/stat.h> #endif /* Take an array of string pairs (map of key->value) and a string (key). 
* Examine each pair in the map to see if the first string (key) matches the * string. If so, store the second string of the pair (value) in the value and * return 1. Otherwise do nothing and return 0. The end of the map is * indicated by an empty string at the start of a pair (key of ""). */ static int mapLookup(char* map[], const char* key, char** value) { int i; for (i = 0; strcmp(map[i], ""); i += 2){ if (!strcmp(key, map[i])){ *value = map[i + 1]; return 1; } } return 0; } /* This function sets an environment variable using envstring. * The format of envstring is "name=value". * If the name has already existed, it will append value to the name. */ static void setPathEnvironment(char *envstring) { char name[20], *value, *current; value = strchr(envstring, '='); /* locate name and value separator */ if (! value) return; /* not a valid environment setting */ /* copy first part as environment name */ strncpy(name, envstring, value - envstring); name[value-envstring] = '\0'; value++; /* set value point to value of the envstring */ current = getenv(name); if (current) { if (! 
strstr(current, value)) { /* value is not found in current environment, append it */ char *temp = malloc(strlen(envstring) + strlen(current) + 2); strcpy(temp, name); strcat(temp, "="); strcat(temp, current); strcat(temp, ":"); strcat(temp, value); putenv(temp); } /* else the value has already been set, do nothing */ } else { /* environment variable is not found */ putenv(envstring); } } #ifndef P_tmpdir #define P_tmpdir "/var/tmp" #endif static int ParseLocale(int cat, char ** std_language, char ** std_script, char ** std_country, char ** std_variant, char ** std_encoding) { char temp[64]; char *language = NULL, *country = NULL, *variant = NULL, *encoding = NULL; char *p, encoding_variant[64]; char *lc; /* Query the locale set for the category */ lc = setlocale(cat, NULL); #ifndef __linux__ if (lc == NULL) { return 0; } if (cat == LC_CTYPE) { /* * Workaround for Solaris bug 4201684: Xlib doesn't like @euro * locales. Since we don't depend on the libc @euro behavior, * we just remove the qualifier. * On Linux, the bug doesn't occur; on the other hand, @euro * is needed there because it's a shortcut that also determines * the encoding - without it, we wouldn't get ISO-8859-15. * Therefore, this code section is Solaris-specific. */ lc = strdup(lc); /* keep a copy, setlocale trashes original. */ strcpy(temp, lc); p = strstr(temp, "@euro"); if (p != NULL) { *p = '\0'; setlocale(LC_ALL, temp); } } #else if (lc == NULL || !strcmp(lc, "C") || !strcmp(lc, "POSIX")) { lc = "en_US"; } #endif /* * locale string format in Solaris is * <language name>_<country name>.<encoding name>@<variant name> * <country name>, <encoding name>, and <variant name> are optional. */ strcpy(temp, lc); /* Parse the language, country, encoding, and variant from the * locale. Any of the elements may be missing, but they must occur * in the order language_country.encoding@variant, and must be * preceded by their delimiter (except for language). 
* * If the locale name (without .encoding@variant, if any) matches * any of the names in the locale_aliases list, map it to the * corresponding full locale name. Most of the entries in the * locale_aliases list are locales that include a language name but * no country name, and this facility is used to map each language * to a default country if that's possible. It's also used to map * the Solaris locale aliases to their proper Java locale IDs. */ if ((p = strchr(temp, '.')) != NULL) { strcpy(encoding_variant, p); /* Copy the leading '.' */ *p = '\0'; } else if ((p = strchr(temp, '@')) != NULL) { strcpy(encoding_variant, p); /* Copy the leading '@' */ *p = '\0'; } else { *encoding_variant = '\0'; } if (mapLookup(locale_aliases, temp, &p)) { strcpy(temp, p); // check the "encoding_variant" again, if any. if ((p = strchr(temp, '.')) != NULL) { strcpy(encoding_variant, p); /* Copy the leading '.' */ *p = '\0'; } else if ((p = strchr(temp, '@')) != NULL) { strcpy(encoding_variant, p); /* Copy the leading '@' */ *p = '\0'; } } language = temp; if ((country = strchr(temp, '_')) != NULL) { *country++ = '\0'; } p = encoding_variant; if ((encoding = strchr(p, '.')) != NULL) { p[encoding++ - p] = '\0'; p = encoding; } if ((variant = strchr(p, '@')) != NULL) { p[variant++ - p] = '\0'; } /* Normalize the language name */ if (std_language != NULL) { *std_language = "en"; if (language != NULL && mapLookup(language_names, language, std_language) == 0) { *std_language = malloc(strlen(language)+1); strcpy(*std_language, language); } } /* Normalize the country name */ if (std_country != NULL && country != NULL) { if (mapLookup(country_names, country, std_country) == 0) { *std_country = malloc(strlen(country)+1); strcpy(*std_country, country); } } /* Normalize the script and variant name. Note that we only use * variants listed in the mapping array; others are ignored. 
*/ if (variant != NULL) { if (std_script != NULL) { mapLookup(script_names, variant, std_script); } if (std_variant != NULL) { mapLookup(variant_names, variant, std_variant); } } /* Normalize the encoding name. Note that we IGNORE the string * 'encoding' extracted from the locale name above. Instead, we use the * more reliable method of calling nl_langinfo(CODESET). This function * returns an empty string if no encoding is set for the given locale * (e.g., the C or POSIX locales); we use the default ISO 8859-1 * converter for such locales. */ if (std_encoding != NULL) { /* OK, not so reliable - nl_langinfo() gives wrong answers on * Euro locales, in particular. */ if (strcmp(p, "ISO8859-15") == 0) p = "ISO8859-15"; else p = nl_langinfo(CODESET); /* Convert the bare "646" used on Solaris to a proper IANA name */ if (strcmp(p, "646") == 0) p = "ISO646-US"; /* return same result nl_langinfo would return for en_UK, * in order to use optimizations. */ *std_encoding = (*p != '\0') ? p : "ISO8859-1"; #ifdef __linux__ /* * Remap the encoding string to a different value for japanese * locales on linux so that customized converters are used instead * of the default converter for "EUC-JP". The customized converters * omit support for the JIS0212 encoding which is not supported by * the variant of "EUC-JP" encoding used on linux */ if (strcmp(p, "EUC-JP") == 0) { *std_encoding = "EUC-JP-LINUX"; } #else if (strcmp(p,"eucJP") == 0) { /* For Solaris use customized vendor defined character * customized EUC-JP converter */ *std_encoding = "eucJP-open"; } else if (strcmp(p, "Big5") == 0 || strcmp(p, "BIG5") == 0) { /* * Remap the encoding string to Big5_Solaris which augments * the default converter for Solaris Big5 locales to include * seven additional ideographic characters beyond those included * in the Java "Big5" converter. 
*/ *std_encoding = "Big5_Solaris"; } else if (strcmp(p, "Big5-HKSCS") == 0) { /* * Solaris uses HKSCS2001 */ *std_encoding = "Big5-HKSCS-2001"; } #endif } return 1; } #ifdef JAVASE_EMBEDDED /* Determine the default embedded toolkit based on whether lib/xawt/ * exists in the JRE. This can still be overridden by -Dawt.toolkit=XXX */ static char* getEmbeddedToolkit() { Dl_info dlinfo; char buf[MAXPATHLEN]; int32_t len; char *p; struct stat statbuf; /* Get address of this library and the directory containing it. */ dladdr((void *)getEmbeddedToolkit, &dlinfo); realpath((char *)dlinfo.dli_fname, buf); len = strlen(buf); p = strrchr(buf, '/'); /* Default AWT Toolkit on Linux and Solaris is XAWT. */ strncpy(p, "/xawt/", MAXPATHLEN-len-1); /* Check if it exists */ if (stat(buf, &statbuf) == -1 && errno == ENOENT) { /* No - this is a reduced-headless-jre so use special HToolkit */ return "sun.awt.HToolkit"; } else { /* Yes - this is a headful JRE so fallback to SE defaults */ return NULL; } } #endif /* This function gets called very early, before VM_CALLS are setup. * Do not use any of the VM_CALLS entries!!! */ java_props_t * GetJavaProperties(JNIEnv *env) { static java_props_t sprops; char *v; /* tmp var */ if (sprops.user_dir) { return &sprops; } /* tmp dir */ sprops.tmp_dir = P_tmpdir; /* Printing properties */ sprops.printerJob = "sun.print.PSPrinterJob"; /* patches/service packs installed */ sprops.patch_level = "unknown"; /* Java 2D properties */ sprops.graphics_env = "sun.awt.X11GraphicsEnvironment"; #ifdef JAVASE_EMBEDDED sprops.awt_toolkit = getEmbeddedToolkit(); if (sprops.awt_toolkit == NULL) // default as below #endif sprops.awt_toolkit = "sun.awt.X11.XToolkit"; /* This is used only for debugging of font problems. */ v = getenv("JAVA2D_FONTPATH"); sprops.font_dir = v ? 
v : NULL; #ifdef SI_ISALIST /* supported instruction sets */ { char list[258]; sysinfo(SI_ISALIST, list, sizeof(list)); sprops.cpu_isalist = strdup(list); } #else sprops.cpu_isalist = NULL; #endif /* endianness of platform */ { unsigned int endianTest = 0xff000000; if (((char*)(&endianTest))[0] != 0) sprops.cpu_endian = "big"; else sprops.cpu_endian = "little"; } /* os properties */ { struct utsname name; uname(&name); sprops.os_name = strdup(name.sysname); sprops.os_version = strdup(name.release); sprops.os_arch = ARCHPROPNAME; if (getenv("GNOME_DESKTOP_SESSION_ID") != NULL) { sprops.desktop = "gnome"; } else { sprops.desktop = NULL; } } /* Determine the language, country, variant, and encoding from the host, * and store these in the user.language, user.country, user.variant and * file.encoding system properties. */ setlocale(LC_ALL, ""); if (ParseLocale(LC_CTYPE, &(sprops.format_language), &(sprops.format_script), &(sprops.format_country), &(sprops.format_variant), &(sprops.encoding))) { ParseLocale(LC_MESSAGES, &(sprops.language), &(sprops.script), &(sprops.country), &(sprops.variant), NULL); } else { sprops.language = "en"; sprops.encoding = "ISO8859-1"; } sprops.display_language = sprops.language; sprops.display_script = sprops.script; sprops.display_country = sprops.country; sprops.display_variant = sprops.variant; sprops.sun_jnu_encoding = sprops.encoding; #ifdef __linux__ #if __BYTE_ORDER == __LITTLE_ENDIAN sprops.unicode_encoding = "UnicodeLittle"; #else sprops.unicode_encoding = "UnicodeBig"; #endif #else sprops.unicode_encoding = "UnicodeBig"; #endif /* user properties */ { struct passwd *pwent = getpwuid(getuid()); sprops.user_name = pwent ? strdup(pwent->pw_name) : "?"; sprops.user_home = pwent ? strdup(pwent->pw_dir) : "?"; } /* User TIMEZONE */ { /* * We defer setting up timezone until it's actually necessary. * Refer to TimeZone.getDefault(). However, the system * property is necessary to be able to be set by the command * line interface -D. 
Here temporarily set a null string to * timezone. */ tzset(); /* for compatibility */ sprops.timezone = ""; } /* Current directory */ { char buf[MAXPATHLEN]; errno = 0; if (getcwd(buf, sizeof(buf)) == NULL) JNU_ThrowByName(env, "java/lang/Error", "Properties init: Could not determine current working directory."); else sprops.user_dir = strdup(buf); } sprops.file_separator = "/"; sprops.path_separator = ":"; sprops.line_separator = "\n"; /* Append CDE message and resource search path to NLSPATH and * XFILESEARCHPATH, in order to pick localized message for * FileSelectionDialog window (Bug 4173641). */ setPathEnvironment("NLSPATH=/usr/dt/lib/nls/msg/%L/%N.cat"); setPathEnvironment("XFILESEARCHPATH=/usr/dt/app-defaults/%L/Dt"); return &sprops; } jstring GetStringPlatform(JNIEnv *env, nchar* cstr) { return JNU_NewStringPlatform(env, cstr); }
gpl-2.0
wareash/linux-xylon
arch/microblaze/pci/pci-common.c
13
43191
/* * Contains common pci routines for ALL ppc platform * (based on pci_32.c and pci_64.c) * * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * Common pmac/prep/chrp pci routines. -- Cort * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/export.h> #include <asm/processor.h> #include <linux/io.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); /* XXX kill that some day ... 
*/ static int global_phb_number; /* Global phb counter */ /* ISA Memory physical address */ resource_size_t isa_mem_base; unsigned long isa_io_base; static int pci_bus_count; struct pci_controller *pcibios_alloc_controller(struct device_node *dev) { struct pci_controller *phb; phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); if (!phb) return NULL; spin_lock(&hose_spinlock); phb->global_number = global_phb_number++; list_add_tail(&phb->list_node, &hose_list); spin_unlock(&hose_spinlock); phb->dn = dev; phb->is_dynamic = mem_init_done; return phb; } void pcibios_free_controller(struct pci_controller *phb) { spin_lock(&hose_spinlock); list_del(&phb->list_node); spin_unlock(&hose_spinlock); if (phb->is_dynamic) kfree(phb); } static resource_size_t pcibios_io_size(const struct pci_controller *hose) { return resource_size(&hose->io_resource); } int pcibios_vaddr_is_ioport(void __iomem *address) { int ret = 0; struct pci_controller *hose; resource_size_t size; spin_lock(&hose_spinlock); list_for_each_entry(hose, &hose_list, list_node) { size = pcibios_io_size(hose); if (address >= hose->io_base_virt && address < (hose->io_base_virt + size)) { ret = 1; break; } } spin_unlock(&hose_spinlock); return ret; } unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller *hose; resource_size_t size; unsigned long ret = ~0; spin_lock(&hose_spinlock); list_for_each_entry(hose, &hose_list, list_node) { size = pcibios_io_size(hose); if (address >= hose->io_base_phys && address < (hose->io_base_phys + size)) { unsigned long base = (unsigned long)hose->io_base_virt - _IO_BASE; ret = base + (address - hose->io_base_phys); break; } } spin_unlock(&hose_spinlock); return ret; } EXPORT_SYMBOL_GPL(pci_address_to_pio); /* * Return the domain number for this bus. 
*/ int pci_domain_nr(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); return hose->global_number; } EXPORT_SYMBOL(pci_domain_nr); /* This routine is meant to be used early during boot, when the * PCI bus numbers have not yet been assigned, and you need to * issue PCI config cycles to an OF device. * It could also be used to "fix" RTAS config cycles if you want * to set pci_assign_all_buses to 1 and still use RTAS for PCI * config cycles. */ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) { while (node) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) if (hose->dn == node) return hose; node = node->parent; } return NULL; } void __weak pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } /* * Platform support for /proc/bus/pci/X/Y mmap()s, * modelled on the sparc64 implementation by Dave Miller. * -- paulus. */ /* * Adjust vm_pgoff of VMA such that it is the physical page offset * corresponding to the 32-bit pci bus offset for DEV requested by the user. * * Basically, the user finds the base address for his device which he wishes * to mmap. They read the 32-bit value from the config space base register, * add whatever PAGE_SIZE multiple offset they wish, and feed this into the * offset parameter of mmap on /proc/bus/pci/XXX for that device. * * Returns negative error code on failure, zero on success. 
*/ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, resource_size_t *offset, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(dev->bus); unsigned long io_offset = 0; int i, res_bit; if (!hose) return NULL; /* should never happen */ /* If memory, add on the PCI bridge address offset */ if (mmap_state == pci_mmap_mem) { #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ *offset += hose->pci_mem_offset; #endif res_bit = IORESOURCE_MEM; } else { io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; *offset += io_offset; res_bit = IORESOURCE_IO; } /* * Check that the offset requested corresponds to one of the * resources of the device. */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &dev->resource[i]; int flags = rp->flags; /* treat ROM as memory (should be already) */ if (i == PCI_ROM_RESOURCE) flags |= IORESOURCE_MEM; /* Active and same type? */ if ((flags & res_bit) == 0) continue; /* In the range of this resource? */ if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) continue; /* found it! construct the final physical address */ if (mmap_state == pci_mmap_io) *offset += hose->io_base_phys - io_offset; return rp; } return NULL; } /* * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t protection, enum pci_mmap_state mmap_state, int write_combine) { pgprot_t prot = protection; /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a * "prefetchable" resource. 
This is a bit hackish, but we use * this to workaround the inability of /sysfs to provide a write * combine bit */ if (mmap_state != pci_mmap_mem) write_combine = 0; else if (write_combine == 0) { if (rp->flags & IORESOURCE_PREFETCH) write_combine = 1; } return pgprot_noncached(prot); } /* * This one is used by /dev/mem and fbdev who have no clue about the * PCI device, it tries to find the PCI device first and calls the * above routine */ pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t prot) { struct pci_dev *pdev = NULL; struct resource *found = NULL; resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; int i; if (page_is_ram(pfn)) return prot; prot = pgprot_noncached(prot); for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; int flags = rp->flags; /* Active and same type? */ if ((flags & IORESOURCE_MEM) == 0) continue; /* In the range of this resource? */ if (offset < (rp->start & PAGE_MASK) || offset > rp->end) continue; found = rp; break; } if (found) break; } if (found) { if (found->flags & IORESOURCE_PREFETCH) prot = pgprot_noncached_wc(prot); pci_dev_put(pdev); } pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", (unsigned long long)offset, pgprot_val(prot)); return prot; } /* * Perform the actual remap of the pages for a PCI device mapping, as * appropriate for this architecture. The region in the process to map * is described by vm_start and vm_end members of VMA, the base physical * address is found in vm_pgoff. * The pci device structure is provided so that architectures may make mapping * decisions on a per-device or per-bus basis. * * Returns a negative error code on failure, zero on success. 
*/ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; struct resource *rp; int ret; rp = __pci_mmap_make_offset(dev, &offset, mmap_state); if (rp == NULL) return -EINVAL; vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, vma->vm_page_prot, mmap_state, write_combine); ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); return ret; } /* This provides legacy IO read access on a bus */ int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) { unsigned long offset; struct pci_controller *hose = pci_bus_to_host(bus); struct resource *rp = &hose->io_resource; void __iomem *addr; /* Check if port can be supported by that bus. We only check * the ranges of the PHB though, not the bus itself as the rules * for forwarding legacy cycles down bridges are not our problem * here. So if the host bridge supports it, we do it. */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; offset += port; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (offset < rp->start || (offset + size) > rp->end) return -ENXIO; addr = hose->io_base_virt + port; switch (size) { case 1: *((u8 *)val) = in_8(addr); return 1; case 2: if (port & 1) return -EINVAL; *((u16 *)val) = in_le16(addr); return 2; case 4: if (port & 3) return -EINVAL; *((u32 *)val) = in_le32(addr); return 4; } return -EINVAL; } /* This provides legacy IO write access on a bus */ int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) { unsigned long offset; struct pci_controller *hose = pci_bus_to_host(bus); struct resource *rp = &hose->io_resource; void __iomem *addr; /* Check if port can be supported by that bus. 
We only check * the ranges of the PHB though, not the bus itself as the rules * for forwarding legacy cycles down bridges are not our problem * here. So if the host bridge supports it, we do it. */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; offset += port; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (offset < rp->start || (offset + size) > rp->end) return -ENXIO; addr = hose->io_base_virt + port; /* WARNING: The generic code is idiotic. It gets passed a pointer * to what can be a 1, 2 or 4 byte quantity and always reads that * as a u32, which means that we have to correct the location of * the data read within those 32 bits for size 1 and 2 */ switch (size) { case 1: out_8(addr, val >> 24); return 1; case 2: if (port & 1) return -EINVAL; out_le16(addr, val >> 16); return 2; case 4: if (port & 3) return -EINVAL; out_le32(addr, val); return 4; } return -EINVAL; } /* This provides legacy IO or memory mmap access on a bus */ int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; resource_size_t size = vma->vm_end - vma->vm_start; struct resource *rp; pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", pci_domain_nr(bus), bus->number, mmap_state == pci_mmap_mem ? "MEM" : "IO", (unsigned long long)offset, (unsigned long long)(offset + size - 1)); if (mmap_state == pci_mmap_mem) { /* Hack alert ! 
* * Because X is lame and can fail starting if it gets an error * trying to mmap legacy_mem (instead of just moving on without * legacy memory access) we fake it here by giving it anonymous * memory, effectively behaving just like /dev/zero */ if ((offset + size) > hose->isa_mem_size) { #ifdef CONFIG_MMU pr_debug("Process %s (pid:%d) mapped non-existing PCI", current->comm, current->pid); pr_debug("legacy memory for 0%04x:%02x\n", pci_domain_nr(bus), bus->number); #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } offset += hose->isa_mem_phys; } else { unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; unsigned long roffset = offset + io_offset; rp = &hose->io_resource; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (roffset < rp->start || (roffset + size) > rp->end) return -ENXIO; offset += hose->io_base_phys; } pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { struct pci_controller *hose = pci_bus_to_host(dev->bus); resource_size_t offset = 0; if (hose == NULL) return; if (rsrc->flags & IORESOURCE_IO) offset = (unsigned long)hose->io_base_virt - _IO_BASE; /* We pass a fully fixed up address to userland for MMIO instead of * a BAR value because X is lame and expects to be able to use that * to pass to /dev/mem ! * * That means that we'll have potentially 64 bits values where some * userland apps only expect 32 (like X itself since it thinks only * Sparc has 64 bits MMIO) but if we don't do that, we break it on * 32 bits CHRPs :-( * * Hopefully, the sysfs insterface is immune to that gunk. 
Once X * has been fixed (and the fix spread enough), we can re-enable the * 2 lines below and pass down a BAR value to userland. In that case * we'll also have to re-enable the matching code in * __pci_mmap_make_offset(). * * BenH. */ #if 0 else if (rsrc->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; #endif *start = rsrc->start - offset; *end = rsrc->end - offset; } /** * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree * @hose: newly allocated pci_controller to be setup * @dev: device node of the host bridge * @primary: set if primary bus (32 bits only, soon to be deprecated) * * This function will parse the "ranges" property of a PCI host bridge device * node and setup the resource mapping of a pci controller based on its * content. * * Life would be boring if it wasn't for a few issues that we have to deal * with here: * * - We can only cope with one IO space range and up to 3 Memory space * ranges. However, some machines (thanks Apple !) tend to split their * space into lots of small contiguous ranges. So we have to coalesce. * * - We can only cope with all memory ranges having the same offset * between CPU addresses and PCI addresses. Unfortunately, some bridges * are setup for a large 1:1 mapping along with a small "window" which * maps PCI address 0 to some arbitrary high address of the CPU space in * order to give access to the ISA memory hole. * The way out of here that I've chosen for now is to always set the * offset based on the first resource found, then override it if we * have a different offset and the previous was set by an ISA hole. * * - Some busses have IO space not starting at 0, which causes trouble with * the way we do our IO resource renumbering. The code somewhat deals with * it for 64 bits but I would expect problems on 32 bits. 
* * - Some 32 bits platforms such as 4xx can have physical space larger than * 32 bits so we need to use 64 bits values for the parsing */ void pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { int memno = 0, isa_hole = -1; unsigned long long isa_mb = 0; struct resource *res; struct of_pci_range range; struct of_pci_range_parser parser; pr_info("PCI host bridge %s %s ranges:\n", dev->full_name, primary ? "(primary)" : ""); /* Check for ranges property */ if (of_pci_range_parser_init(&parser, dev)) return; pr_debug("Parsing ranges property...\n"); for_each_of_pci_range(&parser, &range) { /* Read next ranges element */ pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ", range.pci_space, range.pci_addr); pr_debug("cpu_addr:0x%016llx size:0x%016llx\n", range.cpu_addr, range.size); /* If we failed translation or got a zero-sized region * (some FW try to feed us with non sensical zero sized regions * such as power3 which look like some kind of attempt * at exposing the VGA memory hole) */ if (range.cpu_addr == OF_BAD_ADDR || range.size == 0) continue; /* Act based on address space type */ res = NULL; switch (range.flags & IORESOURCE_TYPE_BITS) { case IORESOURCE_IO: pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n", range.cpu_addr, range.cpu_addr + range.size - 1, range.pci_addr); /* We support only one IO range */ if (hose->pci_io_size) { pr_info(" \\--> Skipped (too many) !\n"); continue; } /* On 32 bits, limit I/O space to 16MB */ if (range.size > 0x01000000) range.size = 0x01000000; /* 32 bits needs to map IOs here */ hose->io_base_virt = ioremap(range.cpu_addr, range.size); /* Expect trouble if pci_addr is not 0 */ if (primary) isa_io_base = (unsigned long)hose->io_base_virt; /* pci_io_size and io_base_phys always represent IO * space starting at 0 so we factor in pci_addr */ hose->pci_io_size = range.pci_addr + range.size; hose->io_base_phys = range.cpu_addr - range.pci_addr; /* Build resource */ res = 
&hose->io_resource; range.cpu_addr = range.pci_addr; break; case IORESOURCE_MEM: pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", range.cpu_addr, range.cpu_addr + range.size - 1, range.pci_addr, (range.pci_space & 0x40000000) ? "Prefetch" : ""); /* We support only 3 memory ranges */ if (memno >= 3) { pr_info(" \\--> Skipped (too many) !\n"); continue; } /* Handles ISA memory hole space here */ if (range.pci_addr == 0) { isa_mb = range.cpu_addr; isa_hole = memno; if (primary || isa_mem_base == 0) isa_mem_base = range.cpu_addr; hose->isa_mem_phys = range.cpu_addr; hose->isa_mem_size = range.size; } /* We get the PCI/Mem offset from the first range or * the, current one if the offset came from an ISA * hole. If they don't match, bugger. */ if (memno == 0 || (isa_hole >= 0 && range.pci_addr != 0 && hose->pci_mem_offset == isa_mb)) hose->pci_mem_offset = range.cpu_addr - range.pci_addr; else if (range.pci_addr != 0 && hose->pci_mem_offset != range.cpu_addr - range.pci_addr) { pr_info(" \\--> Skipped (offset mismatch) !\n"); continue; } /* Build resource */ res = &hose->mem_resources[memno++]; break; } if (res != NULL) { res->name = dev->full_name; res->flags = range.flags; res->start = range.cpu_addr; res->end = range.cpu_addr + range.size - 1; res->parent = res->child = res->sibling = NULL; } } /* If there's an ISA hole and the pci_mem_offset is -not- matching * the ISA hole offset, then we need to remove the ISA hole from * the resource list for that brige */ if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { unsigned int next = isa_hole + 1; pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb); if (next < memno) memmove(&hose->mem_resources[isa_hole], &hose->mem_resources[next], sizeof(struct resource) * (memno - next)); hose->mem_resources[--memno].flags = 0; } } /* Decide whether to display the domain number in /proc */ int pci_proc_domain(struct pci_bus *bus) { return 0; } /* This header fixup will do the resource fixup for all devices as they are * 
probed, but not for bridge ranges */ static void pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); int i; if (!hose) { pr_err("No host bridge for PCI dev %s !\n", pci_name(dev)); return; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; if (!res->flags) continue; if (res->start == 0) { pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); pr_debug("is unassigned\n"); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; continue; } pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); /* This function tries to figure out if a bridge resource has been initialized * by the firmware or not. It doesn't have to be absolutely bullet proof, but * things go more smoothly when it gets it right. It should covers cases such * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges */ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, struct resource *res) { struct pci_controller *hose = pci_bus_to_host(bus); struct pci_dev *dev = bus->self; resource_size_t offset; u16 command; int i; /* Job is a bit different between memory and IO */ if (res->flags & IORESOURCE_MEM) { /* If the BAR is non-0 (res != pci_mem_offset) then it's * probably been initialized by somebody */ if (res->start != hose->pci_mem_offset) return 0; /* The BAR is 0, let's check if memory decoding is enabled on * the bridge. If not, we consider it unassigned */ pci_read_config_word(dev, PCI_COMMAND, &command); if ((command & PCI_COMMAND_MEMORY) == 0) return 1; /* Memory decoding is enabled and the BAR is 0. 
If any of * the bridge resources covers that starting address (0 then * it's good enough for us for memory */ for (i = 0; i < 3; i++) { if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && hose->mem_resources[i].start == hose->pci_mem_offset) return 0; } /* Well, it starts at 0 and we know it will collide so we may as * well consider it as unassigned. That covers the Apple case. */ return 1; } else { /* If the BAR is non-0, then we consider it assigned */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; if (((res->start - offset) & 0xfffffffful) != 0) return 0; /* Here, we are a bit different than memory as typically IO * space starting at low addresses -is- valid. What we do * instead if that we consider as unassigned anything that * doesn't have IO enabled in the PCI command register, * and that's it. */ pci_read_config_word(dev, PCI_COMMAND, &command); if (command & PCI_COMMAND_IO) return 0; /* It's starting at 0 and IO is disabled in the bridge, consider * it unassigned */ return 1; } } /* Fixup resources of a PCI<->PCI bridge */ static void pcibios_fixup_bridge(struct pci_bus *bus) { struct resource *res; int i; struct pci_dev *dev = bus->self; pci_bus_for_each_resource(bus, res, i) { if (!res) continue; if (!res->flags) continue; if (i >= 3 && bus->self->transparent) continue; pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); /* Try to detect uninitialized P2P bridge resources, * and clear them out so they get re-assigned later */ if (pcibios_uninitialized_bridge_resource(bus, res)) { res->flags = 0; pr_debug("PCI:%s (unassigned)\n", pci_name(dev)); } else { pr_debug("PCI:%s %016llx-%016llx\n", pci_name(dev), (unsigned long long)res->start, (unsigned long long)res->end); } } } void pcibios_setup_bus_self(struct pci_bus *bus) { /* Fix up the bus resources for P2P bridges */ if (bus->self != NULL) pcibios_fixup_bridge(bus); } void 
pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; pr_debug("PCI: Fixup bus devices %d (%s)\n", bus->number, bus->self ? pci_name(bus->self) : "PHB"); list_for_each_entry(dev, &bus->devices, bus_list) { /* Setup OF node pointer in archdata */ dev->dev.of_node = pci_device_to_OF_node(dev); /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); /* Read default IRQs and fixup if necessary */ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); } } void pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge * bases. This is -not- called when generating the PCI tree from * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } EXPORT_SYMBOL(pcibios_fixup_bus); static int skip_isa_ioresource_align(struct pci_dev *dev) { return 0; } /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
*/ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { if (skip_isa_ioresource_align(dev)) return start; if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } return start; } EXPORT_SYMBOL(pcibios_align_resource); /* * Reparent resource children of pr that conflict with res * under res, and make res replace those children. */ static int __init reparent_resources(struct resource *parent, struct resource *res) { struct resource *p, **pp; struct resource **firstpp = NULL; for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { if (p->end < res->start) continue; if (res->end < p->start) break; if (p->start < res->start || p->end > res->end) return -1; /* not completely contained */ if (firstpp == NULL) firstpp = pp; } if (firstpp == NULL) return -1; /* didn't find any conflicting entries? */ res->parent = parent; res->child = *firstpp; res->sibling = *pp; *firstpp = res; *pp = NULL; for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", p->name, (unsigned long long)p->start, (unsigned long long)p->end, res->name); } return 0; } /* * Handle resources of PCI devices. If the world were perfect, we could * just allocate all the resource regions and do nothing more. It isn't. * On the other hand, we cannot just re-allocate all devices, as it would * require us to know lots of host bridge internals. So we attempt to * keep as much of the original configuration as possible, but tweak it * when it's found to be wrong. 
* * Known BIOS problems we have to work around: * - I/O or memory regions not configured * - regions configured, but not enabled in the command register * - bogus I/O addresses above 64K used * - expansion ROMs left enabled (this may sound harmless, but given * the fact the PCI specs explicitly allow address decoders to be * shared between expansion ROMs and other resource regions, it's * at least dangerous) * * Our solution: * (1) Allocate resources for all buses behind PCI-to-PCI bridges. * This gives us fixed barriers on where we can allocate. * (2) Allocate resources for all enabled devices. If there is * a collision, just mark the resource as unallocated. Also * disable expansion ROMs during this step. * (3) Try to allocate resources for disabled devices. If the * resources were assigned correctly, everything goes well, * if they weren't, they won't disturb allocation of other * resources. * (4) Assign new addresses to resources which were either * not configured at all or misconfigured. If explicitly * requested by the user, configure expansion ROM address * as well. */ static void pcibios_allocate_bus_resources(struct pci_bus *bus) { struct pci_bus *b; int i; struct resource *res, *pr; pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", pci_domain_nr(bus), bus->number); pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags || res->start > res->end || res->parent) continue; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? &ioport_resource : &iomem_resource; else { /* Don't bother with non-root busses when * re-assigning all resources. We clear the * resource flags as if they were colliding * and as such ensure proper re-allocation * later. */ pr = pci_find_parent_resource(bus->self, res); if (pr == res) { /* this happens when the generic PCI * code (wrongly) decides that this * bridge is transparent -- paulus */ continue; } } pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ", bus->self ? 
pci_name(bus->self) : "PHB", bus->number, i, (unsigned long long)res->start, (unsigned long long)res->end); pr_debug("[0x%x], parent %p (%s)\n", (unsigned int)res->flags, pr, (pr && pr->name) ? pr->name : "nil"); if (pr && !(pr->flags & IORESOURCE_UNSET)) { struct pci_dev *dev = bus->self; if (request_resource(pr, res) == 0) continue; /* * Must be a conflict with an existing entry. * Move that entry (or entries) under the * bridge resource and try again. */ if (reparent_resources(pr, res) == 0) continue; if (dev && i < PCI_BRIDGE_RESOURCE_NUM && pci_claim_bridge_resource(dev, i + PCI_BRIDGE_RESOURCES) == 0) continue; } pr_warn("PCI: Cannot allocate resource region "); pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number); res->start = res->end = 0; res->flags = 0; } list_for_each_entry(b, &bus->children, node) pcibios_allocate_bus_resources(b); } static inline void alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", pci_name(dev), idx, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || (pr->flags & IORESOURCE_UNSET) || request_resource(pr, r) < 0) { pr_warn("PCI: Cannot allocate resource region %d ", idx); pr_cont("of device %s, will remap\n", pci_name(dev)); if (pr) pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, (unsigned long long)pr->start, (unsigned long long)pr->end, (unsigned int)pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; r->start = 0; } } static void __init pcibios_allocate_resources(int pass) { struct pci_dev *dev = NULL; int idx, disabled; u16 command; struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { r = &dev->resource[idx]; if (r->parent) /* Already allocated */ continue; if (!r->flags || (r->flags & 
IORESOURCE_UNSET)) continue; /* Not assigned at all */ /* We only allocate ROMs on pass 1 just in case they * have been screwed up by firmware */ if (idx == PCI_ROM_RESOURCE) disabled = 1; if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) alloc_resource(dev, idx); } if (pass) continue; r = &dev->resource[PCI_ROM_RESOURCE]; if (r->flags) { /* Turn the ROM off, leave the resource region, * but keep it unregistered. */ u32 reg; pci_read_config_dword(dev, dev->rom_base_reg, &reg); if (reg & PCI_ROM_ADDRESS_ENABLE) { pr_debug("PCI: Switching off ROM of %s\n", pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE); } } } } static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset; struct resource *res, *pres; int i; pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); /* Check for IO */ if (!(hose->io_resource.flags & IORESOURCE_IO)) goto no_io; offset = (unsigned long)hose->io_base_virt - _IO_BASE; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy IO"; res->flags = IORESOURCE_IO; res->start = offset; res->end = (offset + 0xfff) & 0xfffffffful; pr_debug("Candidate legacy IO: %pR\n", res); if (request_resource(&hose->io_resource, res)) { pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } no_io: /* Check for memory */ offset = hose->pci_mem_offset; pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); for (i = 0; i < 3; i++) { pres = &hose->mem_resources[i]; if (!(pres->flags & IORESOURCE_MEM)) continue; pr_debug("hose mem res: %pR\n", pres); if ((pres->start - offset) <= 0xa0000 && (pres->end - offset) >= 0xbffff) break; } if (i >= 3) return; res = kzalloc(sizeof(struct resource), 
GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy VGA memory"; res->flags = IORESOURCE_MEM; res->start = 0xa0000 + offset; res->end = 0xbffff + offset; pr_debug("Candidate VGA memory: %pR\n", res); if (request_resource(pres, res)) { pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } } void __init pcibios_resource_survey(void) { struct pci_bus *b; /* Allocate and assign resources. If we re-assign everything, then * we skip the allocate phase */ list_for_each_entry(b, &pci_root_buses, node) pcibios_allocate_bus_resources(b); pcibios_allocate_resources(0); pcibios_allocate_resources(1); /* Before we start assigning unassigned resource, we try to reserve * the low IO area and the VGA memory area if they intersect the * bus available resources to avoid allocating things on top of them */ list_for_each_entry(b, &pci_root_buses, node) pcibios_reserve_legacy_regions(b); /* Now proceed to assigning things that were left unassigned */ pr_debug("PCI: Assigning unassigned resources...\n"); pci_assign_unassigned_resources(); } /* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the * rest of the code later, for now, keep it as-is as our main * resource allocation function doesn't deal with sub-trees yet. 
*/ void pcibios_claim_one_bus(struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &bus->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (r->parent || !r->start || !r->flags) continue; pr_debug("PCI: Claiming %s: ", pci_name(dev)); pr_debug("Resource %d: %016llx..%016llx [%x]\n", i, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); if (pci_claim_resource(dev, i) == 0) continue; pci_claim_bridge_resource(dev, i); } } list_for_each_entry(child_bus, &bus->children, node) pcibios_claim_one_bus(child_bus); } EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); /* pcibios_finish_adding_to_bus * * This is to be called by the hotplug code after devices have been * added to a bus, this include calling it for a PHB that is just * being added */ void pcibios_finish_adding_to_bus(struct pci_bus *bus) { pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", pci_domain_nr(bus), bus->number); /* Allocate bus and devices resources */ pcibios_allocate_bus_resources(bus); pcibios_claim_one_bus(bus); /* Add new devices to global lists. Register in proc, sysfs. 
*/ pci_bus_add_devices(bus); /* Fixup EEH */ /* eeh_add_device_tree_late(bus); */ } EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); static void pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) { unsigned long io_offset; struct resource *res; int i; /* Hookup PHB IO resource */ res = &hose->io_resource; /* Fixup IO space offset */ io_offset = (unsigned long)hose->io_base_virt - isa_io_base; res->start = (res->start + io_offset) & 0xffffffffu; res->end = (res->end + io_offset) & 0xffffffffu; if (!res->flags) { pr_warn("PCI: I/O resource not set for host "); pr_cont("bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); /* Workaround for lack of IO resource only on 32-bit */ res->start = (unsigned long)hose->io_base_virt - isa_io_base; res->end = res->start + IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; } pci_add_resource_offset(resources, res, (__force resource_size_t)(hose->io_base_virt - _IO_BASE)); pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { if (i > 0) continue; pr_err("PCI: Memory resource 0 not set for "); pr_cont("host bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); /* Workaround for lack of MEM resource only on 32-bit */ res->start = hose->pci_mem_offset; res->end = (resource_size_t)-1LL; res->flags = IORESOURCE_MEM; } pci_add_resource_offset(resources, res, hose->pci_mem_offset); pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); } pr_debug("PCI: PHB MEM offset = %016llx\n", (unsigned long long)hose->pci_mem_offset); pr_debug("PCI: PHB IO offset = %08lx\n", (unsigned long)hose->io_base_virt - _IO_BASE); } struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) { 
struct pci_controller *hose = bus->sysdata; return of_node_get(hose->dn); } static void pcibios_scan_phb(struct pci_controller *hose) { LIST_HEAD(resources); struct pci_bus *bus; struct device_node *node = hose->dn; pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node)); pcibios_setup_phb_resources(hose, &resources); bus = pci_scan_root_bus(hose->parent, hose->first_busno, hose->ops, hose, &resources); if (bus == NULL) { pr_err("Failed to create bus for PCI domain %04x\n", hose->global_number); pci_free_resource_list(&resources); return; } bus->busn_res.start = hose->first_busno; hose->bus = bus; hose->last_busno = bus->busn_res.end; } static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; int next_busno = 0; pr_info("PCI: Probing PCI hardware\n"); /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { hose->last_busno = 0xff; pcibios_scan_phb(hose); if (next_busno <= hose->last_busno) next_busno = hose->last_busno + 1; } pci_bus_count = next_busno; /* Call common code to handle resource allocation */ pcibios_resource_survey(); return 0; } subsys_initcall(pcibios_init); static struct pci_controller *pci_bus_to_hose(int bus) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) if (bus >= hose->first_busno && bus <= hose->last_busno) return hose; return NULL; } /* Provide information on locations of various I/O regions in physical * memory. Do this on a per-card basis so that we choose the right * root bridge. 
* Note that the returned IO or memory base is a physical address */ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) { struct pci_controller *hose; long result = -EOPNOTSUPP; hose = pci_bus_to_hose(bus); if (!hose) return -ENODEV; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return (long)isa_mem_base; } return result; } /* * Null PCI config access functions, for the case when we can't * find a hose. */ #define NULL_PCI_OP(rw, size, type) \ static int \ null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ { \ return PCIBIOS_DEVICE_NOT_FOUND; \ } static int null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { return PCIBIOS_DEVICE_NOT_FOUND; } static int null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { return PCIBIOS_DEVICE_NOT_FOUND; } static struct pci_ops null_pci_ops = { .read = null_read_config, .write = null_write_config, }; /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_bus * fake_pci_bus(struct pci_controller *hose, int busnr) { static struct pci_bus bus; if (!hose) pr_err("Can't find hose for PCI bus %d!\n", busnr); bus.number = busnr; bus.sysdata = hose; bus.ops = hose ? 
hose->ops : &null_pci_ops; return &bus; } #define EARLY_PCI_OP(rw, size, type) \ int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ int devfn, int offset, type value) \ { \ return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ devfn, offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) int early_find_capability(struct pci_controller *hose, int bus, int devfn, int cap) { return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); }
gpl-2.0
sunny256/linux
drivers/hwmon/adm1025.c
269
17733
/* * adm1025.c * * Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com> * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de> * * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6 * voltages (including its own power source) and up to two temperatures * (its own plus up to one external one). Voltages are scaled internally * (which is not the common way) with ratios such that the nominal value * of each voltage correspond to a register value of 192 (which means a * resolution of about 0.5% of the nominal value). Temperature values are * reported with a 1 deg resolution and a 3 deg accuracy. Complete * datasheet can be obtained from Analog's website at: * http://www.onsemi.com/PowerSolutions/product.do?id=ADM1025 * * This driver also supports the ADM1025A, which differs from the ADM1025 * only in that it has "open-drain VID inputs while the ADM1025 has * on-chip 100k pull-ups on the VID inputs". It doesn't make any * difference for us. * * This driver also supports the NE1619, a sensor chip made by Philips. * That chip is similar to the ADM1025A, with a few differences. The only * difference that matters to us is that the NE1619 has only two possible * addresses while the ADM1025A has a third one. Complete datasheet can be * obtained from Philips's website at: * http://www.semiconductors.philips.com/pip/NE1619DS.html * * Since the ADM1025 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and * concern all supported chipsets, unless mentioned otherwise. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> /* * Addresses to scan * ADM1025 and ADM1025A have three possible addresses: 0x2c, 0x2d and 0x2e. * NE1619 has two possible addresses: 0x2c and 0x2d. */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; enum chips { adm1025, ne1619 }; /* * The ADM1025 registers */ #define ADM1025_REG_MAN_ID 0x3E #define ADM1025_REG_CHIP_ID 0x3F #define ADM1025_REG_CONFIG 0x40 #define ADM1025_REG_STATUS1 0x41 #define ADM1025_REG_STATUS2 0x42 #define ADM1025_REG_IN(nr) (0x20 + (nr)) #define ADM1025_REG_IN_MAX(nr) (0x2B + (nr) * 2) #define ADM1025_REG_IN_MIN(nr) (0x2C + (nr) * 2) #define ADM1025_REG_TEMP(nr) (0x26 + (nr)) #define ADM1025_REG_TEMP_HIGH(nr) (0x37 + (nr) * 2) #define ADM1025_REG_TEMP_LOW(nr) (0x38 + (nr) * 2) #define ADM1025_REG_VID 0x47 #define ADM1025_REG_VID4 0x49 /* * Conversions and various macros * The ADM1025 uses signed 8-bit values for temperatures. */ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 }; #define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192) #define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \ (val) >= (scale) * 255 / 192 ? 255 : \ ((val) * 192 + (scale) / 2) / (scale)) #define TEMP_FROM_REG(reg) ((reg) * 1000) #define TEMP_TO_REG(val) ((val) <= -127500 ? -128 : \ (val) >= 126500 ? 127 : \ (((val) < 0 ? 
(val) - 500 : \ (val) + 500) / 1000)) /* * Client data (each client gets its own) */ struct adm1025_data { struct i2c_client *client; const struct attribute_group *groups[3]; struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ u8 in[6]; /* register value */ u8 in_max[6]; /* register value */ u8 in_min[6]; /* register value */ s8 temp[2]; /* register value */ s8 temp_min[2]; /* register value */ s8 temp_max[2]; /* register value */ u16 alarms; /* register values, combined */ u8 vid; /* register values, combined */ u8 vrm; }; static struct adm1025_data *adm1025_update_device(struct device *dev) { struct adm1025_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { int i; dev_dbg(&client->dev, "Updating data.\n"); for (i = 0; i < 6; i++) { data->in[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_IN(i)); data->in_min[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_IN_MIN(i)); data->in_max[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_IN_MAX(i)); } for (i = 0; i < 2; i++) { data->temp[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_TEMP(i)); data->temp_min[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_TEMP_LOW(i)); data->temp_max[i] = i2c_smbus_read_byte_data(client, ADM1025_REG_TEMP_HIGH(i)); } data->alarms = i2c_smbus_read_byte_data(client, ADM1025_REG_STATUS1) | (i2c_smbus_read_byte_data(client, ADM1025_REG_STATUS2) << 8); data->vid = (i2c_smbus_read_byte_data(client, ADM1025_REG_VID) & 0x0f) | ((i2c_smbus_read_byte_data(client, ADM1025_REG_VID4) & 0x01) << 4); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Sysfs stuff */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = 
adm1025_update_device(dev); return sprintf(buf, "%u\n", IN_FROM_REG(data->in[index], in_scale[index])); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[index], in_scale[index])); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[index], in_scale[index])); } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[index])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[index])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[index])); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[index] = IN_TO_REG(val, in_scale[index]); i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MIN(index), data->in_min[index]); mutex_unlock(&data->update_lock); return count; } static ssize_t 
set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[index] = IN_TO_REG(val, in_scale[index]); i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MAX(index), data->in_max[index]); mutex_unlock(&data->update_lock); return count; } #define set_in(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \ show_in_max, set_in_max, offset) set_in(0); set_in(1); set_in(2); set_in(3); set_in(4); set_in(5); static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[index] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_LOW(index), data->temp_min[index]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int index = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[index] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_HIGH(index), data->temp_max[index]); mutex_unlock(&data->update_lock); return count; } #define set_temp(offset) \ static 
SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \ show_temp_min, set_temp_min, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \ show_temp_max, set_temp_max, offset - 1) set_temp(1); set_temp(2); static ssize_t alarms_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static DEVICE_ATTR_RO(alarms); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14); static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1025_data *data = adm1025_update_device(dev); return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR_RO(cpu0_vid); static ssize_t vrm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1025_data *data = dev_get_drvdata(dev); return sprintf(buf, "%u\n", data->vrm); } static ssize_t vrm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adm1025_data *data = 
dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; if (val > 255) return -EINVAL; data->vrm = val; return count; } static DEVICE_ATTR_RW(vrm); /* * Real code */ static struct attribute *adm1025_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, NULL }; static const struct attribute_group adm1025_group = { .attrs = adm1025_attributes, }; static struct attribute *adm1025_attributes_in4[] = { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }; static const struct attribute_group adm1025_group_in4 = { .attrs = adm1025_attributes_in4, }; /* Return 0 if detection is successful, -ENODEV 
otherwise */ static int adm1025_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; const char *name; u8 man_id, chip_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Check for unused bits */ if ((i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG) & 0x80) || (i2c_smbus_read_byte_data(client, ADM1025_REG_STATUS1) & 0xC0) || (i2c_smbus_read_byte_data(client, ADM1025_REG_STATUS2) & 0xBC)) { dev_dbg(&adapter->dev, "ADM1025 detection failed at 0x%02x\n", client->addr); return -ENODEV; } /* Identification */ chip_id = i2c_smbus_read_byte_data(client, ADM1025_REG_CHIP_ID); if ((chip_id & 0xF0) != 0x20) return -ENODEV; man_id = i2c_smbus_read_byte_data(client, ADM1025_REG_MAN_ID); if (man_id == 0x41) name = "adm1025"; else if (man_id == 0xA1 && client->addr != 0x2E) name = "ne1619"; else return -ENODEV; strlcpy(info->type, name, I2C_NAME_SIZE); return 0; } static void adm1025_init_client(struct i2c_client *client) { u8 reg; struct adm1025_data *data = i2c_get_clientdata(client); int i; data->vrm = vid_which_vrm(); /* * Set high limits * Usually we avoid setting limits on driver init, but it happens * that the ADM1025 comes with stupid default limits (all registers * set to 0). In case the chip has not gone through any limit * setting yet, we better set the high limits to the max so that * no alarm triggers. 
*/ for (i = 0; i < 6; i++) { reg = i2c_smbus_read_byte_data(client, ADM1025_REG_IN_MAX(i)); if (reg == 0) i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MAX(i), 0xFF); } for (i = 0; i < 2; i++) { reg = i2c_smbus_read_byte_data(client, ADM1025_REG_TEMP_HIGH(i)); if (reg == 0) i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_HIGH(i), 0x7F); } /* * Start the conversions */ reg = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG); if (!(reg & 0x01)) i2c_smbus_write_byte_data(client, ADM1025_REG_CONFIG, (reg&0x7E)|0x01); } static int adm1025_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; struct device *hwmon_dev; struct adm1025_data *data; u8 config; data = devm_kzalloc(dev, sizeof(struct adm1025_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->client = client; mutex_init(&data->update_lock); /* Initialize the ADM1025 chip */ adm1025_init_client(client); /* sysfs hooks */ data->groups[0] = &adm1025_group; /* Pin 11 is either in4 (+12V) or VID4 */ config = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG); if (!(config & 0x20)) data->groups[1] = &adm1025_group_in4; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, data->groups); return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id adm1025_id[] = { { "adm1025", adm1025 }, { "ne1619", ne1619 }, { } }; MODULE_DEVICE_TABLE(i2c, adm1025_id); static struct i2c_driver adm1025_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "adm1025", }, .probe = adm1025_probe, .id_table = adm1025_id, .detect = adm1025_detect, .address_list = normal_i2c, }; module_i2c_driver(adm1025_driver); MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); MODULE_DESCRIPTION("ADM1025 driver"); MODULE_LICENSE("GPL");
gpl-2.0
McBane87/Sony_Z1c_LP.242_Kernel
drivers/usb/gadget/f_mbim.c
525
49476
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/device.h> #include <linux/usb/cdc.h> #include <linux/usb/composite.h> #include <linux/platform_device.h> #include <linux/spinlock.h> /* * This function is a "Mobile Broadband Interface Model" (MBIM) link. * MBIM is intended to be used with high-speed network attachments. * * Note that MBIM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. 
*/ #define MBIM_BULK_BUFFER_SIZE 4096 #define MBIM_IOCTL_MAGIC 'o' #define MBIM_GET_NTB_SIZE _IOR(MBIM_IOCTL_MAGIC, 2, u32) #define MBIM_GET_DATAGRAM_COUNT _IOR(MBIM_IOCTL_MAGIC, 3, u16) #define NR_MBIM_PORTS 1 /* ID for Microsoft OS String */ #define MBIM_OS_STRING_ID 0xEE struct ctrl_pkt { void *buf; int len; struct list_head list; }; struct mbim_ep_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; struct usb_endpoint_descriptor *notify; }; struct mbim_notify_port { struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; atomic_t notify_count; }; enum mbim_notify_state { MBIM_NOTIFY_NONE, MBIM_NOTIFY_CONNECT, MBIM_NOTIFY_SPEED, MBIM_NOTIFY_RESPONSE_AVAILABLE, }; struct f_mbim { struct usb_function function; struct usb_composite_dev *cdev; atomic_t online; atomic_t open_excl; atomic_t ioctl_excl; atomic_t read_excl; atomic_t write_excl; wait_queue_head_t read_wq; wait_queue_head_t write_wq; enum transport_type xport; u8 port_num; struct data_port bam_port; struct mbim_notify_port not_port; struct mbim_ep_descs fs; struct mbim_ep_descs hs; u8 ctrl_id, data_id; u8 data_alt_int; struct mbim_ndp_parser_opts *parser_opts; spinlock_t lock; struct list_head cpkt_req_q; struct list_head cpkt_resp_q; u32 ntb_input_size; u16 ntb_max_datagrams; atomic_t error; }; struct mbim_ntb_input_size { u32 ntb_input_size; u16 ntb_max_datagrams; u16 reserved; }; /* temporary variable used between mbim_open() and mbim_gadget_bind() */ static struct f_mbim *_mbim_dev; static unsigned int nr_mbim_ports; static struct mbim_ports { struct f_mbim *port; unsigned port_num; } mbim_ports[NR_MBIM_PORTS]; static inline struct f_mbim *func_to_mbim(struct usb_function *f) { return container_of(f, struct f_mbim, function); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned mbim_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 
* 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ #define MBIM_NTB_DEFAULT_IN_SIZE (0x4000) #define MBIM_NTB_OUT_SIZE (0x1000) #define MBIM_NDP_IN_DIVISOR (0x4) #define NTB_DEFAULT_IN_SIZE_IPA (0x2000) #define MBIM_NTB_OUT_SIZE_IPA (0x2000) #define MBIM_FORMATS_SUPPORTED USB_CDC_NCM_NTB16_SUPPORTED static struct usb_cdc_ncm_ntb_parameters mbim_ntb_parameters = { .wLength = sizeof mbim_ntb_parameters, .bmNtbFormatsSupported = cpu_to_le16(MBIM_FORMATS_SUPPORTED), .dwNtbInMaxSize = cpu_to_le32(MBIM_NTB_DEFAULT_IN_SIZE), .wNdpInDivisor = cpu_to_le16(MBIM_NDP_IN_DIVISOR), .wNdpInPayloadRemainder = cpu_to_le16(0), .wNdpInAlignment = cpu_to_le16(4), .dwNtbOutMaxSize = cpu_to_le32(MBIM_NTB_OUT_SIZE), .wNdpOutDivisor = cpu_to_le16(4), .wNdpOutPayloadRemainder = cpu_to_le16(0), .wNdpOutAlignment = cpu_to_le16(4), .wNtbOutMaxDatagrams = 0, }; /* * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. 
*/ #define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ #define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */ static struct usb_interface_assoc_descriptor mbim_iad_desc = { .bLength = sizeof mbim_iad_desc, .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, /* .bFirstInterface = DYNAMIC, */ .bInterfaceCount = 2, /* control + data */ .bFunctionClass = 2, .bFunctionSubClass = 0x0e, .bFunctionProtocol = 0, /* .iFunction = DYNAMIC */ }; /* interface descriptor: */ static struct usb_interface_descriptor mbim_control_intf = { .bLength = sizeof mbim_control_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 1, .bInterfaceClass = 0x02, .bInterfaceSubClass = 0x0e, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; static struct usb_cdc_header_desc mbim_header_desc = { .bLength = sizeof mbim_header_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_HEADER_TYPE, .bcdCDC = cpu_to_le16(0x0110), }; static struct usb_cdc_union_desc mbim_union_desc = { .bLength = sizeof(mbim_union_desc), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_UNION_TYPE, /* .bMasterInterface0 = DYNAMIC */ /* .bSlaveInterface0 = DYNAMIC */ }; static struct usb_cdc_mbb_desc mbb_desc = { .bLength = sizeof mbb_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_MBB_TYPE, .bcdMbbVersion = cpu_to_le16(0x0100), .wMaxControlMessage = cpu_to_le16(0x1000), .bNumberFilters = 0x20, .bMaxFilterSize = 0x80, .wMaxSegmentSize = cpu_to_le16(0xfe0), .bmNetworkCapabilities = 0x20, }; static struct usb_cdc_ext_mbb_desc ext_mbb_desc = { .bLength = sizeof ext_mbb_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_EXT_MBB_TYPE, .bcdMbbExtendedVersion = cpu_to_le16(0x0100), .bMaxOutstandingCmdMsges = 64, .wMTU = 1500, }; /* the default data interface has no endpoints ... 
*/ static struct usb_interface_descriptor mbim_data_nop_intf = { .bLength = sizeof mbim_data_nop_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = 0x0a, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0x02, /* .iInterface = DYNAMIC */ }; /* ... but the "real" data interface has two bulk endpoints */ static struct usb_interface_descriptor mbim_data_intf = { .bLength = sizeof mbim_data_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 1, .bNumEndpoints = 2, .bInterfaceClass = 0x0a, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0x02, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor fs_mbim_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT), .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, }; static struct usb_endpoint_descriptor fs_mbim_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor fs_mbim_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *mbim_fs_function[] = { (struct usb_descriptor_header *) &mbim_iad_desc, /* MBIM control descriptors */ (struct usb_descriptor_header *) &mbim_control_intf, (struct usb_descriptor_header *) &mbim_header_desc, (struct usb_descriptor_header *) &mbim_union_desc, (struct usb_descriptor_header *) &mbb_desc, (struct usb_descriptor_header *) &ext_mbb_desc, (struct usb_descriptor_header *) &fs_mbim_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &mbim_data_nop_intf, (struct 
usb_descriptor_header *) &mbim_data_intf, (struct usb_descriptor_header *) &fs_mbim_in_desc, (struct usb_descriptor_header *) &fs_mbim_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor hs_mbim_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = 4*cpu_to_le16(NCM_STATUS_BYTECOUNT), .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4, }; static struct usb_endpoint_descriptor hs_mbim_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor hs_mbim_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *mbim_hs_function[] = { (struct usb_descriptor_header *) &mbim_iad_desc, /* MBIM control descriptors */ (struct usb_descriptor_header *) &mbim_control_intf, (struct usb_descriptor_header *) &mbim_header_desc, (struct usb_descriptor_header *) &mbim_union_desc, (struct usb_descriptor_header *) &mbb_desc, (struct usb_descriptor_header *) &ext_mbb_desc, (struct usb_descriptor_header *) &hs_mbim_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &mbim_data_nop_intf, (struct usb_descriptor_header *) &mbim_data_intf, (struct usb_descriptor_header *) &hs_mbim_in_desc, (struct usb_descriptor_header *) &hs_mbim_out_desc, NULL, }; /* string descriptors: */ #define STRING_CTRL_IDX 0 #define STRING_DATA_IDX 1 static struct usb_string mbim_string_defs[] = { [STRING_CTRL_IDX].s = "MBIM Control", [STRING_DATA_IDX].s = "MBIM Data", { } /* end of list */ }; static struct usb_gadget_strings mbim_string_table = { .language = 0x0409, /* en-us */ .strings = 
mbim_string_defs, }; static struct usb_gadget_strings *mbim_strings[] = { &mbim_string_table, NULL, }; /* Microsoft OS Descriptors */ /* * We specify our own bMS_VendorCode byte which Windows will use * as the bRequest value in subsequent device get requests. */ #define MBIM_VENDOR_CODE 0xA5 /* Microsoft OS String */ static u8 mbim_os_string[] = { 18, /* sizeof(mtp_os_string) */ USB_DT_STRING, /* Signature field: "MSFT100" */ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, /* vendor code */ MBIM_VENDOR_CODE, /* padding */ 0 }; /* Microsoft Extended Configuration Descriptor Header Section */ struct mbim_ext_config_desc_header { __le32 dwLength; __u16 bcdVersion; __le16 wIndex; __u8 bCount; __u8 reserved[7]; }; /* Microsoft Extended Configuration Descriptor Function Section */ struct mbim_ext_config_desc_function { __u8 bFirstInterfaceNumber; __u8 bInterfaceCount; __u8 compatibleID[8]; __u8 subCompatibleID[8]; __u8 reserved[6]; }; /* Microsoft Extended Configuration Descriptor */ static struct { struct mbim_ext_config_desc_header header; struct mbim_ext_config_desc_function function; } mbim_ext_config_desc = { .header = { .dwLength = __constant_cpu_to_le32(sizeof mbim_ext_config_desc), .bcdVersion = __constant_cpu_to_le16(0x0100), .wIndex = __constant_cpu_to_le16(4), .bCount = 1, }, .function = { .bFirstInterfaceNumber = 0, .bInterfaceCount = 1, .compatibleID = { 'A', 'L', 'T', 'R', 'C', 'F', 'G' }, /* .subCompatibleID = DYNAMIC */ }, }; /* * Here are options for the Datagram Pointer table (NDP) parser. * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3), * in NDP16 offsets and sizes fields are 1 16bit word wide, * in NDP32 -- 2 16bit words wide. Also signatures are different. * To make the parser code the same, put the differences in the structure, * and switch pointers to the structures when the format is changed. 
*/ struct mbim_ndp_parser_opts { u32 nth_sign; u32 ndp_sign; unsigned nth_size; unsigned ndp_size; unsigned ndplen_align; /* sizes in u16 units */ unsigned dgram_item_len; /* index or length */ unsigned block_length; unsigned fp_index; unsigned reserved1; unsigned reserved2; unsigned next_fp_index; }; #define INIT_NDP16_OPTS { \ .nth_sign = USB_CDC_NCM_NTH16_SIGN, \ .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \ .nth_size = sizeof(struct usb_cdc_ncm_nth16), \ .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \ .ndplen_align = 4, \ .dgram_item_len = 1, \ .block_length = 1, \ .fp_index = 1, \ .reserved1 = 0, \ .reserved2 = 0, \ .next_fp_index = 1, \ } #define INIT_NDP32_OPTS { \ .nth_sign = USB_CDC_NCM_NTH32_SIGN, \ .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \ .nth_size = sizeof(struct usb_cdc_ncm_nth32), \ .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \ .ndplen_align = 8, \ .dgram_item_len = 2, \ .block_length = 2, \ .fp_index = 2, \ .reserved1 = 1, \ .reserved2 = 2, \ .next_fp_index = 2, \ } static struct mbim_ndp_parser_opts mbim_ndp16_opts = INIT_NDP16_OPTS; static struct mbim_ndp_parser_opts mbim_ndp32_opts = INIT_NDP32_OPTS; static inline int mbim_lock(atomic_t *excl) { if (atomic_inc_return(excl) == 1) { return 0; } else { atomic_dec(excl); return -EBUSY; } } static inline void mbim_unlock(atomic_t *excl) { atomic_dec(excl); } static struct ctrl_pkt *mbim_alloc_ctrl_pkt(unsigned len, gfp_t flags) { struct ctrl_pkt *pkt; pkt = kzalloc(sizeof(struct ctrl_pkt), flags); if (!pkt) return ERR_PTR(-ENOMEM); pkt->buf = kmalloc(len, flags); if (!pkt->buf) { kfree(pkt); return ERR_PTR(-ENOMEM); } pkt->len = len; return pkt; } static void mbim_free_ctrl_pkt(struct ctrl_pkt *pkt) { if (pkt) { kfree(pkt->buf); kfree(pkt); } } static struct usb_request *mbim_alloc_req(struct usb_ep *ep, int buffer_size) { struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!req) return NULL; req->buf = kmalloc(buffer_size, GFP_KERNEL); if (!req->buf) { usb_ep_free_request(ep, req); 
return NULL; } req->length = buffer_size; return req; } void fmbim_free_req(struct usb_ep *ep, struct usb_request *req) { if (req) { kfree(req->buf); usb_ep_free_request(ep, req); } } static void fmbim_ctrl_response_available(struct f_mbim *dev) { struct usb_request *req = dev->not_port.notify_req; struct usb_cdc_notification *event = NULL; unsigned long flags; int ret; pr_debug("dev:%p portno#%d\n", dev, dev->port_num); spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online)) { pr_err("dev:%p is not online\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (!req) { pr_err("dev:%p req is NULL\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (!req->buf) { pr_err("dev:%p req->buf is NULL\n", dev); spin_unlock_irqrestore(&dev->lock, flags); return; } if (atomic_inc_return(&dev->not_port.notify_count) != 1) { pr_debug("delay ep_queue: notifications queue is busy[%d]", atomic_read(&dev->not_port.notify_count)); spin_unlock_irqrestore(&dev->lock, flags); return; } req->length = sizeof *event; event = req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ctrl_id); event->wLength = cpu_to_le16(0); spin_unlock_irqrestore(&dev->lock, flags); ret = usb_ep_queue(dev->not_port.notify, req, GFP_ATOMIC); if (ret) { atomic_dec(&dev->not_port.notify_count); pr_err("ep enqueue error %d\n", ret); } pr_debug("Successful Exit"); } static int fmbim_send_cpkt_response(struct f_mbim *gr, struct ctrl_pkt *cpkt) { struct f_mbim *dev = gr; unsigned long flags; if (!gr || !cpkt) { pr_err("Invalid cpkt, dev:%p cpkt:%p\n", gr, cpkt); return -ENODEV; } pr_debug("dev:%p port_num#%d\n", dev, dev->port_num); if (!atomic_read(&dev->online)) { pr_err("dev:%p is not connected\n", dev); mbim_free_ctrl_pkt(cpkt); return 0; } if (dev->not_port.notify_state != MBIM_NOTIFY_RESPONSE_AVAILABLE) { 
pr_err("dev:%p state=%d, recover!!\n", dev, dev->not_port.notify_state); mbim_free_ctrl_pkt(cpkt); return 0; } spin_lock_irqsave(&dev->lock, flags); list_add_tail(&cpkt->list, &dev->cpkt_resp_q); spin_unlock_irqrestore(&dev->lock, flags); fmbim_ctrl_response_available(dev); return 0; } /* ---------------------------- BAM INTERFACE ----------------------------- */ static int mbim_bam_setup(int no_ports) { int ret; pr_info("no_ports:%d\n", no_ports); ret = bam_data_setup(no_ports); if (ret) { pr_err("bam_data_setup failed err: %d\n", ret); return ret; } pr_info("Initialized %d ports\n", no_ports); return 0; } int mbim_configure_params(void) { struct teth_aggr_params aggr_params; int ret = 0; aggr_params.dl.aggr_prot = TETH_AGGR_PROTOCOL_MBIM; aggr_params.dl.max_datagrams = mbim_ntb_parameters.wNtbOutMaxDatagrams; aggr_params.dl.max_transfer_size_byte = mbim_ntb_parameters.dwNtbInMaxSize; aggr_params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM; aggr_params.ul.max_datagrams = mbim_ntb_parameters.wNtbOutMaxDatagrams; aggr_params.ul.max_transfer_size_byte = mbim_ntb_parameters.dwNtbOutMaxSize; ret = teth_bridge_set_aggr_params(&aggr_params); if (ret) pr_err("%s: teth_bridge_set_aggr_params failed\n", __func__); return ret; } static int mbim_bam_connect(struct f_mbim *dev) { int ret; u8 src_connection_idx, dst_connection_idx; struct usb_gadget *gadget = dev->cdev->gadget; enum peer_bam bam_name = (dev->xport == USB_GADGET_XPORT_BAM2BAM_IPA) ? 
IPA_P_BAM : A2_P_BAM; pr_info("dev:%p portno:%d\n", dev, dev->port_num); src_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name, USB_TO_PEER_PERIPHERAL, dev->port_num); dst_connection_idx = usb_bam_get_connection_idx(gadget->name, bam_name, PEER_PERIPHERAL_TO_USB, dev->port_num); if (src_connection_idx < 0 || dst_connection_idx < 0) { pr_err("%s: usb_bam_get_connection_idx failed\n", __func__); return ret; } ret = bam_data_connect(&dev->bam_port, dev->port_num, dev->xport, src_connection_idx, dst_connection_idx, USB_FUNC_MBIM); if (ret) { pr_err("bam_data_setup failed: err:%d\n", ret); return ret; } pr_info("mbim bam connected\n"); return 0; } static int mbim_bam_disconnect(struct f_mbim *dev) { pr_info("dev:%p port:%d. Do nothing.\n", dev, dev->port_num); bam_data_disconnect(&dev->bam_port, dev->port_num); return 0; } /* -------------------------------------------------------------------------*/ static inline void mbim_reset_values(struct f_mbim *mbim) { mbim->parser_opts = &mbim_ndp16_opts; mbim->ntb_input_size = MBIM_NTB_DEFAULT_IN_SIZE; atomic_set(&mbim->online, 0); } static void mbim_reset_function_queue(struct f_mbim *dev) { struct ctrl_pkt *cpkt = NULL; pr_debug("Queue empty packet for QBI"); spin_lock(&dev->lock); cpkt = mbim_alloc_ctrl_pkt(0, GFP_ATOMIC); if (!cpkt) { pr_err("%s: Unable to allocate reset function pkt\n", __func__); spin_unlock(&dev->lock); return; } list_add_tail(&cpkt->list, &dev->cpkt_req_q); spin_unlock(&dev->lock); pr_debug("%s: Wake up read queue", __func__); wake_up(&dev->read_wq); } static void fmbim_reset_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *dev = req->context; mbim_reset_function_queue(dev); } static void mbim_clear_queues(struct f_mbim *mbim) { struct ctrl_pkt *cpkt = NULL; struct list_head *act, *tmp; spin_lock(&mbim->lock); list_for_each_safe(act, tmp, &mbim->cpkt_req_q) { cpkt = list_entry(act, struct ctrl_pkt, list); list_del(&cpkt->list); mbim_free_ctrl_pkt(cpkt); } 
list_for_each_safe(act, tmp, &mbim->cpkt_resp_q) { cpkt = list_entry(act, struct ctrl_pkt, list); list_del(&cpkt->list); mbim_free_ctrl_pkt(cpkt); } spin_unlock(&mbim->lock); } /* * Context: mbim->lock held */ static void mbim_do_notify(struct f_mbim *mbim) { struct usb_request *req = mbim->not_port.notify_req; struct usb_cdc_notification *event; int status; pr_debug("notify_state: %d", mbim->not_port.notify_state); if (!req) return; event = req->buf; switch (mbim->not_port.notify_state) { case MBIM_NOTIFY_NONE: if (atomic_read(&mbim->not_port.notify_count) > 0) pr_err("Pending notifications in MBIM_NOTIFY_NONE\n"); else pr_debug("No pending notifications\n"); return; case MBIM_NOTIFY_RESPONSE_AVAILABLE: pr_debug("Notification %02x sent\n", event->bNotificationType); if (atomic_read(&mbim->not_port.notify_count) <= 0) { pr_debug("notify_response_avaliable: done"); return; } spin_unlock(&mbim->lock); status = usb_ep_queue(mbim->not_port.notify, req, GFP_ATOMIC); spin_lock(&mbim->lock); if (status) { atomic_dec(&mbim->not_port.notify_count); pr_err("Queue notify request failed, err: %d", status); } return; } event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(mbim->ctrl_id); /* * In double buffering if there is a space in FIFO, * completion callback can be called right after the call, * so unlocking */ atomic_inc(&mbim->not_port.notify_count); pr_debug("queue request: notify_count = %d", atomic_read(&mbim->not_port.notify_count)); spin_unlock(&mbim->lock); status = usb_ep_queue(mbim->not_port.notify, req, GFP_ATOMIC); spin_lock(&mbim->lock); if (status) { atomic_dec(&mbim->not_port.notify_count); pr_err("usb_ep_queue failed, err: %d", status); } } static void mbim_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *mbim = req->context; struct usb_cdc_notification *event = req->buf; pr_debug("dev:%p\n", mbim); spin_lock(&mbim->lock); switch (req->status) { case 0: atomic_dec(&mbim->not_port.notify_count); pr_debug("notify_count = %d", 
atomic_read(&mbim->not_port.notify_count)); break; case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ mbim->not_port.notify_state = MBIM_NOTIFY_NONE; atomic_set(&mbim->not_port.notify_count, 0); pr_info("ESHUTDOWN/ECONNRESET, connection gone"); spin_unlock(&mbim->lock); mbim_clear_queues(mbim); mbim_reset_function_queue(mbim); spin_lock(&mbim->lock); break; default: pr_err("Unknown event %02x --> %d\n", event->bNotificationType, req->status); break; } mbim_do_notify(mbim); spin_unlock(&mbim->lock); pr_debug("dev:%p Exit\n", mbim); } static void mbim_ep0out_complete(struct usb_ep *ep, struct usb_request *req) { /* now for SET_NTB_INPUT_SIZE only */ unsigned in_size = 0; struct usb_function *f = req->context; struct f_mbim *mbim = func_to_mbim(f); struct mbim_ntb_input_size *ntb = NULL; pr_debug("dev:%p\n", mbim); req->context = NULL; if (req->status || req->actual != req->length) { pr_err("Bad control-OUT transfer\n"); goto invalid; } if (req->length == 4) { in_size = get_unaligned_le32(req->buf); if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE || in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) { pr_err("Illegal INPUT SIZE (%d) from host\n", in_size); goto invalid; } } else if (req->length == 8) { ntb = (struct mbim_ntb_input_size *)req->buf; in_size = get_unaligned_le32(&(ntb->ntb_input_size)); if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE || in_size > le32_to_cpu(mbim_ntb_parameters.dwNtbInMaxSize)) { pr_err("Illegal INPUT SIZE (%d) from host\n", in_size); goto invalid; } mbim->ntb_max_datagrams = get_unaligned_le16(&(ntb->ntb_max_datagrams)); } else { pr_err("Illegal NTB length %d\n", in_size); goto invalid; } pr_debug("Set NTB INPUT SIZE %d\n", in_size); mbim->ntb_input_size = in_size; return; invalid: usb_ep_set_halt(ep); pr_err("dev:%p Failed\n", mbim); return; } static void fmbim_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct f_mbim *dev = req->context; struct ctrl_pkt *cpkt = NULL; int len = req->actual; if (!dev) { pr_err("mbim dev 
/*
 * NOTE(review): this span begins mid-way through a request-completion
 * handler (its opening lines are outside this view).  The visible tail
 * validates req->status, copies the just-received encapsulated command
 * into a freshly allocated ctrl_pkt, queues it on dev->cpkt_req_q under
 * dev->lock and wakes any sleeping reader on dev->read_wq.
 */
is null\n");
		return;
	}

	if (req->status < 0) {
		pr_err("mbim command error %d\n", req->status);
		return;
	}

	pr_debug("dev:%p port#%d\n", dev, dev->port_num);

	cpkt = mbim_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (!cpkt) {
		pr_err("Unable to allocate ctrl pkt\n");
		return;
	}

	pr_debug("Add to cpkt_req_q packet with len = %d\n", len);
	memcpy(cpkt->buf, req->buf, len);

	spin_lock(&dev->lock);
	list_add_tail(&cpkt->list, &dev->cpkt_req_q);
	spin_unlock(&dev->lock);

	/* wakeup read thread */
	pr_debug("Wake up read queue");
	wake_up(&dev->read_wq);

	return;
}

/*
 * mbim_setup - ep0 setup() handler for class-specific requests.
 *
 * Dispatches on (bRequestType << 8 | bRequest).  OUT requests
 * (RESET_FUNCTION, SEND_ENCAPSULATED_COMMAND, SET_NTB_INPUT_SIZE)
 * arm a completion callback on cdev->req and let the data stage run;
 * IN requests copy the reply into req->buf immediately.  A non-negative
 * 'value' is the data-stage length queued on ep0; a negative value
 * makes the composite core stall the request.
 */
static int
mbim_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_mbim *mbim = func_to_mbim(f);
	struct usb_composite_dev *cdev = mbim->cdev;
	struct usb_request *req = cdev->req;
	struct ctrl_pkt *cpkt = NULL;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/*
	 * composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	if (!atomic_read(&mbim->online)) {
		pr_info("usb cable is not connected\n");
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_RESET_FUNCTION:

		pr_debug("USB_CDC_RESET_FUNCTION");
		value = 0;
		req->complete = fmbim_reset_cmd_complete;
		req->context = mbim;
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:

		pr_debug("USB_CDC_SEND_ENCAPSULATED_COMMAND");

		if (w_length > req->length) {
			pr_debug("w_length > req->length: %d > %d",
			w_length, req->length);
		}
		value = w_length;
		req->complete = fmbim_cmd_complete;
		req->context = mbim;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:

		pr_debug("USB_CDC_GET_ENCAPSULATED_RESPONSE");

		if (w_value) {
			/*
			 * NOTE(review): the condition tests w_value but the
			 * message prints w_length — log text and value look
			 * inconsistent; confirm intent before touching.
			 */
			pr_err("w_length > 0: %d", w_length);
			break;
		}

		pr_debug("req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

		spin_lock(&mbim->lock);
		if (list_empty(&mbim->cpkt_resp_q)) {
			pr_err("ctrl resp queue empty\n");
			spin_unlock(&mbim->lock);
			break;
		}

		cpkt = list_first_entry(&mbim->cpkt_resp_q,
					struct ctrl_pkt, list);
		list_del(&cpkt->list);
		spin_unlock(&mbim->lock);

		/* reply is truncated to the host-supplied wLength */
		value = min_t(unsigned, w_length, cpkt->len);
		memcpy(req->buf, cpkt->buf, value);
		mbim_free_ctrl_pkt(cpkt);

		pr_debug("copied encapsulated_response %d bytes", value);
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_PARAMETERS:

		pr_debug("USB_CDC_GET_NTB_PARAMETERS");

		if (w_length == 0 || w_value != 0 || w_index != mbim->ctrl_id)
			break;

		value = w_length > sizeof mbim_ntb_parameters ?
			sizeof mbim_ntb_parameters : w_length;
		memcpy(req->buf, &mbim_ntb_parameters, value);
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_INPUT_SIZE:

		pr_debug("USB_CDC_GET_NTB_INPUT_SIZE");

		if (w_length < 4 || w_value != 0 || w_index != mbim->ctrl_id)
			break;

		put_unaligned_le32(mbim->ntb_input_size, req->buf);
		value = 4;
		pr_debug("Reply to host INPUT SIZE %d\n",
			mbim->ntb_input_size);
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_NTB_INPUT_SIZE:

		pr_debug("USB_CDC_SET_NTB_INPUT_SIZE");

		/* 4-byte (NTB16) or 8-byte (NTB32) payload only */
		if (w_length != 4 && w_length != 8) {
			pr_err("wrong NTB length %d", w_length);
			break;
		}

		if (w_value != 0 || w_index != mbim->ctrl_id)
			break;

		/* actual value is parsed in the OUT-data completion */
		req->complete = mbim_ep0out_complete;
		req->length = w_length;
		req->context = f;

		value = req->length;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_NTB_FORMAT:
	{
		uint16_t format;

		pr_debug("USB_CDC_GET_NTB_FORMAT");

		if (w_length < 2 || w_value != 0 || w_index != mbim->ctrl_id)
			break;

		/* 0 = NTB-16, 1 = NTB-32, derived from the active parser */
		format = (mbim->parser_opts == &mbim_ndp16_opts) ? 0 : 1;
		put_unaligned_le16(format, req->buf);
		value = 2;
		pr_debug("NTB FORMAT: sending %d\n", format);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_NTB_FORMAT:
	{
		pr_debug("USB_CDC_SET_NTB_FORMAT");

		if (w_length != 0 || w_index != mbim->ctrl_id)
			break;

		switch (w_value) {
		case 0x0000:
			mbim->parser_opts = &mbim_ndp16_opts;
			pr_debug("NCM16 selected\n");
			break;
		case 0x0001:
			mbim->parser_opts = &mbim_ndp32_opts;
			pr_debug("NCM32 selected\n");
			break;
		default:
			/* unknown format codes are silently ignored */
			break;
		}
		value = 0;
		break;
	}

	/* optional in mbim descriptor: */
	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

	default:
		pr_err("invalid control req: %02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		pr_debug("control request: %02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (value < w_length);
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			pr_err("queueing req failed: %02x.%02x, err %d\n",
				ctrl->bRequestType, ctrl->bRequest, value);
		}
	} else {
		pr_err("ctrl req err %d: %02x.%02x v%04x i%04x l%d\n",
			value, ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}

/*
 * This function handles the Microsoft-specific OS descriptor control
 * requests that are issued by Windows host drivers to determine the
 * configuration containing the MBIM function.
 *
 * Unlike mbim_setup() this function handles two specific device requests,
 * and only when a configuration has not yet been selected.
 */
static int mbim_ctrlrequest(struct usb_composite_dev *cdev,
			const struct usb_ctrlrequest *ctrl)
{
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/* only respond to OS descriptors when no configuration selected */
	if (cdev->config ||
			!mbim_ext_config_desc.function.subCompatibleID[0])
		return value;

	pr_debug("%02x.%02x v%04x i%04x l%u",
		ctrl->bRequestType, ctrl->bRequest,
		w_value, w_index, w_length);

	/* Handle MSFT OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MBIM_OS_STRING_ID) {

		value = (w_length < sizeof(mbim_os_string) ?
				w_length : sizeof(mbim_os_string));
		memcpy(cdev->req->buf, mbim_os_string, value);

	} else if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
			&& ctrl->bRequest == MBIM_VENDOR_CODE
			&& w_index == 4) {

		/* Handle Extended OS descriptor */
		value = (w_length < sizeof(mbim_ext_config_desc) ?
				w_length : sizeof(mbim_ext_config_desc));
		memcpy(cdev->req->buf, &mbim_ext_config_desc, value);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			pr_err("response queue error: %d", rc);
	}
	return value;
}

/*
 * mbim_set_alt - activate an interface altsetting.
 *
 * Control interface: only alt 0; (re)enables the notify endpoint.
 * Data interface: alt 0 or 1; alt 1 configures the bulk endpoints and
 * brings up the BAM data path.  On success the function is marked
 * online and blocked file-API readers/writers are woken.
 */
static int mbim_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_mbim *mbim = func_to_mbim(f);
	struct usb_composite_dev *cdev = mbim->cdev;
	int ret = 0;

	/* Control interface has only altsetting 0 */
	if (intf == mbim->ctrl_id) {

		pr_info("CONTROL_INTERFACE");

		if (alt != 0)
			goto fail;

		if (mbim->not_port.notify->driver_data) {
			pr_info("reset mbim control %d\n", intf);
			usb_ep_disable(mbim->not_port.notify);
		}

		ret = config_ep_by_speed(cdev->gadget, f,
					mbim->not_port.notify);
		if (ret) {
			mbim->not_port.notify->desc = NULL;
			pr_err("Failed configuring notify ep %s: err %d\n",
				mbim->not_port.notify->name, ret);
			return ret;
		}

		ret = usb_ep_enable(mbim->not_port.notify);
		if (ret) {
			pr_err("usb ep#%s enable failed, err#%d\n",
				mbim->not_port.notify->name, ret);
			return ret;
		}
		mbim->not_port.notify->driver_data = mbim;

	/* Data interface has two altsettings, 0 and 1 */
	} else if (intf == mbim->data_id) {

		pr_info("DATA_INTERFACE");

		if (alt > 1)
			goto fail;

		if (mbim->bam_port.in->driver_data) {
			pr_info("reset mbim\n");
			mbim_reset_values(mbim);
		}

		/*
		 * CDC Network only sends data in non-default altsettings.
		 * Changing altsettings resets filters, statistics, etc.
		 */
		if (alt == 1) {
			pr_info("Alt set 1, initialize ports");

			if (!mbim->bam_port.in->desc) {
				pr_info("Choose endpoints");

				ret = config_ep_by_speed(cdev->gadget, f,
							mbim->bam_port.in);
				if (ret) {
					mbim->bam_port.in->desc = NULL;
					pr_err("IN ep %s failed: %d\n",
						mbim->bam_port.in->name, ret);
					return ret;
				}
				pr_info("Set mbim port in_desc = 0x%p",
					mbim->bam_port.in->desc);

				ret = config_ep_by_speed(cdev->gadget, f,
							mbim->bam_port.out);
				if (ret) {
					mbim->bam_port.out->desc = NULL;
					pr_err("OUT ep %s failed: %d\n",
						mbim->bam_port.out->name,
						ret);
					return ret;
				}
				pr_info("Set mbim port out_desc = 0x%p",
					mbim->bam_port.out->desc);

				pr_debug("Activate mbim\n");
				mbim_bam_connect(mbim);
			} else {
				pr_info("PORTS already SET");
			}
		}

		mbim->data_alt_int = alt;
		spin_lock(&mbim->lock);
		mbim->not_port.notify_state = MBIM_NOTIFY_RESPONSE_AVAILABLE;
		spin_unlock(&mbim->lock);
	} else {
		goto fail;
	}

	atomic_set(&mbim->online, 1);

	pr_info("SET DEVICE ONLINE");

	/* wakeup file threads */
	wake_up(&mbim->read_wq);
	wake_up(&mbim->write_wq);

	return 0;

fail:
	pr_err("ERROR: Illegal Interface");
	return -EINVAL;
}

/*
 * Because the data interface supports multiple altsettings,
 * this MBIM function *MUST* implement a get_alt() method.
 */
static int mbim_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_mbim *mbim = func_to_mbim(f);

	if (intf == mbim->ctrl_id)
		return 0;
	else if (intf == mbim->data_id)
		return mbim->data_alt_int;

	return -EINVAL;
}

/*
 * mbim_disable - called on USB disconnect / config change: marks the
 * function offline, drains the control-packet queues, tears down the
 * BAM data path and disables the notify endpoint.
 */
static void mbim_disable(struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);

	pr_info("SET DEVICE OFFLINE");
	atomic_set(&mbim->online, 0);
	mbim->not_port.notify_state = MBIM_NOTIFY_NONE;

	mbim_clear_queues(mbim);
	mbim_reset_function_queue(mbim);

	mbim_bam_disconnect(mbim);

	if (mbim->not_port.notify->driver_data) {
		usb_ep_disable(mbim->not_port.notify);
		mbim->not_port.notify->driver_data = NULL;
	}
	atomic_set(&mbim->not_port.notify_count, 0);

	pr_info("mbim deactivated\n");
}

/* only one data port is ever suspended/resumed here */
#define MBIM_ACTIVE_PORT 0

/* forward USB suspend to the BAM data layer */
static void mbim_suspend(struct usb_function *f)
{
	pr_info("mbim suspended\n");
	bam_data_suspend(MBIM_ACTIVE_PORT);
}

/* forward USB resume to the BAM data layer */
static void mbim_resume(struct usb_function *f)
{
	pr_info("mbim resumed\n");
	bam_data_resume(MBIM_ACTIVE_PORT);
}

/*---------------------- function driver setup/binding ---------------------*/

/*
 * mbim_bind - allocate interface IDs, autoconfigure the two bulk
 * endpoints plus the interrupt notify endpoint, allocate the notify
 * request, and copy the FS (and, on dual-speed hardware, HS)
 * descriptor sets.  The fail: path releases everything acquired so far.
 */
static int
mbim_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_mbim *mbim = func_to_mbim(f);
	int status;
	struct usb_ep *ep;

	pr_info("Enter");

	mbim->cdev = cdev;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	mbim->ctrl_id = status;
	mbim_iad_desc.bFirstInterface = status;

	mbim_control_intf.bInterfaceNumber = status;
	mbim_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	mbim->data_id = status;
	mbim->data_alt_int = 0;

	mbim_data_nop_intf.bInterfaceNumber = status;
	mbim_data_intf.bInterfaceNumber = status;
	mbim_union_desc.bSlaveInterface0 = status;

	mbim->bam_port.cdev = cdev;
	mbim->bam_port.func = &mbim->function;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_in_desc);
	if (!ep) {
		pr_err("usb epin autoconfig failed\n");
		goto fail;
	}
	pr_info("usb epin autoconfig succeeded\n");
	ep->driver_data = cdev;	/* claim */
	mbim->bam_port.in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_out_desc);
	if (!ep) {
		pr_err("usb epout autoconfig failed\n");
		goto fail;
	}
	pr_info("usb epout autoconfig succeeded\n");
	ep->driver_data = cdev;	/* claim */
	mbim->bam_port.out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_mbim_notify_desc);
	if (!ep) {
		pr_err("usb notify ep autoconfig failed\n");
		goto fail;
	}
	pr_info("usb notify ep autoconfig succeeded\n");
	mbim->not_port.notify = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* allocate notification request and buffer */
	mbim->not_port.notify_req = mbim_alloc_req(ep, NCM_STATUS_BYTECOUNT);
	if (!mbim->not_port.notify_req) {
		pr_info("failed to allocate notify request\n");
		goto fail;
	}
	pr_info("allocated notify ep request & request buffer\n");

	mbim->not_port.notify_req->context = mbim;
	mbim->not_port.notify_req->complete = mbim_notify_complete;

	/* IPA transport advertises a smaller max segment size */
	if (mbim->xport == USB_GADGET_XPORT_BAM2BAM_IPA)
		mbb_desc.wMaxSegmentSize = cpu_to_le16(0x800);
	else
		mbb_desc.wMaxSegmentSize = cpu_to_le16(0xfe0);

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(mbim_fs_function);
	if (!f->descriptors)
		goto fail;

	/*
	 * support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_mbim_in_desc.bEndpointAddress =
				fs_mbim_in_desc.bEndpointAddress;
		hs_mbim_out_desc.bEndpointAddress =
				fs_mbim_out_desc.bEndpointAddress;
		hs_mbim_notify_desc.bEndpointAddress =
				fs_mbim_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(mbim_hs_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	/*
	 * If MBIM is bound in a config other than the first, tell Windows
	 * about it by returning the num as a string in the OS descriptor's
	 * subCompatibleID field.  Windows only supports up to config #4.
	 */
	if (c->bConfigurationValue >= 2 && c->bConfigurationValue <= 4) {
		pr_debug("MBIM in configuration %d",
			c->bConfigurationValue);
		mbim_ext_config_desc.function.subCompatibleID[0] =
			c->bConfigurationValue + '0';
	}

	pr_info("mbim(%d): %s speed IN/%s OUT/%s NOTIFY/%s\n",
			mbim->port_num,
			gadget_is_dualspeed(c->cdev->gadget) ?
				"dual" : "full",
			mbim->bam_port.in->name, mbim->bam_port.out->name,
			mbim->not_port.notify->name);

	return 0;

fail:
	pr_err("%s failed to bind, err %d\n", f->name, status);

	if (f->descriptors)
		usb_free_descriptors(f->descriptors);

	if (mbim->not_port.notify_req) {
		kfree(mbim->not_port.notify_req->buf);
		usb_ep_free_request(mbim->not_port.notify,
				mbim->not_port.notify_req);
	}

	/* we might as well release our claims on endpoints */
	if (mbim->not_port.notify)
		mbim->not_port.notify->driver_data = NULL;
	if (mbim->bam_port.out)
		mbim->bam_port.out->driver_data = NULL;
	if (mbim->bam_port.in)
		mbim->bam_port.in->driver_data = NULL;

	return status;
}

/*
 * mbim_unbind - undo mbim_bind(): destroy the BAM data port, free both
 * descriptor sets and the notify request, and clear the OS-descriptor
 * subCompatibleID marker.
 */
static void mbim_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_mbim *mbim = func_to_mbim(f);

	bam_data_destroy(mbim->port_num);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	kfree(mbim->not_port.notify_req->buf);
	usb_ep_free_request(mbim->not_port.notify,
			mbim->not_port.notify_req);

	mbim_ext_config_desc.function.subCompatibleID[0] = 0;
}

/**
 * mbim_bind_config - add MBIM link to a configuration
 * @c: the configuration to support the network link
 * Context: single threaded during gadget setup
 * Returns zero on success, else negative errno.
 *
 * NOTE(review): beyond the bounds check, @portno is not used to select
 * the instance — mbim_ports[0].port is always taken.  Confirm whether
 * multi-port operation is actually supported.
 */
int mbim_bind_config(struct usb_configuration *c, unsigned portno,
					char *xport_name)
{
	struct f_mbim *mbim = NULL;
	int status = 0;

	pr_info("port number %u", portno);

	if (portno >= nr_mbim_ports) {
		pr_err("Can not add port %u. Max ports = %d",
			portno, nr_mbim_ports);
		return -ENODEV;
	}

	status = mbim_bam_setup(nr_mbim_ports);
	if (status) {
		pr_err("bam setup failed");
		return status;
	}

	/* maybe allocate device-global string IDs */
	if (mbim_string_defs[0].id == 0) {

		/* control interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		mbim_string_defs[STRING_CTRL_IDX].id = status;
		mbim_control_intf.iInterface = status;

		/* data interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		mbim_string_defs[STRING_DATA_IDX].id = status;
		mbim_data_nop_intf.iInterface = status;
		mbim_data_intf.iInterface = status;
	}

	/* allocate and initialize one new instance */
	mbim = mbim_ports[0].port;
	if (!mbim) {
		pr_info("mbim struct not allocated");
		return -ENOMEM;
	}

	mbim->cdev = c->cdev;

	mbim_reset_values(mbim);

	mbim->function.name = "usb_mbim";
	mbim->function.strings = mbim_strings;
	mbim->function.bind = mbim_bind;
	mbim->function.unbind = mbim_unbind;
	mbim->function.set_alt = mbim_set_alt;
	mbim->function.get_alt = mbim_get_alt;
	mbim->function.setup = mbim_setup;
	mbim->function.disable = mbim_disable;
	mbim->function.suspend = mbim_suspend;
	mbim->function.resume = mbim_resume;

	mbim->xport = str_to_xport(xport_name);
	if (mbim->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
		/* Use BAM2BAM by default if not IPA */
		mbim->xport = USB_GADGET_XPORT_BAM2BAM;
	} else {
		/* For IPA we use limit of 16 */
		mbim_ntb_parameters.wNtbOutMaxDatagrams = 16;
		/* For IPA this is proven to give maximum throughput */
		mbim_ntb_parameters.dwNtbInMaxSize =
				cpu_to_le32(NTB_DEFAULT_IN_SIZE_IPA);
		mbim_ntb_parameters.dwNtbOutMaxSize =
				cpu_to_le32(MBIM_NTB_OUT_SIZE_IPA);
		mbim_ntb_parameters.wNdpInDivisor = 1;
	}

	INIT_LIST_HEAD(&mbim->cpkt_req_q);
	INIT_LIST_HEAD(&mbim->cpkt_resp_q);

	status = usb_add_function(c, &mbim->function);

	pr_info("Exit status %d", status);

	return status;
}

/* ------------ MBIM DRIVER File Operations API for USER SPACE ------------ */

/*
 * mbim_read - hand the oldest queued encapsulated command to userspace.
 *
 * Blocks until the function is online and a packet is available on
 * cpkt_req_q; at most one reader at a time (read_excl).  Returns the
 * packet length, or a negative errno.
 * NOTE(review): pr_debug/pr_err use %d for the size_t 'count' — works
 * on 32-bit builds but should be %zu; confirm target before changing.
 */
static ssize_t
mbim_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
{
	struct f_mbim *dev = fp->private_data;
	struct ctrl_pkt *cpkt = NULL;
	unsigned long flags;
	int ret = 0;

	pr_debug("Enter(%d)\n", count);

	if (!dev) {
		pr_err("Received NULL mbim pointer\n");
		return -ENODEV;
	}

	if (count > MBIM_BULK_BUFFER_SIZE) {
		pr_err("Buffer size is too big %d, should be at most %d\n",
			count, MBIM_BULK_BUFFER_SIZE);
		return -EINVAL;
	}

	if (mbim_lock(&dev->read_excl)) {
		pr_err("Previous reading is not finished yet\n");
		return -EBUSY;
	}

	/* block until mbim online */
	while (!(atomic_read(&dev->online) || atomic_read(&dev->error))) {
		pr_err("USB cable not connected. Wait.\n");
		ret = wait_event_interruptible(dev->read_wq,
			(atomic_read(&dev->online) ||
			atomic_read(&dev->error)));
		if (ret < 0) {
			mbim_unlock(&dev->read_excl);
			return -ERESTARTSYS;
		}
	}

	if (atomic_read(&dev->error)) {
		mbim_unlock(&dev->read_excl);
		return -EIO;
	}

	/* block until a request packet is queued */
	spin_lock_irqsave(&dev->lock, flags);
	while (list_empty(&dev->cpkt_req_q)) {
		pr_debug("Requests list is empty. Wait.\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = wait_event_interruptible(dev->read_wq,
			!list_empty(&dev->cpkt_req_q));
		if (ret < 0) {
			pr_err("Waiting failed\n");
			mbim_unlock(&dev->read_excl);
			return -ERESTARTSYS;
		}
		pr_debug("Received request packet\n");
		spin_lock_irqsave(&dev->lock, flags);
	}

	cpkt = list_first_entry(&dev->cpkt_req_q, struct ctrl_pkt, list);
	if (cpkt->len > count) {
		spin_unlock_irqrestore(&dev->lock, flags);
		mbim_unlock(&dev->read_excl);
		pr_err("cpkt size too big:%d > buf size:%d\n",
			cpkt->len, count);
		return -ENOMEM;
	}

	pr_debug("cpkt size:%d\n", cpkt->len);

	list_del(&cpkt->list);
	spin_unlock_irqrestore(&dev->lock, flags);
	mbim_unlock(&dev->read_excl);

	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
	if (ret) {
		pr_err("copy_to_user failed: err %d\n", ret);
		ret = -ENOMEM;
	} else {
		pr_debug("copied %d bytes to user\n", cpkt->len);
		ret = cpkt->len;
	}

	mbim_free_ctrl_pkt(cpkt);

	return ret;
}

/*
 * mbim_write - queue a userspace-supplied encapsulated response.
 *
 * Copies up to MAX_CTRL_PKT_SIZE bytes into a new ctrl_pkt and hands
 * it to fmbim_send_cpkt_response(); one writer at a time (write_excl).
 * NOTE(review): on copy_from_user() failure this returns 0, not
 * -EFAULT — callers cannot distinguish that from success; verify.
 */
static ssize_t
mbim_write(struct file *fp, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct f_mbim *dev = fp->private_data;
	struct ctrl_pkt *cpkt = NULL;
	int ret = 0;

	pr_debug("Enter(%d)", count);

	if (!dev) {
		pr_err("Received NULL mbim pointer\n");
		return -ENODEV;
	}

	if (!count) {
		pr_err("zero length ctrl pkt\n");
		return -ENODEV;
	}

	if (count > MAX_CTRL_PKT_SIZE) {
		pr_err("given pkt size too big:%d > max_pkt_size:%d\n",
			count, MAX_CTRL_PKT_SIZE);
		return -ENOMEM;
	}

	if (mbim_lock(&dev->write_excl)) {
		pr_err("Previous writing not finished yet\n");
		return -EBUSY;
	}

	if (!atomic_read(&dev->online)) {
		pr_err("USB cable not connected\n");
		mbim_unlock(&dev->write_excl);
		return -EPIPE;
	}

	cpkt = mbim_alloc_ctrl_pkt(count, GFP_KERNEL);
	if (!cpkt) {
		pr_err("failed to allocate ctrl pkt\n");
		mbim_unlock(&dev->write_excl);
		return -ENOMEM;
	}

	ret = copy_from_user(cpkt->buf, buf, count);
	if (ret) {
		pr_err("copy_from_user failed err:%d\n", ret);
		mbim_free_ctrl_pkt(cpkt);
		mbim_unlock(&dev->write_excl);
		return 0;
	}

	fmbim_send_cpkt_response(dev, cpkt);

	mbim_unlock(&dev->write_excl);

	pr_debug("Exit(%d)", count);

	return count;
}

/*
 * mbim_open - single-open gate for /dev/android_mbim; stores the global
 * device in file private_data and clears the error flag.
 * NOTE(review): the `while (!_mbim_dev)` loop always returns on its
 * first iteration — it behaves as an `if`; confirm before simplifying.
 */
static int mbim_open(struct inode *ip, struct file *fp)
{
	pr_info("Open mbim driver\n");

	while (!_mbim_dev) {
		pr_err("mbim_dev not created yet\n");
		return -ENODEV;
	}

	if (mbim_lock(&_mbim_dev->open_excl)) {
		pr_err("Already opened\n");
		return -EBUSY;
	}

	pr_info("Lock mbim_dev->open_excl for open\n");

	if (!atomic_read(&_mbim_dev->online))
		pr_err("USB cable not connected\n");

	fp->private_data = _mbim_dev;

	atomic_set(&_mbim_dev->error, 0);

	pr_info("Exit, mbim file opened\n");

	return 0;
}

/* release the single-open lock taken in mbim_open() */
static int mbim_release(struct inode *ip, struct file *fp)
{
	pr_info("Close mbim file");

	mbim_unlock(&_mbim_dev->open_excl);

	return 0;
}

/*
 * mbim_ioctl - expose NTB input size and max datagram count to
 * userspace; serialized by ioctl_excl.
 */
static long mbim_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
{
	struct f_mbim *mbim = fp->private_data;
	int ret = 0;

	pr_debug("Received command %d", cmd);

	if (mbim_lock(&mbim->ioctl_excl))
		return -EBUSY;

	switch (cmd) {
	case MBIM_GET_NTB_SIZE:
		ret = copy_to_user((void __user *)arg,
			&mbim->ntb_input_size,
			sizeof(mbim->ntb_input_size));
		if (ret) {
			pr_err("copying to user space failed");
			ret = -EFAULT;
		}
		pr_info("Sent NTB size %d", mbim->ntb_input_size);
		break;
	case MBIM_GET_DATAGRAM_COUNT:
		ret = copy_to_user((void __user *)arg,
			&mbim->ntb_max_datagrams,
			sizeof(mbim->ntb_max_datagrams));
		if (ret) {
			pr_err("copying to user space failed");
			ret = -EFAULT;
		}
		pr_info("Sent NTB datagrams count %d",
			mbim->ntb_max_datagrams);
		break;
	default:
		pr_err("wrong parameter");
		ret = -EINVAL;
	}

	mbim_unlock(&mbim->ioctl_excl);

	return ret;
}

/* file operations for MBIM device /dev/android_mbim */
static const struct file_operations mbim_fops = {
	.owner = THIS_MODULE,
	.open = mbim_open,
	.release = mbim_release,
	.read = mbim_read,
	.write = mbim_write,
	.unlocked_ioctl = mbim_ioctl,
};

static struct miscdevice mbim_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "android_mbim",
	.fops = &mbim_fops,
};

/*
 * mbim_init - allocate up to NR_MBIM_PORTS f_mbim instances and
 * register the misc device.
 * NOTE(review): _mbim_dev keeps only the LAST allocated instance, and
 * on misc_register() failure the freed ports leave _mbim_dev dangling;
 * single-instance use is assumed — confirm.
 */
static int mbim_init(int instances)
{
	int i;
	struct f_mbim *dev = NULL;
	int ret;

	pr_info("initialize %d instances\n", instances);

	if (instances > NR_MBIM_PORTS) {
		pr_err("Max-%d instances supported\n", NR_MBIM_PORTS);
		return -EINVAL;
	}

	for (i = 0; i < instances; i++) {
		dev = kzalloc(sizeof(struct f_mbim), GFP_KERNEL);
		if (!dev) {
			pr_err("Failed to allocate mbim dev\n");
			ret = -ENOMEM;
			goto fail_probe;
		}

		dev->port_num = i;
		spin_lock_init(&dev->lock);
		INIT_LIST_HEAD(&dev->cpkt_req_q);
		INIT_LIST_HEAD(&dev->cpkt_resp_q);

		mbim_ports[i].port = dev;
		mbim_ports[i].port_num = i;

		init_waitqueue_head(&dev->read_wq);
		init_waitqueue_head(&dev->write_wq);

		atomic_set(&dev->open_excl, 0);
		atomic_set(&dev->ioctl_excl, 0);
		atomic_set(&dev->read_excl, 0);
		atomic_set(&dev->write_excl, 0);

		nr_mbim_ports++;
	}

	_mbim_dev = dev;
	ret = misc_register(&mbim_device);
	if (ret) {
		pr_err("mbim driver failed to register");
		goto fail_probe;
	}

	pr_info("Initialized %d ports\n", nr_mbim_ports);

	return ret;

fail_probe:
	pr_err("Failed");
	for (i = 0; i < nr_mbim_ports; i++) {
		kfree(mbim_ports[i].port);
		mbim_ports[i].port = NULL;
	}

	return ret;
}

/*
 * fmbim_cleanup - free all allocated port instances and deregister the
 * misc device; inverse of mbim_init().
 */
static void fmbim_cleanup(void)
{
	int i = 0;

	pr_info("Enter");

	for (i = 0; i < nr_mbim_ports; i++) {
		kfree(mbim_ports[i].port);
		mbim_ports[i].port = NULL;
	}
	nr_mbim_ports = 0;

	misc_deregister(&mbim_device);

	_mbim_dev = NULL;
}
gpl-2.0
gtvhacker/Sony-Kernel
arch/arm/mach-mx1/clock.c
781
13186
/*
 * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/clkdev.h>

#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include "crm_regs.h"

/* set the clock's gate bit (enable_shift) in its enable register */
static int _clk_enable(struct clk *clk)
{
	unsigned int reg;

	reg = __raw_readl(clk->enable_reg);
	reg |= 1 << clk->enable_shift;
	__raw_writel(reg, clk->enable_reg);

	return 0;
}

/* clear the clock's gate bit in its enable register */
static void _clk_disable(struct clk *clk)
{
	unsigned int reg;

	reg = __raw_readl(clk->enable_reg);
	reg &= ~(1 << clk->enable_shift);
	__raw_writel(reg, clk->enable_reg);
}

/*
 * Return the index of @parent in @clk_arr, or -EINVAL if @parent is
 * not an allowed parent for the mux.
 */
static int _clk_can_use_parent(const struct clk *clk_arr[],
		unsigned int size, struct clk *parent)
{
	int i;

	for (i = 0; i < size; i++)
		if (parent == clk_arr[i])
			return i;

	return -EINVAL;
}

/*
 * Round @rate to the nearest achievable rate not above the parent
 * rate divided by a rounded-up integer divisor capped at @limit.
 */
static unsigned long
_clk_simple_round_rate(struct clk *clk, unsigned long rate,
		unsigned int limit)
{
	int div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;
	if (parent_rate % rate)
		div++;

	if (div > limit)
		div = limit;

	return parent_rate / div;
}

/* delegate round_rate to the parent clock */
static unsigned long _clk_parent_round_rate(struct clk *clk,
		unsigned long rate)
{
	return clk->parent->round_rate(clk->parent, rate);
}

/* delegate set_rate to the parent clock */
static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
{
	return clk->parent->set_rate(clk->parent, rate);
}

/* fixed 16 MHz external oscillator */
static unsigned long clk16m_get_rate(struct clk *clk)
{
	return 16000000;
}

static struct clk clk16m = {
	.get_rate = clk16m_get_rate,
	.enable = _clk_enable,
	.enable_reg = CCM_CSCR,
	.enable_shift = CCM_CSCR_OSC_EN_SHIFT,
	.disable = _clk_disable,
};

/* in Hz */
static unsigned long clk32_rate;

/* 32 kHz reference; rate is supplied by the board via mx1_clocks_init() */
static unsigned long clk32_get_rate(struct clk *clk)
{
	return clk32_rate;
}

static struct clk clk32 = {
	.get_rate = clk32_get_rate,
};

/* 32 kHz reference pre-multiplied by 512 for the PLLs */
static unsigned long clk32_premult_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) * 512;
}

static struct clk clk32_premult = {
	.parent = &clk32,
	.get_rate = clk32_premult_get_rate,
};

/* selectable reference inputs for the system PLL pre-mux */
static const struct clk *prem_clk_clocks[] = {
	&clk32_premult,
	&clk16m,
};

/* select the system PLL reference via CCM_CSCR SYSTEM_SEL */
static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
{
	int i;
	unsigned int reg = __raw_readl(CCM_CSCR);

	i = _clk_can_use_parent(prem_clk_clocks,
			ARRAY_SIZE(prem_clk_clocks), parent);

	switch (i) {
	case 0:
		reg &= ~CCM_CSCR_SYSTEM_SEL;
		break;
	case 1:
		reg |= CCM_CSCR_SYSTEM_SEL;
		break;
	default:
		return i;
	}

	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk prem_clk = {
	.set_parent = prem_clk_set_parent,
};

/* system PLL output, decoded from CCM_SPCTL0 */
static unsigned long system_clk_get_rate(struct clk *clk)
{
	return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
			clk_get_rate(clk->parent));
}

static struct clk system_clk = {
	.parent = &prem_clk,
	.get_rate = system_clk_get_rate,
};

/* MCU PLL output, decoded from CCM_MPCTL0 */
static unsigned long mcu_clk_get_rate(struct clk *clk)
{
	return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
			clk_get_rate(clk->parent));
}

static struct clk mcu_clk = {
	.parent = &clk32_premult,
	.get_rate = mcu_clk_get_rate,
};

/* CPU clock: MCU PLL, optionally halved by the PRESC prescaler bit */
static unsigned long fclk_get_rate(struct clk *clk)
{
	unsigned long fclk = clk_get_rate(clk->parent);

	if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
		fclk /= 2;

	return fclk;
}

static struct clk fclk = {
	.parent = &mcu_clk,
	.get_rate = fclk_get_rate,
};

/*
 * get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
 */
static unsigned long hclk_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) /
			(((__raw_readl(CCM_CSCR) & CCM_CSCR_BCLK_MASK)
			>> CCM_CSCR_BCLK_OFFSET) + 1);
}

static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* program the BCLK divider (1..16); @rate must divide the parent exactly */
static int hclk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_CSCR);
	reg &= ~CCM_CSCR_BCLK_MASK;
	reg |= div << CCM_CSCR_BCLK_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk hclk = {
	.parent = &system_clk,
	.get_rate = hclk_get_rate,
	.round_rate = hclk_round_rate,
	.set_rate = hclk_set_rate,
};

/* 48 MHz USB clock derived from the system PLL via the USB divider */
static unsigned long clk48m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) /
			(((__raw_readl(CCM_CSCR) & CCM_CSCR_USB_MASK)
			>> CCM_CSCR_USB_OFFSET) + 1);
}

static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 8);
}

/* program the USB divider (1..8); @rate must divide the parent exactly */
static int clk48m_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_CSCR);
	reg &= ~CCM_CSCR_USB_MASK;
	reg |= div << CCM_CSCR_USB_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk clk48m = {
	.parent = &system_clk,
	.get_rate = clk48m_get_rate,
	.round_rate = clk48m_round_rate,
	.set_rate = clk48m_set_rate,
};

/*
 * get peripheral clock 1 ( UART[12], Timer[12], PWM )
 */
static unsigned long perclk1_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) /
			(((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK1_MASK)
			>> CCM_PCDR_PCLK1_OFFSET) + 1);
}

static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* program the PCLK1 divider (1..16); @rate must divide the parent exactly */
static int perclk1_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK1_MASK;
	reg |= div << CCM_PCDR_PCLK1_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}

/*
 * get peripheral clock 2 ( LCD, SD, SPI[12] )
 */
static unsigned long perclk2_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) /
			(((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK2_MASK)
			>> CCM_PCDR_PCLK2_OFFSET) + 1);
}

static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* program the PCLK2 divider (1..16); @rate must divide the parent exactly */
static int perclk2_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK2_MASK;
	reg |= div << CCM_PCDR_PCLK2_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}

/*
 * get peripheral clock 3 ( SSI )
 */
static unsigned long perclk3_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) /
			(((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK3_MASK)
			>> CCM_PCDR_PCLK3_OFFSET) + 1);
}

static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 128);
}

/* program the PCLK3 divider (1..128); @rate must divide the parent exactly */
static int perclk3_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 128 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK3_MASK;
	reg |= div << CCM_PCDR_PCLK3_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}

/* the three peripheral clocks, all fed from the system PLL */
static struct clk perclk[] = {
	{
		.id = 0,
		.parent = &system_clk,
		.get_rate = perclk1_get_rate,
		.round_rate = perclk1_round_rate,
		.set_rate = perclk1_set_rate,
	}, {
		.id = 1,
		.parent = &system_clk,
		.get_rate = perclk2_get_rate,
		.round_rate = perclk2_round_rate,
		.set_rate = perclk2_set_rate,
	}, {
		.id = 2,
		.parent = &system_clk,
		.get_rate = perclk3_get_rate,
		.round_rate = perclk3_round_rate,
		.set_rate = perclk3_set_rate,
	}
};

/* CLKO output mux sources; array order matches the CLKO field encoding */
static const struct clk *clko_clocks[] = {
	&perclk[0],
	&hclk,
	&clk48m,
	&clk16m,
	&prem_clk,
	&fclk,
};

/*
 * Route one of clko_clocks[] to the CLKO pin via CCM_CSCR, and pass
 * rate operations through to the new parent when it supports them.
 */
static int clko_set_parent(struct clk *clk, struct clk *parent)
{
	int i;
	unsigned int reg;

	i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks),
			parent);
	if (i < 0)
		return i;

	reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
	reg |= i << CCM_CSCR_CLKO_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
		clk->set_rate = _clk_parent_set_rate;
		clk->round_rate = _clk_parent_round_rate;
	} else {
		clk->set_rate = NULL;
		clk->round_rate = NULL;
	}

	return 0;
}

static struct clk clko_clk = {
	.set_parent = clko_set_parent,
};

/* gated peripheral clocks below: gates live in SCM_GCCR */
static struct clk dma_clk = {
	.parent = &hclk,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
	.enable = _clk_enable,
	.enable_reg = SCM_GCCR,
	.enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
	.disable = _clk_disable,
};

static struct clk csi_clk = {
	.parent = &hclk,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
	.enable = _clk_enable,
	.enable_reg = SCM_GCCR,
	.enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
	.disable = _clk_disable,
};

static struct clk mma_clk = {
	.parent = &hclk,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
	.enable = _clk_enable,
	.enable_reg = SCM_GCCR,
	.enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
	.disable = _clk_disable,
};

static struct clk usbd_clk = {
	.parent = &clk48m,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
	.enable = _clk_enable,
	.enable_reg = SCM_GCCR,
	.enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
	.disable = _clk_disable,
};

/* ungated peripheral clocks: rate control is delegated to the parent */
static struct clk gpt_clk = {
	.parent = &perclk[0],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk uart_clk = {
	.parent = &perclk[0],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk i2c_clk = {
	.parent = &hclk,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk spi_clk = {
	.parent = &perclk[1],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk sdhc_clk = {
	.parent = &perclk[1],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk lcdc_clk = {
	.parent = &perclk[1],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk mshc_clk = {
	.parent = &hclk,
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk ssi_clk = {
	.parent = &perclk[2],
	.round_rate = _clk_parent_round_rate,
	.set_rate = _clk_parent_set_rate,
};

static struct clk rtc_clk = {
	.parent = &clk32,
};

#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

/* clkdev lookup table mapping device/connection names to clocks */
static struct clk_lookup lookups[] __initdata = {
	_REGISTER_CLOCK(NULL, "dma", dma_clk)
	_REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
	_REGISTER_CLOCK(NULL, "mma", mma_clk)
	_REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	_REGISTER_CLOCK("imx-uart.0", NULL, uart_clk)
	_REGISTER_CLOCK("imx-uart.1", NULL, uart_clk)
	_REGISTER_CLOCK("imx-uart.2", NULL, uart_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
	_REGISTER_CLOCK("spi_imx.0", NULL, spi_clk)
	_REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
	_REGISTER_CLOCK(NULL, "mshc", mshc_clk)
	_REGISTER_CLOCK(NULL, "ssi", ssi_clk)
	_REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
};

/*
 * mx1_clocks_init - early clock-tree setup.
 * @fref: board-supplied 32 kHz reference frequency in Hz.
 *
 * Gates all SCM clocks, detects the PLL reference and CLKO routing
 * from CCM_CSCR, registers the clkdev table, enables hclk/fclk and
 * starts the system timer on GPT1.
 */
int __init mx1_clocks_init(unsigned long fref)
{
	unsigned int reg;

	/* disable clocks we are able to */
	__raw_writel(0, SCM_GCCR);

	clk32_rate = fref;
	reg = __raw_readl(CCM_CSCR);

	/* detect clock reference for system PLL */
	if (reg & CCM_CSCR_SYSTEM_SEL) {
		prem_clk.parent = &clk16m;
	} else {
		/* ensure that oscillator is disabled */
		reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
		__raw_writel(reg, CCM_CSCR);
		prem_clk.parent = &clk32_premult;
	}

	/* detect reference for CLKO */
	reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
	clko_clk.parent = (struct clk *)clko_clocks[reg];

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	clk_enable(&hclk);
	clk_enable(&fclk);

	mxc_timer_init(&gpt_clk, IO_ADDRESS(TIM1_BASE_ADDR), TIM1_INT);

	return 0;
}
gpl-2.0
TrustZoneGenericDriver/linux
drivers/isdn/hisax/hfc_usb.c
1293
44651
/* * hfc_usb.c * * $Id: hfc_usb.c,v 2.3.2.24 2007/10/14 08:40:29 mbachem Exp $ * * modular HiSax ISDN driver for Colognechip HFC-S USB chip * * Authors : Peter Sprenger (sprenger@moving-bytes.de) * Martin Bachem (m.bachem@gmx.de, info@colognechip.com) * * based on the first hfc_usb driver of * Werner Cornelius (werner@isdn-development.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * See Version Histroy at the bottom of this file * */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel_stat.h> #include <linux/usb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include "hisax.h" #include "hisax_if.h" #include "hfc_usb.h" static const char *hfcusb_revision = "$Revision: 2.3.2.24 $ $Date: 2007/10/14 08:40:29 $ "; /* Hisax debug support * debug flags defined in hfc_usb.h as HFCUSB_DBG_[*] */ #define __debug_variable hfc_debug #include "hisax_debug.h" static u_int debug; module_param(debug, uint, 0); static int hfc_debug; /* private vendor specific data */ typedef struct { __u8 led_scheme; // led display scheme signed short led_bits[8]; // array of 8 possible LED bitmask settings char *vend_name; // device name } hfcsusb_vdata; /* VID/PID device list */ static struct usb_device_id hfcusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_OFF, {4, 0, 2, 1}, "ISDN USB TA (Cologne Chip HFC-S USB based)"}), }, { USB_DEVICE(0x0675, 0x1688), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {1, 2, 0, 0}, "DrayTek miniVigor 128 USB ISDN TA"}), }, { USB_DEVICE(0x07b0, 0x0007), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {0x80, -64, -32, -16}, "Billion tiny USB ISDN TA 128"}), }, { USB_DEVICE(0x0742, 0x2008), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {4, 0, 2, 1}, "Stollmann USB TA"}), }, { USB_DEVICE(0x0742, 0x2009), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {4, 0, 2, 1}, "Aceex USB ISDN TA"}), }, { USB_DEVICE(0x0742, 0x200A), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {4, 0, 2, 1}, "OEM USB ISDN TA"}), }, { USB_DEVICE(0x08e3, 0x0301), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {2, 0, 1, 4}, "Olitec USB RNIS"}), }, { USB_DEVICE(0x07fa, 
0x0846), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {0x80, -64, -32, -16}, "Bewan Modem RNIS USB"}), }, { USB_DEVICE(0x07fa, 0x0847), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {0x80, -64, -32, -16}, "Djinn Numeris USB"}), }, { USB_DEVICE(0x07b0, 0x0006), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {0x80, -64, -32, -16}, "Twister ISDN TA"}), }, { USB_DEVICE(0x071d, 0x1005), .driver_info = (unsigned long) &((hfcsusb_vdata) {LED_SCHEME1, {0x02, 0, 0x01, 0x04}, "Eicon DIVA USB 4.0"}), }, { } }; /* structure defining input+output fifos (interrupt/bulk mode) */ struct usb_fifo; /* forward definition */ typedef struct iso_urb_struct { struct urb *purb; __u8 buffer[ISO_BUFFER_SIZE]; /* buffer incoming/outgoing data */ struct usb_fifo *owner_fifo; /* pointer to owner fifo */ } iso_urb_struct; struct hfcusb_data; /* forward definition */ typedef struct usb_fifo { int fifonum; /* fifo index attached to this structure */ int active; /* fifo is currently active */ struct hfcusb_data *hfc; /* pointer to main structure */ int pipe; /* address of endpoint */ __u8 usb_packet_maxlen; /* maximum length for usb transfer */ unsigned int max_size; /* maximum size of receive/send packet */ __u8 intervall; /* interrupt interval */ struct sk_buff *skbuff; /* actual used buffer */ struct urb *urb; /* transfer structure for usb routines */ __u8 buffer[128]; /* buffer incoming/outgoing data */ int bit_line; /* how much bits are in the fifo? 
*/ volatile __u8 usb_transfer_mode; /* switched between ISO and INT */ iso_urb_struct iso[2]; /* need two urbs to have one always for pending */ struct hisax_if *hif; /* hisax interface */ int delete_flg; /* only delete skbuff once */ int last_urblen; /* remember length of last packet */ } usb_fifo; /* structure holding all data for one device */ typedef struct hfcusb_data { /* HiSax Interface for loadable Layer1 drivers */ struct hisax_d_if d_if; /* see hisax_if.h */ struct hisax_b_if b_if[2]; /* see hisax_if.h */ int protocol; struct usb_device *dev; /* our device */ int if_used; /* used interface number */ int alt_used; /* used alternate config */ int ctrl_paksize; /* control pipe packet size */ int ctrl_in_pipe, /* handles for control pipe */ ctrl_out_pipe; int cfg_used; /* configuration index used */ int vend_idx; /* vendor found */ int b_mode[2]; /* B-channel mode */ int l1_activated; /* layer 1 activated */ int disc_flag; /* TRUE if device was disonnected to avoid some USB actions */ int packet_size, iso_packet_size; /* control pipe background handling */ ctrl_buft ctrl_buff[HFC_CTRL_BUFSIZE]; /* buffer holding queued data */ volatile int ctrl_in_idx, ctrl_out_idx, ctrl_cnt; /* input/output pointer + count */ struct urb *ctrl_urb; /* transfer structure for control channel */ struct usb_ctrlrequest ctrl_write; /* buffer for control write request */ struct usb_ctrlrequest ctrl_read; /* same for read request */ __u8 old_led_state, led_state; volatile __u8 threshold_mask; /* threshold actually reported */ volatile __u8 bch_enables; /* or mask for sctrl_r and sctrl register values */ usb_fifo fifos[HFCUSB_NUM_FIFOS]; /* structure holding all fifo data */ volatile __u8 l1_state; /* actual l1 state */ struct timer_list t3_timer; /* timer 3 for activation/deactivation */ struct timer_list t4_timer; /* timer 4 for activation/deactivation */ } hfcusb_data; static void collect_rx_frame(usb_fifo *fifo, __u8 *data, int len, int finish); static inline const char * 
symbolic(struct hfcusb_symbolic_list list[], const int num)
{
	/* map a numeric code to its readable name from a NULL-terminated
	 * lookup table; used when reporting USB error codes */
	int i;
	for (i = 0; list[i].name != NULL; i++)
		if (list[i].num == num)
			return (list[i].name);
	return "<unknown ERROR>";
}

/*
 * Submit the oldest queued register write on the control endpoint.
 * Only acts when the queue is non-empty; ctrl_complete() re-arms us,
 * so exactly one control URB is ever in flight.
 */
static void ctrl_start_transfer(hfcusb_data *hfc)
{
	if (hfc->ctrl_cnt) {
		hfc->ctrl_urb->pipe = hfc->ctrl_out_pipe;
		hfc->ctrl_urb->setup_packet = (u_char *)&hfc->ctrl_write;
		hfc->ctrl_urb->transfer_buffer = NULL;
		hfc->ctrl_urb->transfer_buffer_length = 0;
		/* register address and value travel in the setup packet */
		hfc->ctrl_write.wIndex =
		    cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].hfc_reg);
		hfc->ctrl_write.wValue =
		    cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].reg_val);

		usb_submit_urb(hfc->ctrl_urb, GFP_ATOMIC);	/* start transfer */
	}
}				/* ctrl_start_transfer */

/*
 * Queue a write of @val to chip register @reg on the ring buffer.
 * Returns 1 when the ring is full (request dropped), 0 otherwise.
 * The transfer is kicked off immediately if the queue was empty.
 */
static int queue_control_request(hfcusb_data *hfc, __u8 reg, __u8 val,
				 int action)
{
	ctrl_buft *buf;

	if (hfc->ctrl_cnt >= HFC_CTRL_BUFSIZE)
		return (1);	/* no space left */
	buf = &hfc->ctrl_buff[hfc->ctrl_in_idx];	/* pointer to new index */
	buf->hfc_reg = reg;
	buf->reg_val = val;
	buf->action = action;
	if (++hfc->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
		hfc->ctrl_in_idx = 0;	/* pointer wrap */
	if (++hfc->ctrl_cnt == 1)
		ctrl_start_transfer(hfc);
	return (0);
}

/* control URB completed: pop the finished entry, start the next one */
static void ctrl_complete(struct urb *urb)
{
	hfcusb_data *hfc = (hfcusb_data *) urb->context;

	urb->dev = hfc->dev;
	if (hfc->ctrl_cnt) {
		hfc->ctrl_cnt--;	/* decrement actual count */
		if (++hfc->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
			hfc->ctrl_out_idx = 0;	/* pointer wrap */
		ctrl_start_transfer(hfc);	/* start next transfer */
	}
}

/* write led data to auxport & invert if necessary */
static void write_led(hfcusb_data *hfc, __u8 led_state)
{
	/* only touch the hardware when the state actually changed */
	if (led_state != hfc->old_led_state) {
		hfc->old_led_state = led_state;
		queue_control_request(hfc, HFCUSB_P_DATA, led_state, 1);
	}
}

/*
 * Set or clear one LED bit in the shadow led_state.  A negative entry
 * in the vendor's led_bits[] table marks an active-low LED, so the
 * operation is inverted for it.
 */
static void set_led_bit(hfcusb_data *hfc, signed short led_bits, int on)
{
	if (on) {
		if (led_bits < 0)
			hfc->led_state &= ~abs(led_bits);
		else
			hfc->led_state |= led_bits;
	} else {
		if (led_bits < 0)
			hfc->led_state |= abs(led_bits);
		else
			hfc->led_state &= ~led_bits;
	}
}

/* handle LED
requests */
static void handle_led(hfcusb_data *hfc, int event)
{
	/* per-vendor LED layout comes from the USB ID table entry */
	hfcsusb_vdata *driver_info = (hfcsusb_vdata *)
	    hfcusb_idtab[hfc->vend_idx].driver_info;

	/* if no scheme -> no LED action */
	if (driver_info->led_scheme == LED_OFF)
		return;

	/* led_bits[]: 0 = power, 1 = S0/layer1, 2 = B1, 3 = B2 */
	switch (event) {
	case LED_POWER_ON:
		set_led_bit(hfc, driver_info->led_bits[0], 1);
		set_led_bit(hfc, driver_info->led_bits[1], 0);
		set_led_bit(hfc, driver_info->led_bits[2], 0);
		set_led_bit(hfc, driver_info->led_bits[3], 0);
		break;
	case LED_POWER_OFF:
		set_led_bit(hfc, driver_info->led_bits[0], 0);
		set_led_bit(hfc, driver_info->led_bits[1], 0);
		set_led_bit(hfc, driver_info->led_bits[2], 0);
		set_led_bit(hfc, driver_info->led_bits[3], 0);
		break;
	case LED_S0_ON:
		set_led_bit(hfc, driver_info->led_bits[1], 1);
		break;
	case LED_S0_OFF:
		set_led_bit(hfc, driver_info->led_bits[1], 0);
		break;
	case LED_B1_ON:
		set_led_bit(hfc, driver_info->led_bits[2], 1);
		break;
	case LED_B1_OFF:
		set_led_bit(hfc, driver_info->led_bits[2], 0);
		break;
	case LED_B2_ON:
		set_led_bit(hfc, driver_info->led_bits[3], 1);
		break;
	case LED_B2_OFF:
		set_led_bit(hfc, driver_info->led_bits[3], 0);
		break;
	}
	/* push the updated shadow state out to the chip's aux port */
	write_led(hfc, hfc->led_state);
}

/* ISDN l1 timer T3 expires */
static void l1_timer_expire_t3(hfcusb_data *hfc)
{
	/* activation timed out: tell layer 2 and force the chip's state
	 * machine back to deactivated via the STATES register */
	hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
			   NULL);

	DBG(HFCUSB_DBG_STATES,
	    "HFC-S USB: PH_DEACTIVATE | INDICATION sent (T3 expire)");

	hfc->l1_activated = 0;
	handle_led(hfc, LED_S0_OFF);
	/* deactivate : */
	queue_control_request(hfc, HFCUSB_STATES, 0x10, 1);
	queue_control_request(hfc, HFCUSB_STATES, 3, 1);
}

/* ISDN l1 timer T4 expires */
static void l1_timer_expire_t4(hfcusb_data *hfc)
{
	/* delayed deactivation report (T4 debounces short line drops);
	 * unlike T3 expiry no STATES register writes are issued here */
	hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
			   NULL);

	DBG(HFCUSB_DBG_STATES,
	    "HFC-S USB: PH_DEACTIVATE | INDICATION sent (T4 expire)");

	hfc->l1_activated = 0;
	handle_led(hfc, LED_S0_OFF);
}

/* S0 state changed */
static void s0_state_handler(hfcusb_data *hfc, __u8 state)
{
	__u8 old_state;

	old_state = hfc->l1_state;
	/* ignore no-change and out-of-range state codes */
	if (state == old_state || state <
1 || state > 8)
		return;

	DBG(HFCUSB_DBG_STATES,
	    "HFC-S USB: S0 statechange(%d -> %d)", old_state, state);

	/* T3 (activation guard) is obsolete below F4 and once F7/F8 is
	 * reached */
	if (state < 4 || state == 7 || state == 8) {
		if (timer_pending(&hfc->t3_timer))
			del_timer(&hfc->t3_timer);
		DBG(HFCUSB_DBG_STATES, "HFC-S USB: T3 deactivated");
	}
	/* T4 (deactivation debounce) is obsolete when we are active again */
	if (state >= 7) {
		if (timer_pending(&hfc->t4_timer))
			del_timer(&hfc->t4_timer);
		DBG(HFCUSB_DBG_STATES, "HFC-S USB: T4 deactivated");
	}

	if (state == 7 && !hfc->l1_activated) {
		/* reached F7: report activation upwards */
		hfc->d_if.ifc.l1l2(&hfc->d_if.ifc,
				   PH_ACTIVATE | INDICATION, NULL);
		DBG(HFCUSB_DBG_STATES,
		    "HFC-S USB: PH_ACTIVATE | INDICATION sent");
		hfc->l1_activated = 1;
		handle_led(hfc, LED_S0_ON);
	} else if (state <= 3 /* && activated */) {
		if (old_state == 7 || old_state == 8) {
			/* dropped out of F7/F8: defer the deactivate
			 * indication via T4 instead of reporting at once */
			DBG(HFCUSB_DBG_STATES, "HFC-S USB: T4 activated");
			if (!timer_pending(&hfc->t4_timer)) {
				hfc->t4_timer.expires =
				    jiffies + (HFC_TIMER_T4 * HZ) / 1000;
				add_timer(&hfc->t4_timer);
			}
		} else {
			hfc->d_if.ifc.l1l2(&hfc->d_if.ifc,
					   PH_DEACTIVATE | INDICATION,
					   NULL);
			DBG(HFCUSB_DBG_STATES,
			    "HFC-S USB: PH_DEACTIVATE | INDICATION sent");
			hfc->l1_activated = 0;
			handle_led(hfc, LED_S0_OFF);
		}
	}
	hfc->l1_state = state;
}

/*
 * Prepare an isochronous URB: @num_packets frames of @packet_size bytes
 * each, laid out back-to-back in @buf, completing into @complete.
 */
static void
fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
	      void *buf, int num_packets, int packet_size, int interval,
	      usb_complete_t complete, void *context)
{
	int k;

	urb->dev = dev;
	urb->pipe = pipe;
	urb->complete = complete;
	urb->number_of_packets = num_packets;
	urb->transfer_buffer_length = packet_size * num_packets;
	urb->context = context;
	urb->transfer_buffer = buf;
	urb->transfer_flags = URB_ISO_ASAP;
	urb->actual_length = 0;
	urb->interval = interval;
	for (k = 0; k < num_packets; k++) {
		urb->iso_frame_desc[k].offset = packet_size * k;
		urb->iso_frame_desc[k].length = packet_size;
		urb->iso_frame_desc[k].actual_length = 0;
	}
}

/* allocs urbs and start isoc transfer with two pending urbs to avoid
 * gaps in the transfer chain
 */
static int
start_isoc_chain(usb_fifo *fifo, int num_packets_per_urb,
		 usb_complete_t complete, int packet_size)
{ int i, k, errcode; DBG(HFCUSB_DBG_INIT, "HFC-S USB: starting ISO-URBs for fifo:%d\n", fifo->fifonum); /* allocate Memory for Iso out Urbs */ for (i = 0; i < 2; i++) { if (!(fifo->iso[i].purb)) { fifo->iso[i].purb = usb_alloc_urb(num_packets_per_urb, GFP_KERNEL); if (!(fifo->iso[i].purb)) { printk(KERN_INFO "alloc urb for fifo %i failed!!!", fifo->fifonum); } fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; /* Init the first iso */ if (ISO_BUFFER_SIZE >= (fifo->usb_packet_maxlen * num_packets_per_urb)) { fill_isoc_urb(fifo->iso[i].purb, fifo->hfc->dev, fifo->pipe, fifo->iso[i].buffer, num_packets_per_urb, fifo->usb_packet_maxlen, fifo->intervall, complete, &fifo->iso[i]); memset(fifo->iso[i].buffer, 0, sizeof(fifo->iso[i].buffer)); /* defining packet delimeters in fifo->buffer */ for (k = 0; k < num_packets_per_urb; k++) { fifo->iso[i].purb-> iso_frame_desc[k].offset = k * packet_size; fifo->iso[i].purb-> iso_frame_desc[k].length = packet_size; } } else { printk(KERN_INFO "HFC-S USB: ISO Buffer size to small!\n"); } } fifo->bit_line = BITLINE_INF; errcode = usb_submit_urb(fifo->iso[i].purb, GFP_KERNEL); fifo->active = (errcode >= 0) ? 
1 : 0; if (errcode < 0) printk(KERN_INFO "HFC-S USB: usb_submit_urb URB nr:%d, error(%i): '%s'\n", i, errcode, symbolic(urb_errlist, errcode)); } return (fifo->active); } /* stops running iso chain and frees their pending urbs */ static void stop_isoc_chain(usb_fifo *fifo) { int i; for (i = 0; i < 2; i++) { if (fifo->iso[i].purb) { DBG(HFCUSB_DBG_INIT, "HFC-S USB: Stopping iso chain for fifo %i.%i", fifo->fifonum, i); usb_kill_urb(fifo->iso[i].purb); usb_free_urb(fifo->iso[i].purb); fifo->iso[i].purb = NULL; } } usb_kill_urb(fifo->urb); usb_free_urb(fifo->urb); fifo->urb = NULL; fifo->active = 0; } /* defines how much ISO packets are handled in one URB */ static int iso_packets[8] = { ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D }; static void tx_iso_complete(struct urb *urb) { iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; usb_fifo *fifo = context_iso_urb->owner_fifo; hfcusb_data *hfc = fifo->hfc; int k, tx_offset, num_isoc_packets, sink, len, current_len, errcode; int frame_complete, transp_mode, fifon, status; __u8 threshbit; fifon = fifo->fifonum; status = urb->status; tx_offset = 0; /* ISO transfer only partially completed, look at individual frame status for details */ if (status == -EXDEV) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete with -EXDEV" ", urb->status %d, fifonum %d\n", status, fifon); for (k = 0; k < iso_packets[fifon]; ++k) { errcode = urb->iso_frame_desc[k].status; if (errcode) DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete " "packet %i, status: %i\n", k, errcode); } // clear status, so go on with ISO transfers status = 0; } if (fifo->active && !status) { transp_mode = 0; if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS) transp_mode = 1; /* is FifoFull-threshold set for our channel? 
*/ threshbit = (hfc->threshold_mask & (1 << fifon)); num_isoc_packets = iso_packets[fifon]; /* predict dataflow to avoid fifo overflow */ if (fifon >= HFCUSB_D_TX) { sink = (threshbit) ? SINK_DMIN : SINK_DMAX; } else { sink = (threshbit) ? SINK_MIN : SINK_MAX; } fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe, context_iso_urb->buffer, num_isoc_packets, fifo->usb_packet_maxlen, fifo->intervall, tx_iso_complete, urb->context); memset(context_iso_urb->buffer, 0, sizeof(context_iso_urb->buffer)); frame_complete = 0; /* Generate next ISO Packets */ for (k = 0; k < num_isoc_packets; ++k) { if (fifo->skbuff) { len = fifo->skbuff->len; /* we lower data margin every msec */ fifo->bit_line -= sink; current_len = (0 - fifo->bit_line) / 8; /* maximum 15 byte for every ISO packet makes our life easier */ if (current_len > 14) current_len = 14; current_len = (len <= current_len) ? len : current_len; /* how much bit do we put on the line? */ fifo->bit_line += current_len * 8; context_iso_urb->buffer[tx_offset] = 0; if (current_len == len) { if (!transp_mode) { /* here frame completion */ context_iso_urb-> buffer[tx_offset] = 1; /* add 2 byte flags and 16bit CRC at end of ISDN frame */ fifo->bit_line += 32; } frame_complete = 1; } memcpy(context_iso_urb->buffer + tx_offset + 1, fifo->skbuff->data, current_len); skb_pull(fifo->skbuff, current_len); /* define packet delimeters within the URB buffer */ urb->iso_frame_desc[k].offset = tx_offset; urb->iso_frame_desc[k].length = current_len + 1; tx_offset += (current_len + 1); } else { urb->iso_frame_desc[k].offset = tx_offset++; urb->iso_frame_desc[k].length = 1; fifo->bit_line -= sink; /* we lower data margin every msec */ if (fifo->bit_line < BITLINE_INF) { fifo->bit_line = BITLINE_INF; } } if (frame_complete) { fifo->delete_flg = 1; fifo->hif->l1l2(fifo->hif, PH_DATA | CONFIRM, (void *) (unsigned long) fifo->skbuff-> truesize); if (fifo->skbuff && fifo->delete_flg) { dev_kfree_skb_any(fifo->skbuff); fifo->skbuff = NULL; 
fifo->delete_flg = 0; } frame_complete = 0; } } errcode = usb_submit_urb(urb, GFP_ATOMIC); if (errcode < 0) { printk(KERN_INFO "HFC-S USB: error submitting ISO URB: %d\n", errcode); } } else { if (status && !hfc->disc_flag) { printk(KERN_INFO "HFC-S USB: tx_iso_complete: error(%i): '%s', fifonum=%d\n", status, symbolic(urb_errlist, status), fifon); } } } static void rx_iso_complete(struct urb *urb) { iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; usb_fifo *fifo = context_iso_urb->owner_fifo; hfcusb_data *hfc = fifo->hfc; int k, len, errcode, offset, num_isoc_packets, fifon, maxlen, status; unsigned int iso_status; __u8 *buf; static __u8 eof[8]; fifon = fifo->fifonum; status = urb->status; if (urb->status == -EOVERFLOW) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-USB: ignoring USB DATAOVERRUN fifo(%i)", fifon); status = 0; } /* ISO transfer only partially completed, look at individual frame status for details */ if (status == -EXDEV) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: rx_iso_complete with -EXDEV " "urb->status %d, fifonum %d\n", status, fifon); status = 0; } if (fifo->active && !status) { num_isoc_packets = iso_packets[fifon]; maxlen = fifo->usb_packet_maxlen; for (k = 0; k < num_isoc_packets; ++k) { len = urb->iso_frame_desc[k].actual_length; offset = urb->iso_frame_desc[k].offset; buf = context_iso_urb->buffer + offset; iso_status = urb->iso_frame_desc[k].status; if (iso_status && !hfc->disc_flag) DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: rx_iso_complete " "ISO packet %i, status: %i\n", k, iso_status); if (fifon == HFCUSB_D_RX) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: ISO-D-RX lst_urblen:%2d " "act_urblen:%2d max-urblen:%2d EOF:0x%0x", fifo->last_urblen, len, maxlen, eof[5]); DBG_PACKET(HFCUSB_DBG_VERBOSE_USB, buf, len); } if (fifo->last_urblen != maxlen) { /* the threshold mask is in the 2nd status byte */ hfc->threshold_mask = buf[1]; /* care for L1 state only for D-Channel to avoid overlapped iso completions */ if (fifon == HFCUSB_D_RX) { /* 
the S0 state is in the upper half of the 1st status byte */ s0_state_handler(hfc, buf[0] >> 4); } eof[fifon] = buf[0] & 1; if (len > 2) collect_rx_frame(fifo, buf + 2, len - 2, (len < maxlen) ? eof[fifon] : 0); } else { collect_rx_frame(fifo, buf, len, (len < maxlen) ? eof[fifon] : 0); } fifo->last_urblen = len; } fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe, context_iso_urb->buffer, num_isoc_packets, fifo->usb_packet_maxlen, fifo->intervall, rx_iso_complete, urb->context); errcode = usb_submit_urb(urb, GFP_ATOMIC); if (errcode < 0) { printk(KERN_ERR "HFC-S USB: error submitting ISO URB: %d\n", errcode); } } else { if (status && !hfc->disc_flag) { printk(KERN_ERR "HFC-S USB: rx_iso_complete : " "urb->status %d, fifonum %d\n", status, fifon); } } } /* collect rx data from INT- and ISO-URBs */ static void collect_rx_frame(usb_fifo *fifo, __u8 *data, int len, int finish) { hfcusb_data *hfc = fifo->hfc; int transp_mode, fifon; fifon = fifo->fifonum; transp_mode = 0; if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS) transp_mode = 1; if (!fifo->skbuff) { fifo->skbuff = dev_alloc_skb(fifo->max_size + 3); if (!fifo->skbuff) { printk(KERN_ERR "HFC-S USB: cannot allocate buffer for fifo(%d)\n", fifon); return; } } if (len) { if (fifo->skbuff->len + len < fifo->max_size) { memcpy(skb_put(fifo->skbuff, len), data, len); } else { DBG(HFCUSB_DBG_FIFO_ERR, "HCF-USB: got frame exceeded fifo->max_size(%d) fifo(%d)", fifo->max_size, fifon); DBG_SKB(HFCUSB_DBG_VERBOSE_USB, fifo->skbuff); skb_trim(fifo->skbuff, 0); } } if (transp_mode && fifo->skbuff->len >= 128) { fifo->hif->l1l2(fifo->hif, PH_DATA | INDICATION, fifo->skbuff); fifo->skbuff = NULL; return; } /* we have a complete hdlc packet */ if (finish) { if (fifo->skbuff->len > 3 && !fifo->skbuff->data[fifo->skbuff->len - 1]) { if (fifon == HFCUSB_D_RX) { DBG(HFCUSB_DBG_DCHANNEL, "HFC-S USB: D-RX len(%d)", fifo->skbuff->len); DBG_SKB(HFCUSB_DBG_DCHANNEL, fifo->skbuff); } /* remove CRC & status */ skb_trim(fifo->skbuff, 
fifo->skbuff->len - 3); if (fifon == HFCUSB_PCM_RX) { fifo->hif->l1l2(fifo->hif, PH_DATA_E | INDICATION, fifo->skbuff); } else fifo->hif->l1l2(fifo->hif, PH_DATA | INDICATION, fifo->skbuff); fifo->skbuff = NULL; /* buffer was freed from upper layer */ } else { DBG(HFCUSB_DBG_FIFO_ERR, "HFC-S USB: ERROR frame len(%d) fifo(%d)", fifo->skbuff->len, fifon); DBG_SKB(HFCUSB_DBG_VERBOSE_USB, fifo->skbuff); skb_trim(fifo->skbuff, 0); } } } static void rx_int_complete(struct urb *urb) { int len; int status; __u8 *buf, maxlen, fifon; usb_fifo *fifo = (usb_fifo *) urb->context; hfcusb_data *hfc = fifo->hfc; static __u8 eof[8]; urb->dev = hfc->dev; /* security init */ fifon = fifo->fifonum; if ((!fifo->active) || (urb->status)) { DBG(HFCUSB_DBG_INIT, "HFC-S USB: RX-Fifo %i is going down (%i)", fifon, urb->status); fifo->urb->interval = 0; /* cancel automatic rescheduling */ if (fifo->skbuff) { dev_kfree_skb_any(fifo->skbuff); fifo->skbuff = NULL; } return; } len = urb->actual_length; buf = fifo->buffer; maxlen = fifo->usb_packet_maxlen; if (fifon == HFCUSB_D_RX) { DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: INT-D-RX lst_urblen:%2d " "act_urblen:%2d max-urblen:%2d EOF:0x%0x", fifo->last_urblen, len, maxlen, eof[5]); DBG_PACKET(HFCUSB_DBG_VERBOSE_USB, buf, len); } if (fifo->last_urblen != fifo->usb_packet_maxlen) { /* the threshold mask is in the 2nd status byte */ hfc->threshold_mask = buf[1]; /* the S0 state is in the upper half of the 1st status byte */ s0_state_handler(hfc, buf[0] >> 4); eof[fifon] = buf[0] & 1; /* if we have more than the 2 status bytes -> collect data */ if (len > 2) collect_rx_frame(fifo, buf + 2, urb->actual_length - 2, (len < maxlen) ? eof[fifon] : 0); } else { collect_rx_frame(fifo, buf, urb->actual_length, (len < maxlen) ? 
eof[fifon] : 0); } fifo->last_urblen = urb->actual_length; status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { printk(KERN_INFO "HFC-S USB: %s error resubmitting URB fifo(%d)\n", __func__, fifon); } } /* start initial INT-URB for certain fifo */ static void start_int_fifo(usb_fifo *fifo) { int errcode; DBG(HFCUSB_DBG_INIT, "HFC-S USB: starting RX INT-URB for fifo:%d\n", fifo->fifonum); if (!fifo->urb) { fifo->urb = usb_alloc_urb(0, GFP_KERNEL); if (!fifo->urb) return; } usb_fill_int_urb(fifo->urb, fifo->hfc->dev, fifo->pipe, fifo->buffer, fifo->usb_packet_maxlen, rx_int_complete, fifo, fifo->intervall); fifo->active = 1; /* must be marked active */ errcode = usb_submit_urb(fifo->urb, GFP_KERNEL); if (errcode) { printk(KERN_ERR "HFC-S USB: submit URB error(%s): status:%i\n", __func__, errcode); fifo->active = 0; fifo->skbuff = NULL; } } static void setup_bchannel(hfcusb_data *hfc, int channel, int mode) { __u8 val, idx_table[2] = { 0, 2 }; if (hfc->disc_flag) { return; } DBG(HFCUSB_DBG_STATES, "HFC-S USB: setting channel %d to mode %d", channel, mode); hfc->b_mode[channel] = mode; /* setup CON_HDLC */ val = 0; if (mode != L1_MODE_NULL) val = 8; /* enable fifo? 
*/ if (mode == L1_MODE_TRANS) val |= 2; /* set transparent bit */ /* set FIFO to transmit register */ queue_control_request(hfc, HFCUSB_FIFO, idx_table[channel], 1); queue_control_request(hfc, HFCUSB_CON_HDLC, val, 1); /* reset fifo */ queue_control_request(hfc, HFCUSB_INC_RES_F, 2, 1); /* set FIFO to receive register */ queue_control_request(hfc, HFCUSB_FIFO, idx_table[channel] + 1, 1); queue_control_request(hfc, HFCUSB_CON_HDLC, val, 1); /* reset fifo */ queue_control_request(hfc, HFCUSB_INC_RES_F, 2, 1); val = 0x40; if (hfc->b_mode[0]) val |= 1; if (hfc->b_mode[1]) val |= 2; queue_control_request(hfc, HFCUSB_SCTRL, val, 1); val = 0; if (hfc->b_mode[0]) val |= 1; if (hfc->b_mode[1]) val |= 2; queue_control_request(hfc, HFCUSB_SCTRL_R, val, 1); if (mode == L1_MODE_NULL) { if (channel) handle_led(hfc, LED_B2_OFF); else handle_led(hfc, LED_B1_OFF); } else { if (channel) handle_led(hfc, LED_B2_ON); else handle_led(hfc, LED_B1_ON); } } static void hfc_usb_l2l1(struct hisax_if *my_hisax_if, int pr, void *arg) { usb_fifo *fifo = my_hisax_if->priv; hfcusb_data *hfc = fifo->hfc; switch (pr) { case PH_ACTIVATE | REQUEST: if (fifo->fifonum == HFCUSB_D_TX) { DBG(HFCUSB_DBG_STATES, "HFC_USB: hfc_usb_d_l2l1 D-chan: PH_ACTIVATE | REQUEST"); if (hfc->l1_state != 3 && hfc->l1_state != 7) { hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); DBG(HFCUSB_DBG_STATES, "HFC-S USB: PH_DEACTIVATE | INDICATION sent (not state 3 or 7)"); } else { if (hfc->l1_state == 7) { /* l1 already active */ hfc->d_if.ifc.l1l2(&hfc-> d_if. ifc, PH_ACTIVATE | INDICATION, NULL); DBG(HFCUSB_DBG_STATES, "HFC-S USB: PH_ACTIVATE | INDICATION sent again ;)"); } else { /* force sending sending INFO1 */ queue_control_request(hfc, HFCUSB_STATES, 0x14, 1); mdelay(1); /* start l1 activation */ queue_control_request(hfc, HFCUSB_STATES, 0x04, 1); if (!timer_pending (&hfc->t3_timer)) { hfc->t3_timer. 
expires = jiffies + (HFC_TIMER_T3 * HZ) / 1000; add_timer(&hfc-> t3_timer); } } } } else { DBG(HFCUSB_DBG_STATES, "HFC_USB: hfc_usb_d_l2l1 B-chan: PH_ACTIVATE | REQUEST"); setup_bchannel(hfc, (fifo->fifonum == HFCUSB_B1_TX) ? 0 : 1, (long) arg); fifo->hif->l1l2(fifo->hif, PH_ACTIVATE | INDICATION, NULL); } break; case PH_DEACTIVATE | REQUEST: if (fifo->fifonum == HFCUSB_D_TX) { DBG(HFCUSB_DBG_STATES, "HFC_USB: hfc_usb_d_l2l1 D-chan: PH_DEACTIVATE | REQUEST"); } else { DBG(HFCUSB_DBG_STATES, "HFC_USB: hfc_usb_d_l2l1 Bx-chan: PH_DEACTIVATE | REQUEST"); setup_bchannel(hfc, (fifo->fifonum == HFCUSB_B1_TX) ? 0 : 1, (int) L1_MODE_NULL); fifo->hif->l1l2(fifo->hif, PH_DEACTIVATE | INDICATION, NULL); } break; case PH_DATA | REQUEST: if (fifo->skbuff && fifo->delete_flg) { dev_kfree_skb_any(fifo->skbuff); fifo->skbuff = NULL; fifo->delete_flg = 0; } fifo->skbuff = arg; /* we have a new buffer */ break; default: DBG(HFCUSB_DBG_STATES, "HFC_USB: hfc_usb_d_l2l1: unknown state : %#x", pr); break; } } /* initial init HFC-S USB chip registers, HiSax interface, USB URBs */ static int hfc_usb_init(hfcusb_data *hfc) { usb_fifo *fifo; int i; u_char b; struct hisax_b_if *p_b_if[2]; /* check the chip id */ if (read_usb(hfc, HFCUSB_CHIP_ID, &b) != 1) { printk(KERN_INFO "HFC-USB: cannot read chip id\n"); return (1); } if (b != HFCUSB_CHIPID) { printk(KERN_INFO "HFC-S USB: Invalid chip id 0x%02x\n", b); return (1); } /* first set the needed config, interface and alternate */ usb_set_interface(hfc->dev, hfc->if_used, hfc->alt_used); /* do Chip reset */ write_usb(hfc, HFCUSB_CIRM, 8); /* aux = output, reset off */ write_usb(hfc, HFCUSB_CIRM, 0x10); /* set USB_SIZE to match wMaxPacketSize for INT or BULK transfers */ write_usb(hfc, HFCUSB_USB_SIZE, (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4)); /* set USB_SIZE_I to match wMaxPacketSize for ISO transfers */ write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size); /* enable PCM/GCI master mode */ write_usb(hfc, HFCUSB_MST_MODE1, 0); 
/* set default values */ write_usb(hfc, HFCUSB_MST_MODE0, 1); /* enable master mode */ /* init the fifos */ write_usb(hfc, HFCUSB_F_THRES, (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4)); fifo = hfc->fifos; for (i = 0; i < HFCUSB_NUM_FIFOS; i++) { write_usb(hfc, HFCUSB_FIFO, i); /* select the desired fifo */ fifo[i].skbuff = NULL; /* init buffer pointer */ fifo[i].max_size = (i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN; fifo[i].last_urblen = 0; /* set 2 bit for D- & E-channel */ write_usb(hfc, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2)); /* rx hdlc, enable IFF for D-channel */ write_usb(hfc, HFCUSB_CON_HDLC, ((i == HFCUSB_D_TX) ? 0x09 : 0x08)); write_usb(hfc, HFCUSB_INC_RES_F, 2); /* reset the fifo */ } write_usb(hfc, HFCUSB_CLKDEL, 0x0f); /* clock delay value */ write_usb(hfc, HFCUSB_STATES, 3 | 0x10); /* set deactivated mode */ write_usb(hfc, HFCUSB_STATES, 3); /* enable state machine */ write_usb(hfc, HFCUSB_SCTRL_R, 0); /* disable both B receivers */ write_usb(hfc, HFCUSB_SCTRL, 0x40); /* disable B transmitters + capacitive mode */ /* set both B-channel to not connected */ hfc->b_mode[0] = L1_MODE_NULL; hfc->b_mode[1] = L1_MODE_NULL; hfc->l1_activated = 0; hfc->disc_flag = 0; hfc->led_state = 0; hfc->old_led_state = 0; /* init the t3 timer */ init_timer(&hfc->t3_timer); hfc->t3_timer.data = (long) hfc; hfc->t3_timer.function = (void *) l1_timer_expire_t3; /* init the t4 timer */ init_timer(&hfc->t4_timer); hfc->t4_timer.data = (long) hfc; hfc->t4_timer.function = (void *) l1_timer_expire_t4; /* init the background machinery for control requests */ hfc->ctrl_read.bRequestType = 0xc0; hfc->ctrl_read.bRequest = 1; hfc->ctrl_read.wLength = cpu_to_le16(1); hfc->ctrl_write.bRequestType = 0x40; hfc->ctrl_write.bRequest = 0; hfc->ctrl_write.wLength = 0; usb_fill_control_urb(hfc->ctrl_urb, hfc->dev, hfc->ctrl_out_pipe, (u_char *)&hfc->ctrl_write, NULL, 0, ctrl_complete, hfc); /* Init All Fifos */ for (i = 0; i < HFCUSB_NUM_FIFOS; i++) { 
hfc->fifos[i].iso[0].purb = NULL; hfc->fifos[i].iso[1].purb = NULL; hfc->fifos[i].active = 0; } /* register Modul to upper Hisax Layers */ hfc->d_if.owner = THIS_MODULE; hfc->d_if.ifc.priv = &hfc->fifos[HFCUSB_D_TX]; hfc->d_if.ifc.l2l1 = hfc_usb_l2l1; for (i = 0; i < 2; i++) { hfc->b_if[i].ifc.priv = &hfc->fifos[HFCUSB_B1_TX + i * 2]; hfc->b_if[i].ifc.l2l1 = hfc_usb_l2l1; p_b_if[i] = &hfc->b_if[i]; } /* default Prot: EURO ISDN, should be a module_param */ hfc->protocol = 2; i = hisax_register(&hfc->d_if, p_b_if, "hfc_usb", hfc->protocol); if (i) { printk(KERN_INFO "HFC-S USB: hisax_register -> %d\n", i); return i; } #ifdef CONFIG_HISAX_DEBUG hfc_debug = debug; #endif for (i = 0; i < 4; i++) hfc->fifos[i].hif = &p_b_if[i / 2]->ifc; for (i = 4; i < 8; i++) hfc->fifos[i].hif = &hfc->d_if.ifc; /* 3 (+1) INT IN + 3 ISO OUT */ if (hfc->cfg_used == CNF_3INT3ISO || hfc->cfg_used == CNF_4INT3ISO) { start_int_fifo(hfc->fifos + HFCUSB_D_RX); if (hfc->fifos[HFCUSB_PCM_RX].pipe) start_int_fifo(hfc->fifos + HFCUSB_PCM_RX); start_int_fifo(hfc->fifos + HFCUSB_B1_RX); start_int_fifo(hfc->fifos + HFCUSB_B2_RX); } /* 3 (+1) ISO IN + 3 ISO OUT */ if (hfc->cfg_used == CNF_3ISO3ISO || hfc->cfg_used == CNF_4ISO3ISO) { start_isoc_chain(hfc->fifos + HFCUSB_D_RX, ISOC_PACKETS_D, rx_iso_complete, 16); if (hfc->fifos[HFCUSB_PCM_RX].pipe) start_isoc_chain(hfc->fifos + HFCUSB_PCM_RX, ISOC_PACKETS_D, rx_iso_complete, 16); start_isoc_chain(hfc->fifos + HFCUSB_B1_RX, ISOC_PACKETS_B, rx_iso_complete, 16); start_isoc_chain(hfc->fifos + HFCUSB_B2_RX, ISOC_PACKETS_B, rx_iso_complete, 16); } start_isoc_chain(hfc->fifos + HFCUSB_D_TX, ISOC_PACKETS_D, tx_iso_complete, 1); start_isoc_chain(hfc->fifos + HFCUSB_B1_TX, ISOC_PACKETS_B, tx_iso_complete, 1); start_isoc_chain(hfc->fifos + HFCUSB_B2_TX, ISOC_PACKETS_B, tx_iso_complete, 1); handle_led(hfc, LED_POWER_ON); return (0); } /* initial callback for each plugged USB device */ static int hfc_usb_probe(struct usb_interface *intf, const struct usb_device_id 
*id) { struct usb_device *dev = interface_to_usbdev(intf); hfcusb_data *context; struct usb_host_interface *iface = intf->cur_altsetting; struct usb_host_interface *iface_used = NULL; struct usb_host_endpoint *ep; int ifnum = iface->desc.bInterfaceNumber; int i, idx, alt_idx, probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found, cidx, ep_addr; int cmptbl[16], small_match, iso_packet_size, packet_size, alt_used = 0; hfcsusb_vdata *driver_info; vend_idx = 0xffff; for (i = 0; hfcusb_idtab[i].idVendor; i++) { if ((le16_to_cpu(dev->descriptor.idVendor) == hfcusb_idtab[i].idVendor) && (le16_to_cpu(dev->descriptor.idProduct) == hfcusb_idtab[i].idProduct)) { vend_idx = i; continue; } } printk(KERN_INFO "HFC-S USB: probing interface(%d) actalt(%d) minor(%d)\n", ifnum, iface->desc.bAlternateSetting, intf->minor); if (vend_idx != 0xffff) { /* if vendor and product ID is OK, start probing alternate settings */ alt_idx = 0; small_match = 0xffff; /* default settings */ iso_packet_size = 16; packet_size = 64; while (alt_idx < intf->num_altsetting) { iface = intf->altsetting + alt_idx; probe_alt_setting = iface->desc.bAlternateSetting; cfg_used = 0; /* check for config EOL element */ while (validconf[cfg_used][0]) { cfg_found = 1; vcf = validconf[cfg_used]; /* first endpoint descriptor */ ep = iface->endpoint; memcpy(cmptbl, vcf, 16 * sizeof(int)); /* check for all endpoints in this alternate setting */ for (i = 0; i < iface->desc.bNumEndpoints; i++) { ep_addr = ep->desc.bEndpointAddress; /* get endpoint base */ idx = ((ep_addr & 0x7f) - 1) * 2; if (ep_addr & 0x80) idx++; attr = ep->desc.bmAttributes; if (cmptbl[idx] == EP_NUL) { cfg_found = 0; } if (attr == USB_ENDPOINT_XFER_INT && cmptbl[idx] == EP_INT) cmptbl[idx] = EP_NUL; if (attr == USB_ENDPOINT_XFER_BULK && cmptbl[idx] == EP_BLK) cmptbl[idx] = EP_NUL; if (attr == USB_ENDPOINT_XFER_ISOC && cmptbl[idx] == EP_ISO) cmptbl[idx] = EP_NUL; /* check if all INT endpoints match minimum interval */ if ((attr == 
USB_ENDPOINT_XFER_INT) && (ep->desc.bInterval < vcf[17])) { cfg_found = 0; } ep++; } for (i = 0; i < 16; i++) { /* all entries must be EP_NOP or EP_NUL for a valid config */ if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL) cfg_found = 0; } if (cfg_found) { if (cfg_used < small_match) { small_match = cfg_used; alt_used = probe_alt_setting; iface_used = iface; } } cfg_used++; } alt_idx++; } /* (alt_idx < intf->num_altsetting) */ /* found a valid USB Ta Endpint config */ if (small_match != 0xffff) { iface = iface_used; if (!(context = kzalloc(sizeof(hfcusb_data), GFP_KERNEL))) return (-ENOMEM); /* got no mem */ ep = iface->endpoint; vcf = validconf[small_match]; for (i = 0; i < iface->desc.bNumEndpoints; i++) { ep_addr = ep->desc.bEndpointAddress; /* get endpoint base */ idx = ((ep_addr & 0x7f) - 1) * 2; if (ep_addr & 0x80) idx++; cidx = idx & 7; attr = ep->desc.bmAttributes; /* init Endpoints */ if (vcf[idx] != EP_NOP && vcf[idx] != EP_NUL) { switch (attr) { case USB_ENDPOINT_XFER_INT: context-> fifos[cidx]. pipe = usb_rcvintpipe (dev, ep->desc. bEndpointAddress); context-> fifos[cidx]. usb_transfer_mode = USB_INT; packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; case USB_ENDPOINT_XFER_BULK: if (ep_addr & 0x80) context-> fifos [cidx]. pipe = usb_rcvbulkpipe (dev, ep-> desc. bEndpointAddress); else context-> fifos [cidx]. pipe = usb_sndbulkpipe (dev, ep-> desc. bEndpointAddress); context-> fifos[cidx]. usb_transfer_mode = USB_BULK; packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; case USB_ENDPOINT_XFER_ISOC: if (ep_addr & 0x80) context-> fifos [cidx]. pipe = usb_rcvisocpipe (dev, ep-> desc. bEndpointAddress); else context-> fifos [cidx]. pipe = usb_sndisocpipe (dev, ep-> desc. bEndpointAddress); context-> fifos[cidx]. usb_transfer_mode = USB_ISOC; iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); break; default: context-> fifos[cidx]. pipe = 0; } /* switch attribute */ if (context->fifos[cidx].pipe) { context->fifos[cidx]. 
fifonum = cidx; context->fifos[cidx].hfc = context; context->fifos[cidx].usb_packet_maxlen = le16_to_cpu(ep->desc.wMaxPacketSize); context->fifos[cidx]. intervall = ep->desc.bInterval; context->fifos[cidx]. skbuff = NULL; } } ep++; } context->dev = dev; /* save device */ context->if_used = ifnum; /* save used interface */ context->alt_used = alt_used; /* and alternate config */ context->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */ context->cfg_used = vcf[16]; /* store used config */ context->vend_idx = vend_idx; /* store found vendor */ context->packet_size = packet_size; context->iso_packet_size = iso_packet_size; /* create the control pipes needed for register access */ context->ctrl_in_pipe = usb_rcvctrlpipe(context->dev, 0); context->ctrl_out_pipe = usb_sndctrlpipe(context->dev, 0); driver_info = (hfcsusb_vdata *) hfcusb_idtab[vend_idx].driver_info; context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); if (!context->ctrl_urb) { pr_warn("%s: No memory for control urb\n", driver_info->vend_name); kfree(context); return -ENOMEM; } pr_info("HFC-S USB: detected \"%s\"\n", driver_info->vend_name); DBG(HFCUSB_DBG_INIT, "HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)", conf_str[small_match], context->if_used, context->alt_used, validconf[small_match][18]); /* init the chip and register the driver */ if (hfc_usb_init(context)) { usb_kill_urb(context->ctrl_urb); usb_free_urb(context->ctrl_urb); context->ctrl_urb = NULL; kfree(context); return (-EIO); } usb_set_intfdata(intf, context); return (0); } } else { printk(KERN_INFO "HFC-S USB: no valid vendor found in USB descriptor\n"); } return (-EIO); } /* callback for unplugged USB device */ static void hfc_usb_disconnect(struct usb_interface *intf) { hfcusb_data *context = usb_get_intfdata(intf); int i; handle_led(context, LED_POWER_OFF); schedule_timeout(HZ / 100); printk(KERN_INFO "HFC-S USB: device disconnect\n"); context->disc_flag = 1; usb_set_intfdata(intf, NULL); if 
(timer_pending(&context->t3_timer)) del_timer(&context->t3_timer); if (timer_pending(&context->t4_timer)) del_timer(&context->t4_timer); /* tell all fifos to terminate */ for (i = 0; i < HFCUSB_NUM_FIFOS; i++) { if (context->fifos[i].usb_transfer_mode == USB_ISOC) { if (context->fifos[i].active > 0) { stop_isoc_chain(&context->fifos[i]); DBG(HFCUSB_DBG_INIT, "HFC-S USB: %s stopping ISOC chain Fifo(%i)", __func__, i); } } else { if (context->fifos[i].active > 0) { context->fifos[i].active = 0; DBG(HFCUSB_DBG_INIT, "HFC-S USB: %s unlinking URB for Fifo(%i)", __func__, i); } usb_kill_urb(context->fifos[i].urb); usb_free_urb(context->fifos[i].urb); context->fifos[i].urb = NULL; } context->fifos[i].active = 0; } usb_kill_urb(context->ctrl_urb); usb_free_urb(context->ctrl_urb); context->ctrl_urb = NULL; hisax_unregister(&context->d_if); kfree(context); /* free our structure again */ } static struct usb_driver hfc_drv = { .name = "hfc_usb", .id_table = hfcusb_idtab, .probe = hfc_usb_probe, .disconnect = hfc_usb_disconnect, .disable_hub_initiated_lpm = 1, }; static void __exit hfc_usb_mod_exit(void) { usb_deregister(&hfc_drv); /* release our driver */ printk(KERN_INFO "HFC-S USB: module removed\n"); } static int __init hfc_usb_mod_init(void) { char revstr[30], datestr[30], dummy[30]; #ifndef CONFIG_HISAX_DEBUG hfc_debug = debug; #endif sscanf(hfcusb_revision, "%s %s $ %s %s %s $ ", dummy, revstr, dummy, datestr, dummy); printk(KERN_INFO "HFC-S USB: driver module revision %s date %s loaded, (debug=%i)\n", revstr, datestr, debug); if (usb_register(&hfc_drv)) { printk(KERN_INFO "HFC-S USB: Unable to register HFC-S USB module at usb stack\n"); return (-1); /* unable to register */ } return (0); } module_init(hfc_usb_mod_init); module_exit(hfc_usb_mod_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, hfcusb_idtab);
gpl-2.0
x456/kernel
drivers/staging/gdm72xx/sdio_boot.c
2317
3501
/* * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mmc/core.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/firmware.h> #include "gdm_sdio.h" #include "sdio_boot.h" #define TYPE_A_HEADER_SIZE 4 #define TYPE_A_LOOKAHEAD_SIZE 16 #define YMEM0_SIZE 0x8000 /* 32kbytes */ #define DOWNLOAD_SIZE (YMEM0_SIZE - TYPE_A_HEADER_SIZE) #define FW_DIR "gdm72xx/" #define FW_KRN "gdmskrn.bin" #define FW_RFS "gdmsrfs.bin" static u8 *tx_buf; static int ack_ready(struct sdio_func *func) { unsigned long start = jiffies; u8 val; int ret; while ((jiffies - start) < HZ) { val = sdio_readb(func, 0x13, &ret); if (val & 0x01) return 1; schedule(); } return 0; } static int download_image(struct sdio_func *func, const char *img_name) { int ret = 0, len, pno; u8 *buf = tx_buf; loff_t pos = 0; int img_len; const struct firmware *firm; ret = request_firmware(&firm, img_name, &func->dev); if (ret < 0) { dev_err(&func->dev, "requesting firmware %s failed with error %d\n", img_name, ret); return ret; } buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; img_len = firm->size; if (img_len <= 0) { ret = -1; goto out; } pno = 0; while (img_len > 0) { if (img_len > DOWNLOAD_SIZE) { len = DOWNLOAD_SIZE; buf[3] = 0; } else { len = img_len; /* the last packet */ 
buf[3] = 2; } buf[0] = len & 0xff; buf[1] = (len >> 8) & 0xff; buf[2] = (len >> 16) & 0xff; memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len); ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE); if (ret < 0) { dev_err(&func->dev, "send image error: packet number = %d ret = %d\n", pno, ret); goto out; } if (buf[3] == 2) /* The last packet */ break; if (!ack_ready(func)) { ret = -EIO; dev_err(&func->dev, "Ack is not ready.\n"); goto out; } ret = sdio_memcpy_fromio(func, buf, 0, TYPE_A_LOOKAHEAD_SIZE); if (ret < 0) { dev_err(&func->dev, "receive ack error: packet number = %d ret = %d\n", pno, ret); goto out; } sdio_writeb(func, 0x01, 0x13, &ret); sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */ img_len -= DOWNLOAD_SIZE; pos += DOWNLOAD_SIZE; pno++; } out: kfree(buf); return ret; } int sdio_boot(struct sdio_func *func) { int ret; const char *krn_name = FW_DIR FW_KRN; const char *rfs_name = FW_DIR FW_RFS; tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL); if (tx_buf == NULL) return -ENOMEM; ret = download_image(func, krn_name); if (ret) goto restore_fs; dev_info(&func->dev, "GCT: Kernel download success.\n"); ret = download_image(func, rfs_name); if (ret) goto restore_fs; dev_info(&func->dev, "GCT: Filesystem download success.\n"); restore_fs: kfree(tx_buf); return ret; }
gpl-2.0
one-2-z/a830s_kernel
drivers/hid/hid-roccat-kone.c
2573
22944
/*
 * Roccat Kone driver for Linux
 *
 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

/*
 * Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
 * part. The keyboard part enables the mouse to execute stored macros with mixed
 * key- and button-events.
 *
 * TODO implement on-the-fly polling-rate change
 *      The windows driver has the ability to change the polling rate of the
 *      device on the press of a mousebutton.
 *      Is it possible to remove and reinstall the urb in raw-event- or any
 *      other handler, or to defer this action to be executed somewhere else?
 *
 * TODO is it possible to overwrite group for sysfs attributes via udev?
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-kone.h"

/* Device profiles are numbered 1-5; these are the 0-based array indices. */
static uint profile_numbers[5] = {0, 1, 2, 3, 4};

/* kone_class is used for creating sysfs attributes via roccat char device */
static struct class *kone_class;

/* Recompute the additive byte checksum over all but the last 2 bytes. */
static void kone_set_settings_checksum(struct kone_settings *settings)
{
	uint16_t checksum = 0;
	unsigned char *address = (unsigned char *)settings;
	int i;

	for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
		checksum += *address;
	settings->checksum = cpu_to_le16(checksum);
}

/*
 * Checks success after writing data to mouse
 * On success returns 0
 * On failure returns errno
 */
static int kone_check_write(struct usb_device *usb_dev)
{
	int retval;
	uint8_t data;

	do {
		/*
		 * Mouse needs 50 msecs until it says ok, but there are
		 * 30 more msecs needed for next write to work.
		 */
		msleep(80);

		retval = roccat_common_receive(usb_dev,
				kone_command_confirm_write, &data, 1);
		if (retval)
			return retval;

		/*
		 * value of 3 seems to mean something like
		 * "not finished yet, but it looks good"
		 * So check again after a moment.
		 */
	} while (data == 3);

	if (data == 1) /* everything alright */
		return 0;

	/* unknown answer */
	hid_err(usb_dev, "got retval %d when checking write\n", data);
	return -EIO;
}

/*
 * Reads settings from mouse and stores it in @buf
 * On success returns 0
 * On failure returns errno
 */
static int kone_get_settings(struct usb_device *usb_dev,
		struct kone_settings *buf)
{
	return roccat_common_receive(usb_dev, kone_command_settings, buf,
			sizeof(struct kone_settings));
}

/*
 * Writes settings from @buf to mouse
 * On success returns 0
 * On failure returns errno
 */
static int kone_set_settings(struct usb_device *usb_dev,
		struct kone_settings const *settings)
{
	int retval;

	retval = roccat_common_send(usb_dev, kone_command_settings,
			settings, sizeof(struct kone_settings));
	if (retval)
		return retval;
	return kone_check_write(usb_dev);
}

/*
 * Reads profile data from mouse and stores it in @buf
 * @number: profile number to read (1-5)
 * On success returns 0
 * On failure returns errno
 */
static int kone_get_profile(struct usb_device *usb_dev,
		struct kone_profile *buf, int number)
{
	int len;

	if (number < 1 || number > 5)
		return -EINVAL;

	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			USB_REQ_CLEAR_FEATURE,
			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			kone_command_profile, number, buf,
			sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);

	if (len != sizeof(struct kone_profile))
		return -EIO;

	return 0;
}

/*
 * Writes profile data to mouse.
 * @number: profile number to write (1-5)
 * On success returns 0
 * On failure returns errno
 */
static int kone_set_profile(struct usb_device *usb_dev,
		struct kone_profile const *profile, int number)
{
	int len;

	if (number < 1 || number > 5)
		return -EINVAL;

	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			USB_REQ_SET_CONFIGURATION,
			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
			kone_command_profile, number, (void *)profile,
			sizeof(struct kone_profile),
			USB_CTRL_SET_TIMEOUT);

	if (len != sizeof(struct kone_profile))
		return len;

	if (kone_check_write(usb_dev))
		return -EIO;

	return 0;
}

/*
 * Reads value of "fast-clip-weight" and stores it in @result
 * On success returns 0
 * On failure returns errno
 */
static int kone_get_weight(struct usb_device *usb_dev, int *result)
{
	int retval;
	uint8_t data;

	retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1);
	if (retval)
		return retval;

	*result = (int)data;
	return 0;
}

/*
 * Reads firmware_version of mouse and stores it in @result
 * On success returns 0
 * On failure returns errno
 */
static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
{
	int retval;
	uint16_t data;

	retval = roccat_common_receive(usb_dev, kone_command_firmware_version,
			&data, 2);
	if (retval)
		return retval;

	*result = le16_to_cpu(data);
	return 0;
}

/* sysfs binary read of the cached settings block. */
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf,
		loff_t off, size_t count) {
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct kone_settings))
		return 0;

	if (off + count > sizeof(struct kone_settings))
		count = sizeof(struct kone_settings) - off;

	mutex_lock(&kone->kone_lock);
	memcpy(buf, ((char const *)&kone->settings) + off, count);
	mutex_unlock(&kone->kone_lock);

	return count;
}

/*
 * Writing settings automatically activates startup_profile.
 * This function keeps values in kone_device up to date and assumes that in
 * case of error the old data is still valid
 */
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf,
		loff_t off, size_t count) {
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0, difference;

	/* I need to get my data in one piece */
	if (off != 0 || count != sizeof(struct kone_settings))
		return -EINVAL;

	mutex_lock(&kone->kone_lock);
	difference = memcmp(buf, &kone->settings,
			sizeof(struct kone_settings));
	if (difference) {
		retval = kone_set_settings(usb_dev,
				(struct kone_settings const *)buf);
		if (!retval)
			memcpy(&kone->settings, buf,
					sizeof(struct kone_settings));
	}
	mutex_unlock(&kone->kone_lock);

	if (retval)
		return retval;

	/*
	 * If we get here, treat settings as okay and update actual values
	 * according to startup_profile
	 */
	kone->actual_profile = kone->settings.startup_profile;
	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;

	return sizeof(struct kone_settings);
}

/* sysfs binary read of one cached profile (index in attr->private). */
static ssize_t kone_sysfs_read_profilex(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count) {
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct kone_profile))
		return 0;

	if (off + count > sizeof(struct kone_profile))
		count = sizeof(struct kone_profile) - off;

	mutex_lock(&kone->kone_lock);
	memcpy(buf, ((char const *)&kone->profiles[*(uint *)(attr->private)]) + off,
			count);
	mutex_unlock(&kone->kone_lock);

	return count;
}

/* Writes data only if different to stored data */
static ssize_t kone_sysfs_write_profilex(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count) {
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	struct kone_profile *profile;
	int retval = 0, difference;

	/* I need to get my data in one piece */
	if (off != 0 || count != sizeof(struct kone_profile))
		return -EINVAL;

	profile = &kone->profiles[*(uint *)(attr->private)];

	mutex_lock(&kone->kone_lock);
	difference = memcmp(buf, profile, sizeof(struct kone_profile));
	if (difference) {
		/* device profile numbers are 1-based, array index is 0-based */
		retval = kone_set_profile(usb_dev,
				(struct kone_profile const *)buf,
				*(uint *)(attr->private) + 1);
		if (!retval)
			memcpy(profile, buf, sizeof(struct kone_profile));
	}
	mutex_unlock(&kone->kone_lock);

	if (retval)
		return retval;

	return sizeof(struct kone_profile);
}

static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
}

static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
}

/* weight is read each time, since we don't get informed when it's changed */
static ssize_t kone_sysfs_show_weight(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone;
	struct usb_device *usb_dev;
	int weight = 0;
	int retval;

	dev = dev->parent->parent;
	kone = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	mutex_lock(&kone->kone_lock);
	retval = kone_get_weight(usb_dev, &weight);
	mutex_unlock(&kone->kone_lock);

	if (retval)
		return retval;
	return snprintf(buf, PAGE_SIZE, "%d\n", weight);
}

static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
}

static ssize_t kone_sysfs_show_tcu(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
}

/* Send one calibration sub-command to the Tracking Control Unit. */
static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
	unsigned char value;

	value = number;
	return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1);
}

/*
 * Calibrating the tcu is the only action that changes settings data inside the
 * mouse, so this data needs to be reread
 */
static ssize_t kone_sysfs_set_tcu(struct device *dev,
		struct device_attribute *attr, char const *buf, size_t size)
{
	struct kone_device *kone;
	struct usb_device *usb_dev;
	int retval;
	unsigned long state;

	dev = dev->parent->parent;
	kone = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	retval = strict_strtoul(buf, 10, &state);
	if (retval)
		return retval;

	if (state != 0 && state != 1)
		return -EINVAL;

	mutex_lock(&kone->kone_lock);

	if (state == 1) { /* state activate */
		retval = kone_tcu_command(usb_dev, 1);
		if (retval)
			goto exit_unlock;
		retval = kone_tcu_command(usb_dev, 2);
		if (retval)
			goto exit_unlock;
		ssleep(5); /* tcu needs this time for calibration */
		retval = kone_tcu_command(usb_dev, 3);
		if (retval)
			goto exit_unlock;
		retval = kone_tcu_command(usb_dev, 0);
		if (retval)
			goto exit_unlock;
		retval = kone_tcu_command(usb_dev, 4);
		if (retval)
			goto exit_unlock;
		/*
		 * Kone needs this time to settle things.
		 * Reading settings too early will result in invalid data.
		 * Roccat's driver waits 1 sec, maybe this time could be
		 * shortened.
		 */
		ssleep(1);
	}

	/* calibration changes values in settings, so reread */
	retval = kone_get_settings(usb_dev, &kone->settings);
	if (retval)
		goto exit_no_settings;

	/* only write settings back if activation state is different */
	if (kone->settings.tcu != state) {
		kone->settings.tcu = state;
		kone_set_settings_checksum(&kone->settings);

		retval = kone_set_settings(usb_dev, &kone->settings);
		if (retval) {
			hid_err(usb_dev, "couldn't set tcu state\n");
			/*
			 * try to reread valid settings into buffer overwriting
			 * first error code
			 */
			retval = kone_get_settings(usb_dev, &kone->settings);
			if (retval)
				goto exit_no_settings;
			goto exit_unlock;
		}
	}

	retval = size;
	/* bug fix: previously fell through and printed the error on success */
	goto exit_unlock;
exit_no_settings:
	hid_err(usb_dev, "couldn't read settings\n");
exit_unlock:
	mutex_unlock(&kone->kone_lock);
	return retval;
}

static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kone_device *kone =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
}

static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
		struct device_attribute *attr, char const *buf, size_t size)
{
	struct kone_device *kone;
	struct usb_device *usb_dev;
	int retval;
	unsigned long new_startup_profile;

	dev = dev->parent->parent;
	kone = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	retval = strict_strtoul(buf, 10, &new_startup_profile);
	if (retval)
		return retval;

	if (new_startup_profile  < 1 || new_startup_profile > 5)
		return -EINVAL;

	mutex_lock(&kone->kone_lock);

	kone->settings.startup_profile = new_startup_profile;
	kone_set_settings_checksum(&kone->settings);

	retval = kone_set_settings(usb_dev, &kone->settings);
	mutex_unlock(&kone->kone_lock);
	if (retval)
		return retval;

	/* changing the startup profile immediately activates this profile */
	kone->actual_profile = new_startup_profile;
	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;

	return size;
}

static struct device_attribute kone_attributes[] = {
	/*
	 * Read actual dpi settings.
	 * Returns raw value for further processing. Refer to enum
	 * kone_polling_rates to get real value.
	 */
	__ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL),
	__ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL),

	/*
	 * The mouse can be equipped with one of four supplied weights from 5
	 * to 20 grams which are recognized and its value can be read out.
	 * This returns the raw value reported by the mouse for easy evaluation
	 * by software. Refer to enum kone_weights to get corresponding real
	 * weight.
	 */
	__ATTR(weight, 0440, kone_sysfs_show_weight, NULL),

	/*
	 * Prints firmware version stored in mouse as integer.
	 * The raw value reported by the mouse is returned for easy evaluation,
	 * to get the real version number the decimal point has to be shifted 2
	 * positions to the left. E.g. a value of 138 means 1.38.
	 */
	__ATTR(firmware_version, 0440,
			kone_sysfs_show_firmware_version, NULL),

	/*
	 * Prints state of Tracking Control Unit as number where 0 = off and
	 * 1 = on. Writing 0 deactivates tcu and writing 1 calibrates and
	 * activates the tcu
	 */
	__ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu),

	/* Prints and takes the number of the profile the mouse starts with */
	__ATTR(startup_profile, 0660,
			kone_sysfs_show_startup_profile,
			kone_sysfs_set_startup_profile),
	__ATTR_NULL
};

static struct bin_attribute kone_bin_attributes[] = {
	{
		.attr = { .name = "settings", .mode = 0660 },
		.size = sizeof(struct kone_settings),
		.read = kone_sysfs_read_settings,
		.write = kone_sysfs_write_settings
	},
	{
		.attr = { .name = "profile1", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read = kone_sysfs_read_profilex,
		.write = kone_sysfs_write_profilex,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read = kone_sysfs_read_profilex,
		.write = kone_sysfs_write_profilex,
		.private = &profile_numbers[1]
	},
	{
		.attr = { .name = "profile3", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read = kone_sysfs_read_profilex,
		.write = kone_sysfs_write_profilex,
		.private = &profile_numbers[2]
	},
	{
		.attr = { .name = "profile4", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read = kone_sysfs_read_profilex,
		.write = kone_sysfs_write_profilex,
		.private = &profile_numbers[3]
	},
	{
		.attr = { .name = "profile5", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read = kone_sysfs_read_profilex,
		.write = kone_sysfs_write_profilex,
		.private = &profile_numbers[4]
	},
	__ATTR_NULL
};

/* Populate a freshly allocated kone_device from the hardware. */
static int kone_init_kone_device_struct(struct usb_device *usb_dev,
		struct kone_device *kone)
{
	uint i;
	int retval;

	mutex_init(&kone->kone_lock);

	for (i = 0; i < 5; ++i) {
		retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
		if (retval)
			return retval;
	}

	retval = kone_get_settings(usb_dev, &kone->settings);
	if (retval)
		return retval;

	retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
	if (retval)
		return retval;

	kone->actual_profile = kone->settings.startup_profile;
	/*
	 * bug fix: startup_profile is 1-based while profiles[] is 0-based;
	 * indexing without "- 1" read the wrong (or out-of-range) profile.
	 * All other users of actual_profile in this file subtract 1.
	 */
	kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;

	return 0;
}

/*
 * Since IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind only to
 * mousepart if usb_hid is compiled into the kernel and kone is compiled as
 * module.
 * Special behaviour is bound only to mousepart since only mouseevents contain
 * additional notifications.
 */
static int kone_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct kone_device *kone;
	int retval;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {

		kone = kzalloc(sizeof(*kone), GFP_KERNEL);
		if (!kone) {
			hid_err(hdev, "can't alloc device descriptor\n");
			return -ENOMEM;
		}
		hid_set_drvdata(hdev, kone);

		retval = kone_init_kone_device_struct(usb_dev, kone);
		if (retval) {
			hid_err(hdev, "couldn't init struct kone_device\n");
			goto exit_free;
		}

		retval = roccat_connect(kone_class, hdev,
				sizeof(struct kone_roccat_report));
		if (retval < 0) {
			hid_err(hdev, "couldn't init char dev\n");
			/* be tolerant about not getting chrdev */
		} else {
			kone->roccat_claimed = 1;
			kone->chrdev_minor = retval;
		}
	} else {
		hid_set_drvdata(hdev, NULL);
	}

	return 0;
exit_free:
	kfree(kone);
	return retval;
}

static void kone_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct kone_device *kone;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {
		kone = hid_get_drvdata(hdev);
		if (kone->roccat_claimed)
			roccat_disconnect(kone->chrdev_minor);
		kfree(hid_get_drvdata(hdev));
	}
}

static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int retval;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = kone_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install mouse\n");
		goto exit_stop;
	}

	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

static void kone_remove(struct hid_device *hdev)
{
	kone_remove_specials(hdev);
	hid_hw_stop(hdev);
}

/* handle special events and keep actual profile and dpi values up to date */
static void kone_keep_values_up_to_date(struct kone_device *kone,
		struct kone_mouse_event const *event)
{
	switch (event->event) {
	case kone_mouse_event_switch_profile:
	case kone_mouse_event_osd_profile:
		kone->actual_profile = event->value;
		kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
				startup_dpi;
		break;
	case kone_mouse_event_switch_dpi:
	case kone_mouse_event_osd_dpi:
		kone->actual_dpi = event->value;
		break;
	}
}

/* Forward profile/dpi/macro events to the roccat char device. */
static void kone_report_to_chrdev(struct kone_device const *kone,
		struct kone_mouse_event const *event)
{
	struct kone_roccat_report roccat_report;

	switch (event->event) {
	case kone_mouse_event_switch_profile:
	case kone_mouse_event_switch_dpi:
	case kone_mouse_event_osd_profile:
	case kone_mouse_event_osd_dpi:
		roccat_report.event = event->event;
		roccat_report.value = event->value;
		roccat_report.key = 0;
		roccat_report_event(kone->chrdev_minor,
				(uint8_t *)&roccat_report);
		break;
	case kone_mouse_event_call_overlong_macro:
		if (event->value == kone_keystroke_action_press) {
			roccat_report.event = kone_mouse_event_call_overlong_macro;
			roccat_report.value = kone->actual_profile;
			roccat_report.key = event->macro_key;
			roccat_report_event(kone->chrdev_minor,
					(uint8_t *)&roccat_report);
		}
		break;
	}
}

/*
 * Is called for keyboard- and mousepart.
 * Only mousepart gets information about special events in its extended event
 * structure.
 */
static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
		u8 *data, int size)
{
	struct kone_device *kone = hid_get_drvdata(hdev);
	struct kone_mouse_event *event = (struct kone_mouse_event *)data;

	/* keyboard events are always processed by default handler */
	if (size != sizeof(struct kone_mouse_event))
		return 0;

	/*
	 * Firmware 1.38 introduced new behaviour for tilt and special buttons.
	 * Pressed button is reported in each movement event.
	 * Workaround sends only one event per press.
	 */
	if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
		memcpy(&kone->last_mouse_event, event,
				sizeof(struct kone_mouse_event));
	else
		memset(&event->tilt, 0, 5);

	kone_keep_values_up_to_date(kone, event);

	if (kone->roccat_claimed)
		kone_report_to_chrdev(kone, event);

	return 0; /* always do further processing */
}

static const struct hid_device_id kone_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
	{ }
};

MODULE_DEVICE_TABLE(hid, kone_devices);

static struct hid_driver kone_driver = {
		.name = "kone",
		.id_table = kone_devices,
		.probe = kone_probe,
		.remove = kone_remove,
		.raw_event = kone_raw_event
};

static int __init kone_init(void)
{
	int retval;

	/* class name has to be same as driver name */
	kone_class = class_create(THIS_MODULE, "kone");
	if (IS_ERR(kone_class))
		return PTR_ERR(kone_class);
	kone_class->dev_attrs = kone_attributes;
	kone_class->dev_bin_attrs = kone_bin_attributes;

	retval = hid_register_driver(&kone_driver);
	if (retval)
		class_destroy(kone_class);
	return retval;
}

static void __exit kone_exit(void)
{
	hid_unregister_driver(&kone_driver);
	class_destroy(kone_class);
}

module_init(kone_init);
module_exit(kone_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Kone driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
hei1125/Nova_Kernel
drivers/char/mbcs.c
3341
20363
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
 */

/*
 * MOATB Core Services driver.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/tiocx.h>
#include "mbcs.h"

#define MBCS_DEBUG 0
#if MBCS_DEBUG
#define DBG(fmt...)    printk(KERN_ALERT fmt)
#else
#define DBG(fmt...)
#endif

static DEFINE_MUTEX(mbcs_mutex);
static int mbcs_major;

/* One entry per probed MBCS device; guarded by mbcs_mutex. */
static LIST_HEAD(soft_list);

/*
 * file operations
 */
static const struct file_operations mbcs_ops = {
	.open = mbcs_open,
	.llseek = mbcs_sram_llseek,
	.read = mbcs_sram_read,
	.write = mbcs_sram_write,
	.mmap = mbcs_gscr_mmap,
};

struct mbcs_callback_arg {
	int minor;
	struct cx_dev *cx_dev;
};

/* Reset a get-DMA descriptor; completion interrupts stay enabled. */
static inline void mbcs_getdma_init(struct getdma *gdma)
{
	memset(gdma, 0, sizeof(struct getdma));
	gdma->DoneIntEnable = 1;
}

/* Reset a put-DMA descriptor; completion interrupts stay enabled. */
static inline void mbcs_putdma_init(struct putdma *pdma)
{
	memset(pdma, 0, sizeof(struct putdma));
	pdma->DoneIntEnable = 1;
}

/* Reset the algorithm-block descriptor. */
static inline void mbcs_algo_init(struct algoblock *algo_soft)
{
	memset(algo_soft, 0, sizeof(struct algoblock));
}

/**
 * mbcs_getdma_set - program the read-DMA (device -> host) engine registers
 * @mmr: base of the device MMR window
 * @hostAddr: system (host) DMA address
 * @localAddr: SRAM address on the device
 * @localRamSel: which 2MB SRAM bank localAddr falls in
 * @numPkts: transfer length in cachelines
 * @amoEnable/@intrEnable: completion notification selects
 * @peerIO: PIO vs memory target select
 * @amoHostDest/@amoModType: AMO completion destination/type
 * @intrHostDest/@intrVector: interrupt completion destination/vector
 *
 * Only writes registers; the engine is started separately via CM_CONTROL.
 */
static inline void mbcs_getdma_set(void *mmr,
		       uint64_t hostAddr,
		       uint64_t localAddr,
		       uint64_t localRamSel,
		       uint64_t numPkts,
		       uint64_t amoEnable,
		       uint64_t intrEnable,
		       uint64_t peerIO,
		       uint64_t amoHostDest,
		       uint64_t amoModType,
		       uint64_t intrHostDest,
		       uint64_t intrVector)
{
	union dma_control rdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	rdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	rdma_control.dma_op_length = numPkts;
	rdma_control.done_amo_en = amoEnable;
	rdma_control.done_int_en = intrEnable;
	rdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
}

/**
 * mbcs_putdma_set - program the write-DMA (host -> device) engine registers
 *
 * Mirror of mbcs_getdma_set() for the WR_DMA register set; parameters have
 * the same meaning.  Only writes registers; the engine is started separately.
 */
static inline void mbcs_putdma_set(void *mmr,
		       uint64_t hostAddr,
		       uint64_t localAddr,
		       uint64_t localRamSel,
		       uint64_t numPkts,
		       uint64_t amoEnable,
		       uint64_t intrEnable,
		       uint64_t peerIO,
		       uint64_t amoHostDest,
		       uint64_t amoModType,
		       uint64_t intrHostDest,
		       uint64_t intrVector)
{
	union dma_control wdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	wdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	wdma_control.dma_op_length = numPkts;
	wdma_control.done_amo_en = amoEnable;
	wdma_control.done_int_en = intrEnable;
	wdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
}

/* Program the algorithm engine's completion AMO/interrupt and step count. */
static inline void mbcs_algo_set(void *mmr,
		     uint64_t amoHostDest,
		     uint64_t amoModType,
		     uint64_t intrHostDest,
		     uint64_t intrVector, uint64_t algoStepCount)
{
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union algo_step step;

	step.algo_step_reg = 0;
	intr_dest.intr_dest_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);

	step.alg_step_cnt = algoStepCount;
	MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
}

/**
 * mbcs_getdma_start - program and kick off a device->host DMA
 * @soft: per-device state holding the pending getdma descriptor
 *
 * Returns 0 on success, -1 if no host buffer was set up.  Length is rounded
 * up to whole cachelines; the SRAM bank select is derived from localAddr
 * (one select value per 2MB window).
 */
static inline int mbcs_getdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct getdma *gdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	gdma = &soft->getdma;

	/* check that host address got setup */
	if (!gdma->hostAddr)
		return -1;

	numPkts =
	    (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine */
	mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
			gdma->localAddr,
			(gdma->localAddr < MB2) ? 0 :
			(gdma->localAddr < MB4) ? 1 :
			(gdma->localAddr < MB6) ? 2 : 3,
			numPkts,
			gdma->DoneAmoEnable,
			gdma->DoneIntEnable,
			gdma->peerIO,
			gdma->amoHostDest,
			gdma->amoModType,
			gdma->intrHostDest, gdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rd_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

/**
 * mbcs_putdma_start - program and kick off a host->device DMA
 * @soft: per-device state holding the pending putdma descriptor
 *
 * Returns 0 on success, -1 if no host buffer was set up.
 */
static inline int mbcs_putdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct putdma *pdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	pdma = &soft->putdma;

	/* check that host address got setup */
	if (!pdma->hostAddr)
		return -1;

	numPkts =
	    (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine */
	mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
			pdma->localAddr,
			(pdma->localAddr < MB2) ? 0 :
			(pdma->localAddr < MB4) ? 1 :
			(pdma->localAddr < MB6) ? 2 : 3,
			numPkts,
			pdma->DoneAmoEnable,
			pdma->DoneIntEnable,
			pdma->peerIO,
			pdma->amoHostDest,
			pdma->amoModType,
			pdma->intrHostDest, pdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.wr_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

/**
 * mbcs_algo_start - start the on-board algorithm engine
 * @soft: per-device state
 *
 * Serialized by soft->algolock.  Clears algo_done before starting so the
 * caller can wait on algo_queue for completion.  Returns 0 or -ERESTARTSYS.
 */
static inline int mbcs_algo_start(struct mbcs_soft *soft)
{
	struct algoblock *algo_soft = &soft->algo;
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;

	if (mutex_lock_interruptible(&soft->algolock))
		return -ERESTARTSYS;

	atomic_set(&soft->algo_done, 0);

	mbcs_algo_set(mmr_base,
		      algo_soft->amoHostDest,
		      algo_soft->amoModType,
		      algo_soft->intrHostDest,
		      algo_soft->intrVector, algo_soft->algoStepCount);

	/* start algorithm */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.alg_done_int_en = 1;
	cm_control.alg_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	mutex_unlock(&soft->algolock);

	return 0;
}

/**
 * do_mbcs_sram_dmawrite - DMA a host buffer into device SRAM
 * @soft: per-device state
 * @hostAddr: physical address of the (pinned, DMA-able) host buffer
 * @len: number of bytes to transfer
 * @off: SRAM offset; advanced by @len on success
 *
 * Sleeps until the completion interrupt fires.  Returns @len on success,
 * -ERESTARTSYS if interrupted, or -EAGAIN if the engine refused to start.
 */
static inline ssize_t
do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
		      size_t len, loff_t * off)
{
	int rv = 0;

	if (mutex_lock_interruptible(&soft->dmawritelock))
		return -ERESTARTSYS;

	atomic_set(&soft->dmawrite_done, 0);

	soft->putdma.hostAddr = hostAddr;
	soft->putdma.localAddr = *off;
	soft->putdma.bytes = len;

	if (mbcs_putdma_start(soft) < 0) {
		DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
					"mbcs_putdma_start failed\n");
		rv = -EAGAIN;
		goto dmawrite_exit;
	}

	if (wait_event_interruptible(soft->dmawrite_queue,
					atomic_read(&soft->dmawrite_done))) {
		rv = -ERESTARTSYS;
		goto dmawrite_exit;
	}

	rv = len;
	*off += len;

dmawrite_exit:
	mutex_unlock(&soft->dmawritelock);

	return rv;
}

/**
 * do_mbcs_sram_dmaread - DMA device SRAM into a host buffer
 * @soft: per-device state
 * @hostAddr: physical address of the (pinned, DMA-able) host buffer
 * @len: number of bytes to transfer
 * @off: SRAM offset; advanced by @len on success
 *
 * Counterpart of do_mbcs_sram_dmawrite() using the read engine, the
 * dmaread completion flag and the dmaread wait queue.
 */
static inline ssize_t
do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
		     size_t len, loff_t * off)
{
	int rv = 0;

	if (mutex_lock_interruptible(&soft->dmareadlock))
		return -ERESTARTSYS;

	/*
	 * Clear the READ completion flag we are about to wait on.  The
	 * original code cleared dmawrite_done here, so a stale dmaread_done
	 * from a previous transfer let the wait below fall straight through
	 * before the DMA finished.
	 */
	atomic_set(&soft->dmaread_done, 0);

	soft->getdma.hostAddr = hostAddr;
	soft->getdma.localAddr = *off;
	soft->getdma.bytes = len;

	if (mbcs_getdma_start(soft) < 0) {
		DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
		rv = -EAGAIN;
		goto dmaread_exit;
	}

	if (wait_event_interruptible(soft->dmaread_queue,
					atomic_read(&soft->dmaread_done))) {
		rv = -ERESTARTSYS;
		goto dmaread_exit;
	}

	rv = len;
	*off += len;

dmaread_exit:
	mutex_unlock(&soft->dmareadlock);

	return rv;
}

/*
 * Look up the device whose nasid matches the chardev minor and stash its
 * cx_dev in the file's private data.  The list walk is serialized by
 * mbcs_mutex.
 */
static int mbcs_open(struct inode *ip, struct file *fp)
{
	struct mbcs_soft *soft;
	int minor;

	mutex_lock(&mbcs_mutex);
	minor = iminor(ip);

	list_for_each_entry(soft, &soft_list, list) {
		if (soft->nasid == minor) {
			fp->private_data = soft->cxdev;
			mutex_unlock(&mbcs_mutex);
			return 0;
		}
	}
	mutex_unlock(&mbcs_mutex);

	return -ENODEV;
}

/*
 * read(2): bounce device SRAM through a temporary DMA-able page allocation
 * into user space.  "dmawrite" here means the card writes to host memory,
 * i.e. a host-side read.
 */
static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
	if (rv < 0)
		goto exit;

	if (copy_to_user(buf, (void *)hostAddr, len))
		rv = -EFAULT;

      exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}

/*
 * write(2): copy user data into a temporary DMA-able buffer and DMA it into
 * device SRAM ("dmaread": the card reads from host memory).
 */
static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	if (copy_from_user((void *)hostAddr, buf, len)) {
		rv = -EFAULT;
		goto exit;
	}

	rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);

      exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}

/* Standard llseek over the fixed-size SRAM window. */
static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
	loff_t newpos;

	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = MBCS_SRAM_SIZE + off;
		break;

	default:		/* can't happen */
		return -EINVAL;
	}

	if (newpos < 0)
		return -EINVAL;

	filp->f_pos = newpos;

	return newpos;
}

/* Translate an MMR-window offset into a PIO-able virtual address. */
static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
{
	uint64_t mmr_base;

	mmr_base = (uint64_t) (soft->mmr_base + offset);

	return mmr_base;
}

/* Cache the PIO address of the debug register block. */
static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
{
	soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
}

/* Cache the PIO address of the GSCR register block. */
static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
{
	soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}

/*
 * mmap(2): expose one uncached page of GSCR registers to user space.
 * Only offset 0 is valid.
 */
static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    __pa(soft->gscr_addr) >> PAGE_SHIFT,
			    PAGE_SIZE,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * mbcs_completion_intr_handler - Primary completion handler.
 * @irq: irq
 * @arg: soft struct for device
 *
 * Reads CM_STATUS once, then for each engine that reports done: clears the
 * engine, sets its completion flag and wakes the sleeping submitter.
 */
static irqreturn_t
mbcs_completion_intr_handler(int irq, void *arg)
{
	struct mbcs_soft *soft = (struct mbcs_soft *)arg;
	void *mmr_base;
	union cm_status cm_status;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);

	if (cm_status.rd_dma_done) {
		/* stop dma-read engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.rd_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmaread_done, 1);
		wake_up(&soft->dmaread_queue);
	}
	if (cm_status.wr_dma_done) {
		/* stop dma-write engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.wr_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmawrite_done, 1);
		wake_up(&soft->dmawrite_queue);
	}
	if (cm_status.alg_done) {
		/* clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.alg_done_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->algo_done, 1);
		wake_up(&soft->algo_queue);
	}

	return IRQ_HANDLED;
}

/**
 * mbcs_intr_alloc - Allocate interrupts.
 * @dev: device pointer
 *
 * Allocates one TIO CX interrupt per engine (get, put, algo) and wires each
 * to the shared completion handler.  On any failure, everything acquired so
 * far is unwound before returning -EAGAIN.
 */
static int mbcs_intr_alloc(struct cx_dev *dev)
{
	struct sn_irq_info *sn_irq;
	struct mbcs_soft *soft;
	struct getdma *getdma;
	struct putdma *putdma;
	struct algoblock *algo;

	soft = dev->soft;
	getdma = &soft->getdma;
	putdma = &soft->putdma;
	algo = &soft->algo;

	soft->get_sn_irq = NULL;
	soft->put_sn_irq = NULL;
	soft->algo_sn_irq = NULL;

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL)
		return -EAGAIN;
	soft->get_sn_irq = sn_irq;
	getdma->intrHostDest = sn_irq->irq_xtalkaddr;
	getdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS get intr", (void *)soft)) {
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->put_sn_irq = sn_irq;
	putdma->intrHostDest = sn_irq->irq_xtalkaddr;
	putdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS put intr", (void *)soft)) {
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->algo_sn_irq = sn_irq;
	algo->intrHostDest = sn_irq->irq_xtalkaddr;
	algo->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS algo intr", (void *)soft)) {
		tiocx_irq_free(soft->algo_sn_irq);
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	return 0;
}

/**
 * mbcs_intr_dealloc - Remove interrupts.
 * @dev: device pointer
 *
 * Releases the three IRQs allocated by mbcs_intr_alloc().
 */
static void mbcs_intr_dealloc(struct cx_dev *dev)
{
	struct mbcs_soft *soft;

	soft = dev->soft;

	free_irq(soft->get_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->get_sn_irq);
	free_irq(soft->put_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->put_sn_irq);
	free_irq(soft->algo_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->algo_sn_irq);
}

/*
 * One-time hardware bring-up: max out the request timeout, cache PIO
 * addresses, clear sticky error state, enable error interrupts and re-arm
 * the status registers with all engines cleared.
 */
static inline int mbcs_hw_init(struct mbcs_soft *soft)
{
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;
	union cm_req_timeout cm_req_timeout;
	uint64_t err_stat;

	cm_req_timeout.cm_req_timeout_reg =
	    MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);

	cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
	MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
		     cm_req_timeout.cm_req_timeout_reg);

	mbcs_gscr_pioaddr_set(soft);
	mbcs_debug_pioaddr_set(soft);

	/* clear errors */
	err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
	MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
	MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);

	/* enable interrupts */
	/* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
	MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);

	/* arm status regs and clear engines */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rearm_stat_regs = 1;
	cm_control.alg_clr = 1;
	cm_control.wr_dma_clr = 1;
	cm_control.rd_dma_clr = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}

/* sysfs "algo" show: report the algorithm number/revision word. */
static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct cx_dev *cx_dev = to_cx_dev(dev);
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t debug0;

	/*
	 * By convention, the first debug register contains the
	 * algorithm number and revision.
	 */
	debug0 = *(uint64_t *) soft->debug_addr;

	return sprintf(buf, "0x%x 0x%x\n",
		       upper_32_bits(debug0), lower_32_bits(debug0));
}

/* sysfs "algo" store: writing "1" runs the algorithm and waits for it. */
static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int n;
	struct cx_dev *cx_dev = to_cx_dev(dev);
	struct mbcs_soft *soft = cx_dev->soft;

	if (count <= 0)
		return 0;

	n = simple_strtoul(buf, NULL, 0);

	if (n == 1) {
		mbcs_algo_start(soft);
		if (wait_event_interruptible(soft->algo_queue,
					atomic_read(&soft->algo_done)))
			return -ERESTARTSYS;
	}

	return count;
}

DEVICE_ATTR(algo, 0644, show_algo, store_algo);

/**
 * mbcs_probe - Initialize for device
 * @dev: device pointer
 * @id: id table pointer
 *
 * Allocates and initializes the per-device soft state, links it on
 * soft_list, initializes the hardware and claims interrupts.
 * NOTE(review): the returns of mbcs_intr_alloc() and device_create_file()
 * are not checked here; a failure leaves a partially set up device.
 */
static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
	struct mbcs_soft *soft;

	dev->soft = NULL;

	soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
	if (soft == NULL)
		return -ENOMEM;

	soft->nasid = dev->cx_id.nasid;
	list_add(&soft->list, &soft_list);
	soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
	dev->soft = soft;
	soft->cxdev = dev;

	init_waitqueue_head(&soft->dmawrite_queue);
	init_waitqueue_head(&soft->dmaread_queue);
	init_waitqueue_head(&soft->algo_queue);

	mutex_init(&soft->dmawritelock);
	mutex_init(&soft->dmareadlock);
	mutex_init(&soft->algolock);

	mbcs_getdma_init(&soft->getdma);
	mbcs_putdma_init(&soft->putdma);
	mbcs_algo_init(&soft->algo);

	mbcs_hw_init(soft);

	/* Allocate interrupts */
	mbcs_intr_alloc(dev);

	device_create_file(&dev->dev, &dev_attr_algo);

	return 0;
}

/* Tear down a probed device: release IRQs, free soft state, drop sysfs. */
static int mbcs_remove(struct cx_dev *dev)
{
	if (dev->soft) {
		mbcs_intr_dealloc(dev);
		kfree(dev->soft);
	}

	device_remove_file(&dev->dev, &dev_attr_algo);

	return 0;
}

static const struct cx_device_id __devinitdata mbcs_id_table[] = {
	{
	 .part_num = MBCS_PART_NUM,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{
	 .part_num = MBCS_PART_NUM_ALG0,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{0, 0}
};

MODULE_DEVICE_TABLE(cx, mbcs_id_table);

static struct cx_drv mbcs_driver = {
	.name = DEVICE_NAME,
	.id_table = mbcs_id_table,
	.probe = mbcs_probe,
	.remove = mbcs_remove,
};

static void __exit mbcs_exit(void)
{
	unregister_chrdev(mbcs_major, DEVICE_NAME);
	cx_driver_unregister(&mbcs_driver);
}

static int __init mbcs_init(void)
{
	int rv;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	// Put driver into chrdevs[].  Get major number.
	rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
	if (rv < 0) {
		DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
		return rv;
	}
	mbcs_major = rv;

	return cx_driver_register(&mbcs_driver);
}

module_init(mbcs_init);
module_exit(mbcs_exit);

MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
MODULE_DESCRIPTION("Driver for MOATB Core Services");
MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_huawei_msm8226
drivers/media/video/omap/omap_vout.c
4109
57996
/* * omap_vout.c * * Copyright (C) 2005-2010 Texas Instruments. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * Leveraged code from the OMAP2 camera driver * Video-for-Linux (Version 2) camera capture driver for * the OMAP24xx camera controller. * * Author: Andy Lowe (source@mvista.com) * * Copyright (C) 2004 MontaVista Software, Inc. * Copyright (C) 2010 Texas Instruments. * * History: * 20-APR-2006 Khasim Modified VRFB based Rotation, * The image data is always read from 0 degree * view and written * to the virtual space of desired rotation angle * 4-DEC-2006 Jian Changed to support better memory management * * 17-Nov-2008 Hardik Changed driver to use video_ioctl2 * * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface * */ #include <linux/init.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/videodev2.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <media/videobuf-dma-contig.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <plat/dma.h> #include <plat/vrfb.h> #include <video/omapdss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" #include "omap_vout_vrfb.h" MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("OMAP Video for Linux Video out driver"); MODULE_LICENSE("GPL"); /* Driver Configuration macros */ #define VOUT_NAME "omap_vout" enum omap_vout_channels { OMAP_VIDEO1, OMAP_VIDEO2, }; static struct videobuf_queue_ops video_vbq_ops; /* Variables configurable through module params*/ static u32 video1_numbuffers = 3; static u32 video2_numbuffers = 3; static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE; static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE; static bool vid1_static_vrfb_alloc; static bool vid2_static_vrfb_alloc; static bool 
debug; /* Module parameters */ module_param(video1_numbuffers, uint, S_IRUGO); MODULE_PARM_DESC(video1_numbuffers, "Number of buffers to be allocated at init time for Video1 device."); module_param(video2_numbuffers, uint, S_IRUGO); MODULE_PARM_DESC(video2_numbuffers, "Number of buffers to be allocated at init time for Video2 device."); module_param(video1_bufsize, uint, S_IRUGO); MODULE_PARM_DESC(video1_bufsize, "Size of the buffer to be allocated for video1 device"); module_param(video2_bufsize, uint, S_IRUGO); MODULE_PARM_DESC(video2_bufsize, "Size of the buffer to be allocated for video2 device"); module_param(vid1_static_vrfb_alloc, bool, S_IRUGO); MODULE_PARM_DESC(vid1_static_vrfb_alloc, "Static allocation of the VRFB buffer for video1 device"); module_param(vid2_static_vrfb_alloc, bool, S_IRUGO); MODULE_PARM_DESC(vid2_static_vrfb_alloc, "Static allocation of the VRFB buffer for video2 device"); module_param(debug, bool, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* list of image formats supported by OMAP2 video pipelines */ static const struct v4l2_fmtdesc omap_formats[] = { { /* Note: V4L2 defines RGB565 as: * * Byte 0 Byte 1 * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3 * * We interpret RGB565 as: * * Byte 0 Byte 1 * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3 */ .description = "RGB565, le", .pixelformat = V4L2_PIX_FMT_RGB565, }, { /* Note: V4L2 defines RGB32 as: RGB-8-8-8-8 we use * this for RGB24 unpack mode, the last 8 bits are ignored * */ .description = "RGB32, le", .pixelformat = V4L2_PIX_FMT_RGB32, }, { /* Note: V4L2 defines RGB24 as: RGB-8-8-8 we use * this for RGB24 packed mode * */ .description = "RGB24, le", .pixelformat = V4L2_PIX_FMT_RGB24, }, { .description = "YUYV (YUV 4:2:2), packed", .pixelformat = V4L2_PIX_FMT_YUYV, }, { .description = "UYVY, packed", .pixelformat = V4L2_PIX_FMT_UYVY, }, }; #define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats)) /* * Try format */ static int omap_vout_try_format(struct v4l2_pix_format 
*pix) { int ifmt, bpp = 0; pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT, (u32)VID_MAX_HEIGHT); pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH); for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) { if (pix->pixelformat == omap_formats[ifmt].pixelformat) break; } if (ifmt == NUM_OUTPUT_FORMATS) ifmt = 0; pix->pixelformat = omap_formats[ifmt].pixelformat; pix->field = V4L2_FIELD_ANY; pix->priv = 0; switch (pix->pixelformat) { case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: default: pix->colorspace = V4L2_COLORSPACE_JPEG; bpp = YUYV_BPP; break; case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_RGB565X: pix->colorspace = V4L2_COLORSPACE_SRGB; bpp = RGB565_BPP; break; case V4L2_PIX_FMT_RGB24: pix->colorspace = V4L2_COLORSPACE_SRGB; bpp = RGB24_BPP; break; case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_BGR32: pix->colorspace = V4L2_COLORSPACE_SRGB; bpp = RGB32_BPP; break; } pix->bytesperline = pix->width * bpp; pix->sizeimage = pix->bytesperline * pix->height; return bpp; } /* * omap_vout_uservirt_to_phys: This inline function is used to convert user * space virtual address to physical address. 
*/ static u32 omap_vout_uservirt_to_phys(u32 virtp) { unsigned long physp = 0; struct vm_area_struct *vma; struct mm_struct *mm = current->mm; vma = find_vma(mm, virtp); /* For kernel direct-mapped memory, take the easy way */ if (virtp >= PAGE_OFFSET) { physp = virt_to_phys((void *) virtp); } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) { /* this will catch, kernel-allocated, mmaped-to-usermode addresses */ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start); } else { /* otherwise, use get_user_pages() for general userland pages */ int res, nr_pages = 1; struct page *pages; down_read(&current->mm->mmap_sem); res = get_user_pages(current, current->mm, virtp, nr_pages, 1, 0, &pages, NULL); up_read(&current->mm->mmap_sem); if (res == nr_pages) { physp = __pa(page_address(&pages[0]) + (virtp & ~PAGE_MASK)); } else { printk(KERN_WARNING VOUT_NAME "get_user_pages failed\n"); return 0; } } return physp; } /* * Free the V4L2 buffers */ void omap_vout_free_buffers(struct omap_vout_device *vout) { int i, numbuffers; /* Allocate memory for the buffers */ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers; vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize; for (i = 0; i < numbuffers; i++) { omap_vout_free_buffer(vout->buf_virt_addr[i], vout->buffer_size); vout->buf_phy_addr[i] = 0; vout->buf_virt_addr[i] = 0; } } /* * Convert V4L2 rotation to DSS rotation * V4L2 understand 0, 90, 180, 270. 
* Convert to 0, 1, 2 and 3 respectively for DSS */ static int v4l2_rot_to_dss_rot(int v4l2_rotation, enum dss_rotation *rotation, bool mirror) { int ret = 0; switch (v4l2_rotation) { case 90: *rotation = dss_rotation_90_degree; break; case 180: *rotation = dss_rotation_180_degree; break; case 270: *rotation = dss_rotation_270_degree; break; case 0: *rotation = dss_rotation_0_degree; break; default: ret = -EINVAL; } return ret; } static int omap_vout_calculate_offset(struct omap_vout_device *vout) { struct omapvideo_info *ovid; struct v4l2_rect *crop = &vout->crop; struct v4l2_pix_format *pix = &vout->pix; int *cropped_offset = &vout->cropped_offset; int ps = 2, line_length = 0; ovid = &vout->vid_info; if (ovid->rotation_type == VOUT_ROT_VRFB) { omap_vout_calculate_vrfb_offset(vout); } else { vout->line_length = line_length = pix->width; if (V4L2_PIX_FMT_YUYV == pix->pixelformat || V4L2_PIX_FMT_UYVY == pix->pixelformat) ps = 2; else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) ps = 4; else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) ps = 3; vout->ps = ps; *cropped_offset = (line_length * ps) * crop->top + crop->left * ps; } v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n", __func__, vout->cropped_offset); return 0; } /* * Convert V4L2 pixel format to DSS pixel format */ static int video_mode_to_dss_mode(struct omap_vout_device *vout) { struct omap_overlay *ovl; struct omapvideo_info *ovid; struct v4l2_pix_format *pix = &vout->pix; enum omap_color_mode mode; ovid = &vout->vid_info; ovl = ovid->overlays[0]; switch (pix->pixelformat) { case 0: break; case V4L2_PIX_FMT_YUYV: mode = OMAP_DSS_COLOR_YUV2; break; case V4L2_PIX_FMT_UYVY: mode = OMAP_DSS_COLOR_UYVY; break; case V4L2_PIX_FMT_RGB565: mode = OMAP_DSS_COLOR_RGB16; break; case V4L2_PIX_FMT_RGB24: mode = OMAP_DSS_COLOR_RGB24P; break; case V4L2_PIX_FMT_RGB32: mode = (ovl->id == OMAP_DSS_VIDEO1) ? 
OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32; break; case V4L2_PIX_FMT_BGR32: mode = OMAP_DSS_COLOR_RGBX32; break; default: mode = -EINVAL; } return mode; } /* * Setup the overlay */ static int omapvid_setup_overlay(struct omap_vout_device *vout, struct omap_overlay *ovl, int posx, int posy, int outw, int outh, u32 addr) { int ret = 0; struct omap_overlay_info info; int cropheight, cropwidth, pixheight, pixwidth; if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 && (outw != vout->pix.width || outh != vout->pix.height)) { ret = -EINVAL; goto setup_ovl_err; } vout->dss_mode = video_mode_to_dss_mode(vout); if (vout->dss_mode == -EINVAL) { ret = -EINVAL; goto setup_ovl_err; } /* Setup the input plane parameters according to * rotation value selected. */ if (is_rotation_90_or_270(vout)) { cropheight = vout->crop.width; cropwidth = vout->crop.height; pixheight = vout->pix.width; pixwidth = vout->pix.height; } else { cropheight = vout->crop.height; cropwidth = vout->crop.width; pixheight = vout->pix.height; pixwidth = vout->pix.width; } ovl->get_overlay_info(ovl, &info); info.paddr = addr; info.width = cropwidth; info.height = cropheight; info.color_mode = vout->dss_mode; info.mirror = vout->mirror; info.pos_x = posx; info.pos_y = posy; info.out_width = outw; info.out_height = outh; info.global_alpha = vout->win.global_alpha; if (!is_rotation_enabled(vout)) { info.rotation = 0; info.rotation_type = OMAP_DSS_ROT_DMA; info.screen_width = pixwidth; } else { info.rotation = vout->rotation; info.rotation_type = OMAP_DSS_ROT_VRFB; info.screen_width = 2048; } v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n" "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n" "out_height=%d rotation_type=%d screen_width=%d\n", __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height, info.color_mode, info.rotation, info.mirror, info.pos_x, info.pos_y, info.out_width, info.out_height, info.rotation_type, info.screen_width); ret 
= ovl->set_overlay_info(ovl, &info); if (ret) goto setup_ovl_err; return 0; setup_ovl_err: v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n"); return ret; } /* * Initialize the overlay structure */ static int omapvid_init(struct omap_vout_device *vout, u32 addr) { int ret = 0, i; struct v4l2_window *win; struct omap_overlay *ovl; int posx, posy, outw, outh, temp; struct omap_video_timings *timing; struct omapvideo_info *ovid = &vout->vid_info; win = &vout->win; for (i = 0; i < ovid->num_overlays; i++) { ovl = ovid->overlays[i]; if (!ovl->manager || !ovl->manager->device) return -EINVAL; timing = &ovl->manager->device->panel.timings; outw = win->w.width; outh = win->w.height; switch (vout->rotation) { case dss_rotation_90_degree: /* Invert the height and width for 90 * and 270 degree rotation */ temp = outw; outw = outh; outh = temp; posy = (timing->y_res - win->w.width) - win->w.left; posx = win->w.top; break; case dss_rotation_180_degree: posx = (timing->x_res - win->w.width) - win->w.left; posy = (timing->y_res - win->w.height) - win->w.top; break; case dss_rotation_270_degree: temp = outw; outw = outh; outh = temp; posy = win->w.left; posx = (timing->x_res - win->w.height) - win->w.top; break; default: posx = win->w.left; posy = win->w.top; break; } ret = omapvid_setup_overlay(vout, ovl, posx, posy, outw, outh, addr); if (ret) goto omapvid_init_err; } return 0; omapvid_init_err: v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n"); return ret; } /* * Apply the changes set the go bit of DSS */ static int omapvid_apply_changes(struct omap_vout_device *vout) { int i; struct omap_overlay *ovl; struct omapvideo_info *ovid = &vout->vid_info; for (i = 0; i < ovid->num_overlays; i++) { ovl = ovid->overlays[i]; if (!ovl->manager || !ovl->manager->device) return -EINVAL; ovl->manager->apply(ovl->manager); } return 0; } static int omapvid_handle_interlace_display(struct omap_vout_device *vout, unsigned int irqstatus, struct timeval timevalue) { u32 fid; 
/*
 * NOTE(review): this is the tail of omapvid_handle_interlace_display();
 * its signature and local declarations sit above this chunk.  From the
 * visible code: returns the current field id (non-zero) once both fields
 * of an interlaced (VENC) frame have been displayed, 0 when the caller
 * should ignore this interrupt.
 */
	/* The first interrupt after streamon carries no finished frame. */
	if (vout->first_int) {
		vout->first_int = 0;
		goto err;
	}

	if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
		fid = 1;
	else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
		fid = 0;
	else
		goto err;

	vout->field_id ^= 1;
	if (fid != vout->field_id) {
		/* Field mismatch: resynchronise on field 0. */
		if (fid == 0)
			vout->field_id = fid;
	} else if (0 == fid) {
		/* Both fields shown: complete the current buffer. */
		if (vout->cur_frm == vout->next_frm)
			goto err;
		vout->cur_frm->ts = timevalue;
		vout->cur_frm->state = VIDEOBUF_DONE;
		wake_up_interruptible(&vout->cur_frm->done);
		vout->cur_frm = vout->next_frm;
	} else {
		if (list_empty(&vout->dma_queue) ||
				(vout->cur_frm != vout->next_frm))
			goto err;
	}
	return vout->field_id;
err:
	return 0;
}

/*
 * omap_vout_isr - DISPC interrupt callback while streaming.
 *
 * Filters the interrupt by display type (VSYNC for LCD/DSI/DPI,
 * EVSYNC for VENC/HDMI), marks the current videobuf DONE, pulls the
 * next queued buffer off dma_queue and programs its address into the
 * overlay.  Runs under vout->vbq_lock (spinlock, IRQ context).
 */
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
	int ret, fid, mgr_id;
	u32 addr, irq;
	struct omap_overlay *ovl;
	struct timeval timevalue;
	struct omapvideo_info *ovid;
	struct omap_dss_device *cur_display;
	struct omap_vout_device *vout = (struct omap_vout_device *)arg;

	if (!vout->streaming)
		return;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	if (!ovl->manager || !ovl->manager->device)
		return;

	mgr_id = ovl->manager->id;
	cur_display = ovl->manager->device;

	spin_lock(&vout->vbq_lock);
	do_gettimeofday(&timevalue);

	switch (cur_display->type) {
	case OMAP_DISPLAY_TYPE_DSI:
	case OMAP_DISPLAY_TYPE_DPI:
		/* Pick the vsync bit matching this overlay's LCD channel. */
		if (mgr_id == OMAP_DSS_CHANNEL_LCD)
			irq = DISPC_IRQ_VSYNC;
		else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
			irq = DISPC_IRQ_VSYNC2;
		else
			goto vout_isr_err;

		if (!(irqstatus & irq))
			goto vout_isr_err;
		break;
	case OMAP_DISPLAY_TYPE_VENC:
		fid = omapvid_handle_interlace_display(vout, irqstatus,
				timevalue);
		if (!fid)
			goto vout_isr_err;
		break;
	case OMAP_DISPLAY_TYPE_HDMI:
		if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
			goto vout_isr_err;
		break;
	default:
		goto vout_isr_err;
	}

	/* Complete the frame that just finished scanning out. */
	if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
		vout->cur_frm->ts = timevalue;
		vout->cur_frm->state = VIDEOBUF_DONE;
		wake_up_interruptible(&vout->cur_frm->done);
		vout->cur_frm = vout->next_frm;
	}

	vout->first_int = 0;
	if (list_empty(&vout->dma_queue))
		goto vout_isr_err;

	/* Program the next queued buffer into the overlay. */
	vout->next_frm = list_entry(vout->dma_queue.next,
			struct videobuf_buffer, queue);
	list_del(&vout->next_frm->queue);

	vout->next_frm->state = VIDEOBUF_ACTIVE;

	addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
		+ vout->cropped_offset;

	/* First save the configuration in overlay structure */
	ret = omapvid_init(vout, addr);
	if (ret)
		printk(KERN_ERR VOUT_NAME
			"failed to set overlay info\n");
	/* Enable the pipeline and set the Go bit */
	ret = omapvid_apply_changes(vout);
	if (ret)
		printk(KERN_ERR VOUT_NAME "failed to change mode\n");

vout_isr_err:
	spin_unlock(&vout->vbq_lock);
}

/* Video buffer call backs */

/*
 * Buffer setup function is called by videobuf layer when REQBUF ioctl is
 * called. This is used to setup buffers and return size and count of
 * buffers allocated. After the call to this function, videobuf layer will
 * setup buffer queue depending on the size and count of buffers.
 */
static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
			  unsigned int *size)
{
	int startindex = 0, i, j;
	u32 phy_addr = 0, virt_addr = 0;
	struct omap_vout_device *vout = q->priv_data;
	struct omapvideo_info *ovid = &vout->vid_info;
	int vid_max_buf_size;

	/* NOTE(review): vout was already dereferenced via q->priv_data
	 * chain above only if non-NULL; this check is effectively dead
	 * since ovid = &vout->vid_info computed an address first. */
	if (!vout)
		return -EINVAL;

	vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
		video2_bufsize;

	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
		return -EINVAL;

	startindex = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;
	if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
		*count = startindex;

	if (ovid->rotation_type == VOUT_ROT_VRFB) {
		if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
			return -ENOMEM;
	}

	if (V4L2_MEMORY_MMAP != vout->memory)
		return 0;

	/* Now allocate the V4L2 buffers */
	*size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
	startindex = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;

	/* Check the size of the buffer */
	if (*size > vid_max_buf_size) {
		v4l2_err(&vout->vid_dev->v4l2_dev,
			"buffer allocation mismatch [%u] [%u]\n",
			*size, vout->buffer_size);
		return -ENOMEM;
	}

	for (i = startindex; i < *count; i++) {
		vout->buffer_size = *size;

		virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
				&phy_addr);
		if (!virt_addr) {
			if (ovid->rotation_type == VOUT_ROT_NONE) {
				break;
			} else {
				if (!is_rotation_enabled(vout))
					break;
				/* Free the VRFB buffers if no space for V4L2 buffers */
				for (j = i; j < *count; j++) {
					omap_vout_free_buffer(
						vout->smsshado_virt_addr[j],
						vout->smsshado_size);
					vout->smsshado_virt_addr[j] = 0;
					vout->smsshado_phy_addr[j] = 0;
				}
				/* NOTE(review): no break here — the loop
				 * falls through and records virt_addr == 0
				 * for this slot, and keeps iterating with
				 * allocation already failed.  Looks
				 * unintentional; confirm against mainline. */
			}
		}
		vout->buf_virt_addr[i] = virt_addr;
		vout->buf_phy_addr[i] = phy_addr;
	}
	*count = vout->buffer_allocated = i;

	return 0;
}

/*
 * Free the V4L2 buffers additionally allocated than default
 * number of buffers
 */
static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
{
	int num_buffers = 0, i;

	num_buffers = (vout->vid == OMAP_VIDEO1) ?
		video1_numbuffers : video2_numbuffers;

	for (i = num_buffers; i < vout->buffer_allocated; i++) {
		if (vout->buf_virt_addr[i])
			omap_vout_free_buffer(vout->buf_virt_addr[i],
					vout->buffer_size);

		vout->buf_virt_addr[i] = 0;
		vout->buf_phy_addr[i] = 0;
	}
	vout->buffer_allocated = num_buffers;
}

/*
 * This function will be called when VIDIOC_QBUF ioctl is called.
 * It prepares buffers before giving them out for display. This function
 * converts user space virtual address into physical address if userptr memory
 * exchange mechanism is used. If rotation is enabled, it copies the entire
 * buffer into VRFB memory space before giving it to the DSS.
 */
static int omap_vout_buffer_prepare(struct videobuf_queue *q,
			struct videobuf_buffer *vb,
			enum v4l2_field field)
{
	struct omap_vout_device *vout = q->priv_data;
	struct omapvideo_info *ovid = &vout->vid_info;

	if (VIDEOBUF_NEEDS_INIT == vb->state) {
		vb->width = vout->pix.width;
		vb->height = vout->pix.height;
		vb->size = vb->width * vb->height * vout->bpp;
		vb->field = field;
	}
	vb->state = VIDEOBUF_PREPARED;
	/* if user pointer memory mechanism is used, get the physical
	 * address of the buffer */
	if (V4L2_MEMORY_USERPTR == vb->memory) {
		if (0 == vb->baddr)
			return -EINVAL;
		/* Physical address */
		vout->queued_buf_addr[vb->i] = (u8 *)
			omap_vout_uservirt_to_phys(vb->baddr);
	} else {
		u32 addr, dma_addr;
		unsigned long size;

		addr = (unsigned long) vout->buf_virt_addr[vb->i];
		size = (unsigned long) vb->size;

		/* Flush CPU caches so the DSS sees current frame data;
		 * a mapping error is only logged, not propagated. */
		dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev,
				(void *) addr, size, DMA_TO_DEVICE);
		if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
			v4l2_err(&vout->vid_dev->v4l2_dev,
					"dma_map_single failed\n");

		vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
	}

	if (ovid->rotation_type == VOUT_ROT_VRFB)
		return omap_vout_prepare_vrfb(vout, vb);
	else
		return 0;
}

/*
 * Buffer queue function will be called from the videobuf layer when _QBUF
 * ioctl is called. It is used to enqueue a buffer, which is ready to be
 * displayed.
 */
static void omap_vout_buffer_queue(struct videobuf_queue *q,
			  struct videobuf_buffer *vb)
{
	struct omap_vout_device *vout = q->priv_data;

	/* Driver is also maintaining a queue.
So enqueue buffer in the driver
	 * queue */
	list_add_tail(&vb->queue, &vout->dma_queue);

	vb->state = VIDEOBUF_QUEUED;
}

/*
 * Buffer release function is called from videobuf layer to release buffers
 * which were already allocated.
 */
static void omap_vout_buffer_release(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	struct omap_vout_device *vout = q->priv_data;

	vb->state = VIDEOBUF_NEEDS_INIT;

	/* Nothing more to do for MMAP buffers; freeing happens elsewhere. */
	if (V4L2_MEMORY_MMAP != vout->memory)
		return;
}

/*
 *  File operations
 */
static unsigned int omap_vout_poll(struct file *file,
				   struct poll_table_struct *wait)
{
	struct omap_vout_device *vout = file->private_data;
	struct videobuf_queue *q = &vout->vbq;

	return videobuf_poll_stream(file, q, wait);
}

/* VMA open: one more userspace mapping of a frame buffer. */
static void omap_vout_vm_open(struct vm_area_struct *vma)
{
	struct omap_vout_device *vout = vma->vm_private_data;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
	vout->mmap_count++;
}

/* VMA close: drop the mapping refcount taken in mmap/vm_open. */
static void omap_vout_vm_close(struct vm_area_struct *vma)
{
	struct omap_vout_device *vout = vma->vm_private_data;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
	vout->mmap_count--;
}

static struct vm_operations_struct omap_vout_vm_ops = {
	.open	= omap_vout_vm_open,
	.close	= omap_vout_vm_close,
};

/*
 * mmap a driver-allocated (MMAP-memory) frame buffer into userspace,
 * identified by vm_pgoff matching the videobuf offset, remapped
 * page-by-page with write-combining.
 */
static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
{
	int i;
	void *pos;
	unsigned long start = vma->vm_start;
	unsigned long size = (vma->vm_end - vma->vm_start);
	struct omap_vout_device *vout = file->private_data;
	struct videobuf_queue *q = &vout->vbq;

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
			" %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
			vma->vm_pgoff, vma->vm_start, vma->vm_end);

	/* look for the buffer to map */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
			continue;
		if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}

	if (VIDEO_MAX_FRAME == i) {
		v4l2_dbg(1, debug,
				&vout->vid_dev->v4l2_dev,
				"offset invalid [offset=0x%lx]\n",
				(vma->vm_pgoff << PAGE_SHIFT));
		return -EINVAL;
	}
	/* Check the size of the buffer */
	if (size > vout->buffer_size) {
		v4l2_err(&vout->vid_dev->v4l2_dev,
				"insufficient memory [%lu] [%u]\n",
				size, vout->buffer_size);
		return -ENOMEM;
	}

	q->bufs[i]->baddr = vma->vm_start;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &omap_vout_vm_ops;
	vma->vm_private_data = (void *) vout;
	pos = (void *)vout->buf_virt_addr[i];
	vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
	/* Map the buffer one page at a time. */
	while (size > 0) {
		unsigned long pfn;
		pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vout->mmap_count++;
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);

	return 0;
}

/*
 * Release: stop streaming if active, disable overlays, free extra and
 * VRFB buffers, and tear down the videobuf mappings.
 */
static int omap_vout_release(struct file *file)
{
	unsigned int ret, i;
	struct videobuf_queue *q;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = file->private_data;

	/* NOTE(review): vout is dereferenced here and on the next line
	 * before the !vout check below — the NULL check is ineffective
	 * as written. */
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
	ovid = &vout->vid_info;

	if (!vout)
		return 0;

	q = &vout->vbq;
	/* Disable all the overlay managers connected with this interface */
	for (i = 0; i < ovid->num_overlays; i++) {
		struct omap_overlay *ovl = ovid->overlays[i];
		if (ovl->manager && ovl->manager->device)
			ovl->disable(ovl);
	}
	/* Turn off the pipeline */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_warn(&vout->vid_dev->v4l2_dev,
				"Unable to apply changes\n");

	/* Free all buffers */
	omap_vout_free_extra_buffers(vout);

	/* Free the VRFB buffers only if they are allocated
	 * during reqbufs.
Don't free if init time allocated */
	if (ovid->rotation_type == VOUT_ROT_VRFB) {
		if (!vout->vrfb_static_allocation)
			omap_vout_free_vrfb_buffers(vout);
	}
	videobuf_mmap_free(q);

	/* Even if apply changes fails we should continue
	   freeing allocated memory */
	if (vout->streaming) {
		u32 mask = 0;

		mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
			DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
		omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
		vout->streaming = 0;

		videobuf_streamoff(q);
		videobuf_queue_cancel(q);
	}

	if (vout->mmap_count != 0)
		vout->mmap_count = 0;

	vout->opened -= 1;
	file->private_data = NULL;

	if (vout->buffer_allocated)
		videobuf_mmap_free(q);

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
	/* Returns the status of omapvid_apply_changes() above. */
	return ret;
}

/*
 * Open: single-open policy; initialises the videobuf queue for
 * DMA-contiguous video-output buffers.
 */
static int omap_vout_open(struct file *file)
{
	struct videobuf_queue *q;
	struct omap_vout_device *vout = NULL;

	vout = video_drvdata(file);
	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n",
			__func__);

	/* NOTE(review): vout already dereferenced by v4l2_dbg above;
	 * this NULL check comes too late to be effective. */
	if (vout == NULL)
		return -ENODEV;

	/* for now, we only support single open */
	if (vout->opened)
		return -EBUSY;

	vout->opened += 1;

	file->private_data = vout;
	vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

	q = &vout->vbq;
	video_vbq_ops.buf_setup = omap_vout_buffer_setup;
	video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
	video_vbq_ops.buf_release = omap_vout_buffer_release;
	video_vbq_ops.buf_queue = omap_vout_buffer_queue;
	spin_lock_init(&vout->vbq_lock);

	videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
			&vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
			sizeof(struct videobuf_buffer), vout, NULL);

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
	return 0;
}

/*
 * V4L2 ioctls
 */
static int vidioc_querycap(struct file *file, void *fh,
		struct v4l2_capability *cap)
{
	struct omap_vout_device *vout = fh;

	strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
	strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
	cap->bus_info[0] = '\0';
	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
		V4L2_CAP_VIDEO_OUTPUT_OVERLAY;

	return 0;
}

/* Enumerate the supported output pixel formats (omap_formats table). */
static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_fmtdesc *fmt)
{
	int index = fmt->index;

	if (index >= NUM_OUTPUT_FORMATS)
		return -EINVAL;

	fmt->flags = omap_formats[index].flags;
	strlcpy(fmt->description, omap_formats[index].description,
			sizeof(fmt->description));
	fmt->pixelformat = omap_formats[index].pixelformat;

	return 0;
}

static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = fh;

	f->fmt.pix = vout->pix;
	return 0;
}

/* Clamp the requested format against the attached panel's timings. */
static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_video_timings *timing;
	struct omap_vout_device *vout = fh;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	if (!ovl->manager || !ovl->manager->device)
		return -EINVAL;
	/* get the display device attached to the overlay */
	timing = &ovl->manager->device->panel.timings;

	vout->fbuf.fmt.height = timing->y_res;
	vout->fbuf.fmt.width = timing->x_res;

	omap_vout_try_format(&f->fmt.pix);
	return 0;
}

/*
 * Set the output pixel format; rejected while streaming, and RGB24 is
 * refused when VRFB rotation is enabled.
 */
static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret, bpp;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_video_timings *timing;
	struct omap_vout_device *vout = fh;

	if (vout->streaming)
		return -EBUSY;

	mutex_lock(&vout->lock);

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* get the display device attached to the overlay */
	if (!ovl->manager || !ovl->manager->device) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}
	timing = &ovl->manager->device->panel.timings;

	/* We don't support RGB24-packed mode if vrfb rotation
	 * is enabled */
	if ((is_rotation_enabled(vout)) &&
			f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}

	/* get the framebuffer parameters */
	if (is_rotation_90_or_270(vout)) {
		/* 90/270 rotation swaps panel width and height. */
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	/* change to smaller size is OK */
	bpp = omap_vout_try_format(&f->fmt.pix);
	f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;

	/* try & set the new output format */
	vout->bpp = bpp;
	vout->pix = f->fmt.pix;
	vout->vrfb_bpp = 1;

	/* If YUYV then vrfb bpp is 2, for others its 1 */
	if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
			V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
		vout->vrfb_bpp = 2;

	/* set default crop and win */
	omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);

	/* Save the changes in the overlay structure */
	ret = omapvid_init(vout, 0);
	if (ret) {
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
		goto s_fmt_vid_out_exit;
	}

	ret = 0;

s_fmt_vid_out_exit:
	mutex_unlock(&vout->lock);
	return ret;
}

static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret = 0;
	struct omap_vout_device *vout = fh;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct v4l2_window *win = &f->fmt.win;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	ret = omap_vout_try_window(&vout->fbuf, win);

	if (!ret) {
		/* Force full opacity when the HW has no global alpha. */
		if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
			win->global_alpha = 255;
		else
			win->global_alpha = f->fmt.win.global_alpha;
	}

	return ret;
}

static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct v4l2_window *win = &f->fmt.win;

	mutex_lock(&vout->lock);
	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
	if (!ret) {
		/* Video1 plane does not support global alpha on OMAP3 */
		if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
			vout->win.global_alpha = 255;
		else
			vout->win.global_alpha = f->fmt.win.global_alpha;

		vout->win.chromakey = f->fmt.win.chromakey;
	}
	mutex_unlock(&vout->lock);
	return ret;
}
/* Enumerate overlay formats — same omap_formats table as vid_out. */
static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_fmtdesc *fmt)
{
	int index = fmt->index;

	if (index >= NUM_OUTPUT_FORMATS)
		return -EINVAL;

	fmt->flags = omap_formats[index].flags;
	strlcpy(fmt->description, omap_formats[index].description,
			sizeof(fmt->description));
	fmt->pixelformat = omap_formats[index].pixelformat;
	return 0;
}

/* Report the current overlay window plus the manager's chroma key. */
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	u32 key_value = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct omap_overlay_manager_info info;
	struct v4l2_window *win = &f->fmt.win;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	win->w = vout->win.w;
	win->field = vout->win.field;
	win->global_alpha = vout->win.global_alpha;

	if (ovl->manager && ovl->manager->get_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		key_value = info.trans_key;
	}
	win->chromakey = key_value;
	return 0;
}

static int vidioc_cropcap(struct file *file, void *fh,
		struct v4l2_cropcap *cropcap)
{
	struct omap_vout_device *vout = fh;
	struct v4l2_pix_format *pix = &vout->pix;

	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	/* Width and height are always even */
	cropcap->bounds.width = pix->width & ~1;
	cropcap->bounds.height = pix->height & ~1;

	omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
	cropcap->pixelaspect.numerator = 1;
	cropcap->pixelaspect.denominator = 1;
	return 0;
}

static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct omap_vout_device *vout = fh;

	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;
	crop->c = vout->crop;
	return 0;
}

/* Set the crop rectangle; rejected while streaming. */
static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	int ret = -EINVAL;
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid;
	struct omap_overlay *ovl;
	struct omap_video_timings *timing;

	if (vout->streaming)
		return -EBUSY;

	mutex_lock(&vout->lock);
	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	if (!ovl->manager || !ovl->manager->device) {
		ret = -EINVAL;
		goto s_crop_err;
	}
	/* get the display device attached to the overlay */
	timing = &ovl->manager->device->panel.timings;

	if (is_rotation_90_or_270(vout)) {
		/* 90/270 rotation swaps panel width and height. */
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
			&vout->fbuf, &crop->c);

s_crop_err:
	mutex_unlock(&vout->lock);
	return ret;
}

/* Describe the three supported controls: rotate, bg color, vflip. */
static int vidioc_queryctrl(struct file *file, void *fh,
		struct v4l2_queryctrl *ctrl)
{
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_ROTATE:
		ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
		break;
	case V4L2_CID_BG_COLOR:
		ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
		break;
	case V4L2_CID_VFLIP:
		ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
		break;
	default:
		ctrl->name[0] = '\0';
		ret = -EINVAL;
	}
	return ret;
}

static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
{
	int ret = 0;
	struct omap_vout_device *vout = fh;

	switch (ctrl->id) {
	case V4L2_CID_ROTATE:
		/* control[0] caches the rotation value. */
		ctrl->value = vout->control[0].value;
		break;
	case V4L2_CID_BG_COLOR:
	{
		struct omap_overlay_manager_info info;
		struct omap_overlay *ovl;

		ovl = vout->vid_info.overlays[0];
		if (!ovl->manager || !ovl->manager->get_manager_info) {
			ret = -EINVAL;
			break;
		}

		ovl->manager->get_manager_info(ovl->manager, &info);
		ctrl->value = info.default_color;
		break;
	}
	case V4L2_CID_VFLIP:
		/* control[2] caches the mirror flag. */
		ctrl->value = vout->control[2].value;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Set a control.  Each case takes vout->lock itself and must unlock on
 * every exit path — be careful when modifying the error branches.
 */
static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
{
	int ret = 0;
	struct omap_vout_device *vout = fh;

	switch (a->id) {
	case V4L2_CID_ROTATE:
	{
		struct omapvideo_info *ovid;
		int rotation = a->value;

		ovid = &vout->vid_info;

		mutex_lock(&vout->lock);
		if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
			mutex_unlock(&vout->lock);
			ret = -ERANGE;
			break;
		}

		/* VRFB cannot rotate packed RGB24. */
		if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
							vout->mirror)) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		vout->control[0].value = rotation;
		mutex_unlock(&vout->lock);
		break;
	}
	case V4L2_CID_BG_COLOR:
	{
		struct omap_overlay *ovl;
		unsigned int color = a->value;
		struct omap_overlay_manager_info info;

		ovl = vout->vid_info.overlays[0];

		mutex_lock(&vout->lock);
		if (!ovl->manager || !ovl->manager->get_manager_info) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		ovl->manager->get_manager_info(ovl->manager, &info);
		info.default_color = color;
		if (ovl->manager->set_manager_info(ovl->manager, &info)) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		vout->control[1].value = color;
		mutex_unlock(&vout->lock);
		break;
	}
	case V4L2_CID_VFLIP:
	{
		struct omap_overlay *ovl;
		struct omapvideo_info *ovid;
		unsigned int mirror = a->value;

		ovid = &vout->vid_info;
		ovl = ovid->overlays[0];

		mutex_lock(&vout->lock);
		if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
			mutex_unlock(&vout->lock);
			ret = -ERANGE;
			break;
		}

		/* Mirroring, like rotation, goes through VRFB: no RGB24. */
		if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			mutex_unlock(&vout->lock);
			ret = -EINVAL;
			break;
		}

		vout->mirror = mirror;
		vout->control[2].value = mirror;
		mutex_unlock(&vout->lock);
		break;
	}
	default:
		ret = -EINVAL;
	}
	return ret;
}

/*
 * REQBUFS: validate the request, free any previously allocated buffers
 * (MMAP or USERPTR), then hand off to videobuf_reqbufs().
 */
static int vidioc_reqbufs(struct file *file, void *fh,
			struct v4l2_requestbuffers *req)
{
	int ret = 0;
	unsigned int i, num_buffers = 0;
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;

	/* NOTE(review): req->count is unsigned, so "req->count < 0" is
	 * always false — the comparison is dead code. */
	if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
		return -EINVAL;
	/* if memory is not mmap or userptr
	   return error */
	if ((V4L2_MEMORY_MMAP != req->memory) &&
			(V4L2_MEMORY_USERPTR != req->memory))
		return -EINVAL;

	mutex_lock(&vout->lock);
	/* Cannot be requested when streaming is on */
	if (vout->streaming) {
		ret = -EBUSY;
		goto reqbuf_err;
	}

	/* If buffers are already allocated
	   free them */
	if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
		if (vout->mmap_count) {
			ret = -EBUSY;
			goto reqbuf_err;
		}
		/* Keep the default buffers; free only the extras. */
		num_buffers = (vout->vid == OMAP_VIDEO1) ?
			video1_numbuffers : video2_numbuffers;
		for (i = num_buffers; i < vout->buffer_allocated; i++) {
			omap_vout_free_buffer(vout->buf_virt_addr[i],
					vout->buffer_size);
			vout->buf_virt_addr[i] = 0;
			vout->buf_phy_addr[i] = 0;
		}
		vout->buffer_allocated = num_buffers;
		videobuf_mmap_free(q);
	} else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
		if (vout->buffer_allocated) {
			videobuf_mmap_free(q);
			for (i = 0; i < vout->buffer_allocated; i++) {
				kfree(q->bufs[i]);
				q->bufs[i] = NULL;
			}
			vout->buffer_allocated = 0;
		}
	}

	/* store the memory type in the data structure */
	vout->memory = req->memory;

	INIT_LIST_HEAD(&vout->dma_queue);

	/* call videobuf_reqbufs api */
	ret = videobuf_reqbufs(q, req);
	if (ret < 0)
		goto reqbuf_err;

	vout->buffer_allocated = req->count;

reqbuf_err:
	mutex_unlock(&vout->lock);
	return ret;
}

static int vidioc_querybuf(struct file *file, void *fh,
			struct v4l2_buffer *b)
{
	struct omap_vout_device *vout = fh;

	return videobuf_querybuf(&vout->vbq, b);
}

/* QBUF: validate the buffer before handing it to videobuf_qbuf(). */
static int vidioc_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buffer)
{
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;

	if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
			(buffer->index >= vout->buffer_allocated) ||
			(q->bufs[buffer->index]->memory != buffer->memory)) {
		return -EINVAL;
	}
	if (V4L2_MEMORY_USERPTR == buffer->memory) {
		if ((buffer->length < vout->pix.sizeimage) ||
			(0 == buffer->m.userptr)) {
			return -EINVAL;
		}
	}

	/* Rotation requires the VRFB DMA channel to have been allotted. */
	if ((is_rotation_enabled(vout)) &&
			vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
		v4l2_warn(&vout->vid_dev->v4l2_dev,
				"DMA Channel not allocated for Rotation\n");
		return -EINVAL;
	}

	return videobuf_qbuf(q, buffer);
}

static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;
	int
ret;
	u32 addr;
	unsigned long size;
	struct videobuf_buffer *vb;

	/* NOTE(review): b->index comes straight from userspace and is
	 * used to index q->bufs[] here with no range check, and before
	 * videobuf_dqbuf() has selected the buffer being dequeued —
	 * both look unsafe; compare with later mainline fixes. */
	vb = q->bufs[b->index];

	if (!vout->streaming)
		return -EINVAL;

	if (file->f_flags & O_NONBLOCK)
		/* Call videobuf_dqbuf for non blocking mode */
		ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
	else
		/* Call videobuf_dqbuf for blocking mode */
		ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);

	addr = (unsigned long) vout->buf_phy_addr[vb->i];
	size = (unsigned long) vb->size;
	dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
				size, DMA_TO_DEVICE);
	return ret;
}

/*
 * STREAMON: prime cur/next frame from dma_queue, register the DISPC
 * ISR, program all overlays with the first buffer address and enable
 * the pipeline.
 */
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
	int ret = 0, j;
	u32 addr = 0, mask = 0;
	struct omap_vout_device *vout = fh;
	struct videobuf_queue *q = &vout->vbq;
	struct omapvideo_info *ovid = &vout->vid_info;

	mutex_lock(&vout->lock);

	if (vout->streaming) {
		ret = -EBUSY;
		goto streamon_err;
	}

	ret = videobuf_streamon(q);
	if (ret)
		goto streamon_err;

	if (list_empty(&vout->dma_queue)) {
		ret = -EIO;
		goto streamon_err1;
	}

	/* Get the next frame from the buffer queue */
	vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
			struct videobuf_buffer, queue);
	/* Remove buffer from the buffer queue */
	list_del(&vout->cur_frm->queue);
	/* Mark state of the current frame to active */
	vout->cur_frm->state = VIDEOBUF_ACTIVE;
	/* Initialize field_id and started member */
	vout->field_id = 0;

	/* set flag here.
Next QBUF will start DMA */
	vout->streaming = 1;

	vout->first_int = 1;

	if (omap_vout_calculate_offset(vout)) {
		ret = -EINVAL;
		goto streamon_err1;
	}
	addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
		+ vout->cropped_offset;

	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
		| DISPC_IRQ_VSYNC2;

	omap_dispc_register_isr(omap_vout_isr, vout, mask);

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];

		if (ovl->manager && ovl->manager->device) {
			struct omap_overlay_info info;
			ovl->get_overlay_info(ovl, &info);
			info.paddr = addr;
			if (ovl->set_overlay_info(ovl, &info)) {
				ret = -EINVAL;
				goto streamon_err1;
			}
		}
	}

	/* First save the configuration in overlay structure */
	ret = omapvid_init(vout, addr);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev,
				"failed to set overlay info\n");
	/* Enable the pipeline and set the Go bit */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];

		if (ovl->manager && ovl->manager->device) {
			ret = ovl->enable(ovl);
			if (ret)
				goto streamon_err1;
		}
	}

	ret = 0;

streamon_err1:
	if (ret)
		ret = videobuf_streamoff(q);
streamon_err:
	mutex_unlock(&vout->lock);
	return ret;
}

/*
 * STREAMOFF: unregister the ISR, disable all overlays and tear down
 * the videobuf stream.
 */
static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
	u32 mask = 0;
	int ret = 0, j;
	struct omap_vout_device *vout = fh;
	struct omapvideo_info *ovid = &vout->vid_info;

	if (!vout->streaming)
		return -EINVAL;

	vout->streaming = 0;
	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
		| DISPC_IRQ_VSYNC2;

	omap_dispc_unregister_isr(omap_vout_isr, vout, mask);

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];

		if (ovl->manager && ovl->manager->device)
			ovl->disable(ovl);
	}

	/* Turn off the pipeline */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
				" streamoff\n");
	INIT_LIST_HEAD(&vout->dma_queue);
	ret = videobuf_streamoff(&vout->vbq);

	return ret;
}

/*
 * S_FBUF: translate V4L2 framebuffer flags into DSS manager settings
 * (source/destination chroma key, local alpha).  Src+dst keying, and
 * dst keying combined with local alpha, are mutually exclusive on DSS.
 */
static int vidioc_s_fbuf(struct file *file, void *fh,
				struct v4l2_framebuffer *a)
{
	int enable = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct omap_overlay_manager_info info;
	enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* OMAP DSS doesn't support Source and Destination color
	   key together */
	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
		return -EINVAL;
	/* OMAP DSS Doesn't support the Destination color key
	   and alpha blending together */
	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
		return -EINVAL;

	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
	} else
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;

	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
	} else
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;

	if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
				V4L2_FBUF_FLAG_SRC_CHROMAKEY))
		enable = 1;
	else
		enable = 0;
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		info.trans_enabled = enable;
		info.trans_key_type = key_type;
		info.trans_key = vout->win.chromakey;

		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}
	if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 1;
	} else {
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 0;
	}
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		/* enable this only if there is no zorder cap */
		if ((ovl->caps &
OMAP_DSS_OVL_CAP_ZORDER) == 0)
			info.partial_alpha_enabled = enable;
		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}

	return 0;
}

/* G_FBUF: report framebuffer flags derived from the manager state. */
static int vidioc_g_fbuf(struct file *file, void *fh,
		struct v4l2_framebuffer *a)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = fh;
	struct omap_overlay_manager_info info;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* The video overlay must stay within the framebuffer and can't be
	   positioned independently. */
	a->flags = V4L2_FBUF_FLAG_OVERLAY;
	a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
		| V4L2_FBUF_CAP_SRC_CHROMAKEY;

	if (ovl->manager && ovl->manager->get_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
			a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
			a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
	}
	if (ovl->manager && ovl->manager->get_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		if (info.partial_alpha_enabled)
			a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
	}

	return 0;
}

static const struct v4l2_ioctl_ops vout_ioctl_ops = {
	.vidioc_querycap		= vidioc_querycap,
	.vidioc_enum_fmt_vid_out	= vidioc_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out		= vidioc_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out		= vidioc_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out		= vidioc_s_fmt_vid_out,
	.vidioc_queryctrl		= vidioc_queryctrl,
	.vidioc_g_ctrl			= vidioc_g_ctrl,
	.vidioc_s_fbuf			= vidioc_s_fbuf,
	.vidioc_g_fbuf			= vidioc_g_fbuf,
	.vidioc_s_ctrl			= vidioc_s_ctrl,
	.vidioc_try_fmt_vid_overlay	= vidioc_try_fmt_vid_overlay,
	.vidioc_s_fmt_vid_overlay	= vidioc_s_fmt_vid_overlay,
	.vidioc_enum_fmt_vid_overlay	= vidioc_enum_fmt_vid_overlay,
	.vidioc_g_fmt_vid_overlay	= vidioc_g_fmt_vid_overlay,
	.vidioc_cropcap			= vidioc_cropcap,
	.vidioc_g_crop			= vidioc_g_crop,
	.vidioc_s_crop			= vidioc_s_crop,
	.vidioc_reqbufs			= vidioc_reqbufs,
	.vidioc_querybuf		= vidioc_querybuf,
	.vidioc_qbuf			=
vidioc_qbuf,
	.vidioc_dqbuf			= vidioc_dqbuf,
	.vidioc_streamon		= vidioc_streamon,
	.vidioc_streamoff		= vidioc_streamoff,
};

static const struct v4l2_file_operations omap_vout_fops = {
	.owner		= THIS_MODULE,
	.poll		= omap_vout_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= omap_vout_mmap,
	.open		= omap_vout_open,
	.release	= omap_vout_release,
};

/* Init functions used during driver initialization */
/* Initial setup of video_data */
static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
{
	struct video_device *vfd;
	struct v4l2_pix_format *pix;
	struct v4l2_control *control;
	struct omap_dss_device *display =
		vout->vid_info.overlays[0]->manager->device;

	/* set the default pix */
	pix = &vout->pix;

	/* Set the default picture of QVGA */
	pix->width = QQVGA_WIDTH;
	pix->height = QQVGA_HEIGHT;

	/* Default pixel format is RGB 5-6-5 */
	pix->pixelformat = V4L2_PIX_FMT_RGB565;
	pix->field = V4L2_FIELD_ANY;
	pix->bytesperline = pix->width * 2;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->priv = 0;
	pix->colorspace = V4L2_COLORSPACE_JPEG;

	vout->bpp = RGB565_BPP;
	vout->fbuf.fmt.width = display->panel.timings.x_res;
	vout->fbuf.fmt.height = display->panel.timings.y_res;

	/* Set the data structures for the overlay parameters */
	vout->win.global_alpha = 255;
	vout->fbuf.flags = 0;
	vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
		V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
	vout->win.chromakey = 0;

	omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);

	/* Initialize the control variables for
	   rotation, flipping and background color.
 */
	control = vout->control;
	control[0].id = V4L2_CID_ROTATE;
	control[0].value = 0;
	vout->rotation = 0;
	vout->mirror = 0;
	/* NOTE(review): slot 2 is registered as HFLIP here, but
	 * queryctrl/g_ctrl/s_ctrl all treat slot 2 as V4L2_CID_VFLIP —
	 * the id looks inconsistent; confirm intended control. */
	vout->control[2].id = V4L2_CID_HFLIP;
	vout->control[2].value = 0;
	if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
		vout->vrfb_bpp = 2;

	control[1].id = V4L2_CID_BG_COLOR;
	control[1].value = 0;

	/* initialize the video_device struct */
	vfd = vout->vfd = video_device_alloc();

	if (!vfd) {
		printk(KERN_ERR VOUT_NAME ": could not allocate"
				" video device struct\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->ioctl_ops = &vout_ioctl_ops;

	strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));

	vfd->fops = &omap_vout_fops;
	vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
	mutex_init(&vout->lock);

	vfd->minor = -1;
	return 0;
}

/* Setup video buffers */
static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
		int vid_num)
{
	u32 numbuffers;
	int ret = 0, i;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout;
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct omap2video_device *vid_dev =
		container_of(v4l2_dev, struct omap2video_device, v4l2_dev);

	vout = vid_dev->vouts[vid_num];
	ovid = &vout->vid_info;

	numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
	vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
	dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);

	/* Pre-allocate the default buffers; roll back all on failure. */
	for (i = 0; i < numbuffers; i++) {
		vout->buf_virt_addr[i] =
			omap_vout_alloc_buffer(vout->buffer_size,
					(u32 *) &vout->buf_phy_addr[i]);
		if (!vout->buf_virt_addr[i]) {
			numbuffers = i;
			ret = -ENOMEM;
			goto free_buffers;
		}
	}

	vout->cropped_offset = 0;

	if (ovid->rotation_type == VOUT_ROT_VRFB) {
		int static_vrfb_allocation = (vid_num == 0) ?
			vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
		ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
				static_vrfb_allocation);
	}
	return ret;

free_buffers:
	for (i = 0; i < numbuffers; i++) {
		omap_vout_free_buffer(vout->buf_virt_addr[i],
						vout->buffer_size);
		vout->buf_virt_addr[i] = 0;
		vout->buf_phy_addr[i] = 0;
	}
	return ret;
}

/* Create video out devices */
static int __init omap_vout_create_video_devices(struct platform_device *pdev)
{
	int ret = 0, k;
	struct omap_vout_device *vout;
	struct video_device *vfd = NULL;
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct omap2video_device *vid_dev = container_of(v4l2_dev,
			struct omap2video_device, v4l2_dev);

	for (k = 0; k < pdev->num_resources; k++) {

		vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
		if (!vout) {
			dev_err(&pdev->dev, ": could not allocate memory\n");
			return -ENOMEM;
		}

		vout->vid = k;
		vid_dev->vouts[k] = vout;
		vout->vid_dev = vid_dev;
		/* Select video2 if only 1 overlay is controlled by V4L2 */
		if (pdev->num_resources == 1)
			vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
		else
			/* Else select video1 and video2 one by one.
			 */
			vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
		vout->vid_info.num_overlays = 1;
		vout->vid_info.id = k + 1;

		/* Set VRFB as rotation_type for omap2 and omap3 */
		if (cpu_is_omap24xx() || cpu_is_omap34xx())
			vout->vid_info.rotation_type = VOUT_ROT_VRFB;

		/* Setup the default configuration for the video devices
		 */
		if (omap_vout_setup_video_data(vout) != 0) {
			ret = -ENOMEM;
			goto error;
		}

		/* Allocate default number of buffers for the video streaming
		 * and reserve the VRFB space for rotation
		 */
		if (omap_vout_setup_video_bufs(pdev, k) != 0) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Register the Video device with V4L2
		 */
		vfd = vout->vfd;
		if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
			dev_err(&pdev->dev, ": Could not register "
					"Video for Linux device\n");
			vfd->minor = -1;
			ret = -ENODEV;
			goto error2;
		}
		video_set_drvdata(vfd, vout);

		/* Configure the overlay structure */
		ret = omapvid_init(vid_dev->vouts[k], 0);
		if (!ret)
			goto success;

error2:
		if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
			omap_vout_release_vrfb(vout);
		omap_vout_free_buffers(vout);
error1:
		video_device_release(vfd);
error:
		kfree(vout);
		return ret;

success:
		dev_info(&pdev->dev, ": registered and initialized"
				" video device %d\n", vfd->minor);
		if (k == (pdev->num_resources - 1))
			return 0;
	}

	return -ENODEV;
}

/* Driver functions */
static void omap_vout_cleanup_device(struct omap_vout_device *vout)
{
	struct video_device *vfd;
	struct omapvideo_info *ovid;

	if (!vout)
		return;

	vfd = vout->vfd;
	ovid = &vout->vid_info;
	if (vfd) {
		if (!video_is_registered(vfd)) {
			/*
			 * The device was never registered, so release the
			 * video_device struct directly.
			 */
			video_device_release(vfd);
		} else {
			/*
			 * The unregister function will release the video_device
			 * struct as well as unregistering it.
*/ video_unregister_device(vfd); } } if (ovid->rotation_type == VOUT_ROT_VRFB) { omap_vout_release_vrfb(vout); /* Free the VRFB buffer if allocated * init time */ if (vout->vrfb_static_allocation) omap_vout_free_vrfb_buffers(vout); } omap_vout_free_buffers(vout); kfree(vout); } static int omap_vout_remove(struct platform_device *pdev) { int k; struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct omap2video_device *vid_dev = container_of(v4l2_dev, struct omap2video_device, v4l2_dev); v4l2_device_unregister(v4l2_dev); for (k = 0; k < pdev->num_resources; k++) omap_vout_cleanup_device(vid_dev->vouts[k]); for (k = 0; k < vid_dev->num_displays; k++) { if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED) vid_dev->displays[k]->driver->disable(vid_dev->displays[k]); omap_dss_put_device(vid_dev->displays[k]); } kfree(vid_dev); return 0; } static int __init omap_vout_probe(struct platform_device *pdev) { int ret = 0, i; struct omap_overlay *ovl; struct omap_dss_device *dssdev = NULL; struct omap_dss_device *def_display; struct omap2video_device *vid_dev = NULL; if (pdev->num_resources == 0) { dev_err(&pdev->dev, "probed for an unknown device\n"); return -ENODEV; } vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL); if (vid_dev == NULL) return -ENOMEM; vid_dev->num_displays = 0; for_each_dss_dev(dssdev) { omap_dss_get_device(dssdev); if (!dssdev->driver) { dev_warn(&pdev->dev, "no driver for display: %s\n", dssdev->name); omap_dss_put_device(dssdev); continue; } vid_dev->displays[vid_dev->num_displays++] = dssdev; } if (vid_dev->num_displays == 0) { dev_err(&pdev->dev, "no displays\n"); ret = -EINVAL; goto probe_err0; } vid_dev->num_overlays = omap_dss_get_num_overlays(); for (i = 0; i < vid_dev->num_overlays; i++) vid_dev->overlays[i] = omap_dss_get_overlay(i); vid_dev->num_managers = omap_dss_get_num_overlay_managers(); for (i = 0; i < vid_dev->num_managers; i++) vid_dev->managers[i] = omap_dss_get_overlay_manager(i); /* Get the Video1 
overlay and video2 overlay. * Setup the Display attached to that overlays */ for (i = 1; i < vid_dev->num_overlays; i++) { ovl = omap_dss_get_overlay(i); if (ovl->manager && ovl->manager->device) { def_display = ovl->manager->device; } else { dev_warn(&pdev->dev, "cannot find display\n"); def_display = NULL; } if (def_display) { struct omap_dss_driver *dssdrv = def_display->driver; ret = dssdrv->enable(def_display); if (ret) { /* Here we are not considering a error * as display may be enabled by frame * buffer driver */ dev_warn(&pdev->dev, "'%s' Display already enabled\n", def_display->name); } } } if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) { dev_err(&pdev->dev, "v4l2_device_register failed\n"); ret = -ENODEV; goto probe_err1; } ret = omap_vout_create_video_devices(pdev); if (ret) goto probe_err2; for (i = 0; i < vid_dev->num_displays; i++) { struct omap_dss_device *display = vid_dev->displays[i]; if (display->driver->update) display->driver->update(display, 0, 0, display->panel.timings.x_res, display->panel.timings.y_res); } return 0; probe_err2: v4l2_device_unregister(&vid_dev->v4l2_dev); probe_err1: for (i = 1; i < vid_dev->num_overlays; i++) { def_display = NULL; ovl = omap_dss_get_overlay(i); if (ovl->manager && ovl->manager->device) def_display = ovl->manager->device; if (def_display && def_display->driver) def_display->driver->disable(def_display); } probe_err0: kfree(vid_dev); return ret; } static struct platform_driver omap_vout_driver = { .driver = { .name = VOUT_NAME, }, .remove = omap_vout_remove, }; static int __init omap_vout_init(void) { if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) { printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n"); return -EINVAL; } return 0; } static void omap_vout_cleanup(void) { platform_driver_unregister(&omap_vout_driver); } late_initcall(omap_vout_init); module_exit(omap_vout_cleanup);
gpl-2.0
meizuosc/m462
drivers/isdn/hardware/eicon/istream.c
9741
6338
/*
 * Copyright (c) Eicon Networks, 2002.
 *
 * This source file is supplied for the use with Eicon Networks range of
 * DIVA Server Adapters.  Eicon File Revision : 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "platform.h"
#if defined(DIVA_ISTREAM) /* { */
#include "pc.h"
#include "pr_pc.h"
#include "di_defs.h"
#include "divasync.h"
#include "di.h"
#if !defined USE_EXTENDED_DEBUGS
#include "dimaint.h"
#else
#define dprintf
#endif
#include "dfifo.h"

int diva_istream_write(void *context, int Id, void *data, int length,
		       int final, byte usr1, byte usr2);
int diva_istream_read(void *context, int Id, void *data, int max_length,
		      int *final, byte *usr1, byte *usr2);

/* -------------------------------------------------------------------
   Does provide iStream interface to the client
   ------------------------------------------------------------------- */
void diva_xdi_provide_istream_info(ADAPTER *a,
				   diva_xdi_stream_interface_t *pi)
{
	/* This build offers no iStream services to the caller. */
	pi->provided_service = 0;
}

/* ------------------------------------------------------------------
   Write caller's data into the card's tx stream FIFO.

   Each FIFO slot is a 4-byte descriptor dword followed by the payload
   at offset +4.  As used below: byte 0 carries the READY/WRAP/LAST
   flags, byte 1 the payload length, bytes 2/3 the user values that
   accompany the final fragment of a frame.

   'final' marks the data as the last part of a frame (only relevant
   for structured data transfer).

   Returns the number of bytes written (0 for a zero-length packet),
   or -1 if the stream is full and nothing could be written.
   ------------------------------------------------------------------ */
int diva_istream_write(void *context, int Id, void *data, int length,
		       int final, byte usr1, byte usr2)
{
	ADAPTER *a = (ADAPTER *)context;
	int done = 0, chunk = -1;	/* chunk < 0: nothing written yet */
	char head[4];
	byte *src = (byte *)data;

	for (;;) {
		/* Fetch the descriptor of the current tx slot */
		a->ram_in_dw(a,
#ifdef PLATFORM_GT_32BIT
			     ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
#else
			     (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
#endif
			     (dword *)&head[0], 1);
		if (head[0] & DIVA_DFIFO_READY) {
			/* Slot still owned by the card: no free slots left */
			if (chunk < 0)
				return (-1); /* was not able to write */
			break; /* only part of message was written */
		}
		chunk = min(length, DIVA_DFIFO_DATA_SZ);
		if (chunk) {
			a->ram_out_buffer(a,
#ifdef PLATFORM_GT_32BIT
					  ULongToPtr(a->tx_stream[Id] +
						     a->tx_pos[Id] + 4),
#else
					  (void *)(a->tx_stream[Id] +
						   a->tx_pos[Id] + 4),
#endif
					  src, (word)chunk);
			length -= chunk;
			done += chunk;
			src += chunk;
		}
		/* Hand the slot back to the card, preserving WRAP */
		head[1] = (char)chunk;
		head[0] = (head[0] & DIVA_DFIFO_WRAP) |
			  DIVA_DFIFO_READY |
			  ((!length && final) ? DIVA_DFIFO_LAST : 0);
		if (head[0] & DIVA_DFIFO_LAST) {
			head[2] = usr1;
			head[3] = usr2;
		}
		a->ram_out_dw(a,
#ifdef PLATFORM_GT_32BIT
			      ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
#else
			      (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
#endif
			      (dword *)&head[0], 1);
		/* Advance, wrapping to the start of the ring if flagged */
		if (head[0] & DIVA_DFIFO_WRAP) {
			a->tx_pos[Id] = 0;
		} else {
			a->tx_pos[Id] += DIVA_DFIFO_STEP;
		}
		if (!length) {
			break;
		}
	}

	return (done);
}

/* -------------------------------------------------------------------
   In case of SYNCHRONOUS service: copy data from the rx stream into
   the caller's buffer.

   On return *final is set if the last part of a structured frame was
   received; *usr1/*usr2 (if non-NULL) then receive the user bytes of
   that final fragment.

   Returns the number of bytes read (0 for a zero-length packet),
   -1 if the stream is empty, or -2 if the buffer cannot hold the next
   segment (max_length should be at least 68 bytes).
   ------------------------------------------------------------------- */
int diva_istream_read(void *context, int Id, void *data, int max_length,
		      int *final, byte *usr1, byte *usr2)
{
	ADAPTER *a = (ADAPTER *)context;
	int done = 0, chunk = -1;	/* chunk < 0: nothing read yet */
	char head[4];
	byte *dst = (byte *)data;

	*final = 0;
	for (;;) {
		/* Fetch the descriptor of the current rx slot */
		a->ram_in_dw(a,
#ifdef PLATFORM_GT_32BIT
			     ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
#else
			     (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
#endif
			     (dword *)&head[0], 1);
		if (head[1] > max_length) {
			/* Segment does not fit in the remaining buffer */
			if (chunk < 0)
				return (-2); /* was not able to read */
			break;
		}
		if (!(head[0] & DIVA_DFIFO_READY)) {
			/* Slot not yet filled by the card */
			if (chunk < 0)
				return (-1); /* was not able to read */
			break;
		}
		chunk = min(max_length, (int)head[1]);
		if (chunk) {
			a->ram_in_buffer(a,
#ifdef PLATFORM_GT_32BIT
					 ULongToPtr(a->rx_stream[Id] +
						    a->rx_pos[Id] + 4),
#else
					 (void *)(a->rx_stream[Id] +
						  a->rx_pos[Id] + 4),
#endif
					 dst, (word)chunk);
			max_length -= chunk;
			done += chunk;
			dst += chunk;
		}
		if (head[0] & DIVA_DFIFO_LAST) {
			*final = 1;
		}
		/* Release the slot to the card, keeping only WRAP */
		head[0] &= DIVA_DFIFO_WRAP;
		a->ram_out_dw(a,
#ifdef PLATFORM_GT_32BIT
			      ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
#else
			      (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
#endif
			      (dword *)&head[0], 1);
		if (head[0] & DIVA_DFIFO_WRAP) {
			a->rx_pos[Id] = 0;
		} else {
			a->rx_pos[Id] += DIVA_DFIFO_STEP;
		}
		if (*final) {
			if (usr1)
				*usr1 = head[2];
			if (usr2)
				*usr2 = head[3];
			break;
		}
	}

	return (done);
}

/* ---------------------------------------------------------------------
   Does check if one of streams had caused interrupt and does
   wake up corresponding application
   --------------------------------------------------------------------- */
void pr_stream(ADAPTER *a)
{
	/* Intentionally empty in this build. */
}
#endif /* } */
gpl-2.0
Galaxy-J5/android_kernel_samsung_j5nlte
drivers/isdn/hardware/eicon/s_pri.c
9741
7756
/*
 * Copyright (c) Eicon Networks, 2002.
 *
 * This source file is supplied for the use with Eicon Networks range of
 * DIVA Server Adapters.  Eicon File Revision : 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "pr_pc.h"
#include "di.h"
#include "mi_pc.h"
#include "pc_maint.h"
#include "divasync.h"
#include "io.h"
#include "helpers.h"
#include "dsrv_pri.h"
#include "dsp_defs.h"
/*****************************************************************************/
#define MAX_XLOG_SIZE (64 * 1024)

/* -------------------------------------------------------------------------
   Does return offset between ADAPTER->ram and real begin of memory
   ------------------------------------------------------------------------- */
static dword pri_ram_offset(ADAPTER *a)
{
	return ((dword)MP_SHARED_RAM_OFFSET);
}

/* -------------------------------------------------------------------------
   Recovery XLOG buffer from the card
   ------------------------------------------------------------------------- */
static void pri_cpu_trapped(PISDN_ADAPTER IoAdapter)
{
	byte __iomem *base;
	word *xlog;
	dword regs[4], trap_id, size;
	Xdesc xlogDesc;

	/*
	 * Check for a trapped MIPS 46xx CPU and dump its exception frame.
	 */
	base = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
	trap_id = READ_DWORD(&base[0x80]);
	if ((trap_id == 0x99999999) || (trap_id == 0x99999901)) {
		dump_trap_frame(IoAdapter, &base[0x90]);
		IoAdapter->trapped = 1;
	}
	/* Protocol-area registers: regs[0] = XLOG buffer, [1]/[2] = counters */
	regs[0] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x70]);
	regs[1] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x74]);
	regs[2] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x78]);
	regs[3] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x7c]);
	regs[0] &= IoAdapter->MemorySize - 1;
	if (regs[0] < IoAdapter->MemorySize - 1) {
		xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE);
		if (!xlog) {
			DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
			return;
		}
		size = IoAdapter->MemorySize - regs[0];
		if (size > MAX_XLOG_SIZE)
			size = MAX_XLOG_SIZE;
		memcpy_fromio(xlog, &base[regs[0]], size);
		xlogDesc.buf = xlog;
		xlogDesc.cnt = READ_WORD(&base[regs[1] &
					       (IoAdapter->MemorySize - 1)]);
		xlogDesc.out = READ_WORD(&base[regs[2] &
					       (IoAdapter->MemorySize - 1)]);
		dump_xlog_buffer(IoAdapter, &xlogDesc);
		diva_os_free(0, xlog);
		IoAdapter->trapped = 2;
	}
	DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
}

/* -------------------------------------------------------------------------
   Hardware reset of PRI card
   ------------------------------------------------------------------------- */
static void reset_pri_hardware(PISDN_ADAPTER IoAdapter)
{
	byte __iomem *p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);

	/* Pulse the RISC reset line (LEDs on while in reset) */
	WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
	diva_os_wait(50);
	WRITE_BYTE(p, 0x00);
	diva_os_wait(50);
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
}

/* -------------------------------------------------------------------------
   Stop Card Hardware
   ------------------------------------------------------------------------- */
static void stop_pri_hardware(PISDN_ADAPTER IoAdapter)
{
	dword i;
	byte __iomem *p;
	dword volatile __iomem *cfgReg =
		(void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);

	/* Mask interrupts in the config registers */
	WRITE_DWORD(&cfgReg[3], 0);
	WRITE_DWORD(&cfgReg[1], 0);
	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);

	/* Ask the on-card CPU to halt and poll (up to 100ms) for the ack */
	IoAdapter->a.ram_out(&IoAdapter->a, &RAM->SWReg, SWREG_HALT_CPU);
	i = 0;
	while ((i < 100) &&
	       (IoAdapter->a.ram_in(&IoAdapter->a, &RAM->SWReg) != 0)) {
		diva_os_wait(1);
		i++;
	}
	DBG_TRC(("%s: PRI stopped (%d)", IoAdapter->Name, i))
	cfgReg = (void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
	WRITE_DWORD(&cfgReg[0], ((dword)(~0x03E00000)));
	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
	diva_os_wait(1);

	/* Finally hold the RISC in reset */
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
}

/* Firmware is loaded elsewhere for this adapter family */
static int load_pri_hardware(PISDN_ADAPTER IoAdapter)
{
	return (0);
}

/* --------------------------------------------------------------------------
   PRI Adapter interrupt Service Routine
   -------------------------------------------------------------------------- */
static int pri_ISR(struct _ISDN_ADAPTER *IoAdapter)
{
	byte __iomem *cfg = DIVA_OS_MEM_ATTACH_CFG(IoAdapter);

	/* Bit 31 of the config register flags a pending interrupt */
	if (!(READ_DWORD(cfg) & 0x80000000)) {
		DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
		return (0);	/* not ours */
	}
	/*
	 * clear interrupt line
	 */
	WRITE_DWORD(cfg, (dword)~0x03E00000);
	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
	IoAdapter->IrqCount++;
	if (IoAdapter->Initialized) {
		diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
	}
	return (1);
}

/* -------------------------------------------------------------------------
   Disable interrupt in the card hardware
   ------------------------------------------------------------------------- */
static void disable_pri_interrupt(PISDN_ADAPTER IoAdapter)
{
	dword volatile __iomem *cfgReg =
		(dword volatile __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);

	WRITE_DWORD(&cfgReg[3], 0);
	WRITE_DWORD(&cfgReg[1], 0);
	WRITE_DWORD(&cfgReg[0], (dword)(~0x03E00000));
	DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
}

/* -------------------------------------------------------------------------
   Install entry points for PRI Adapter
   ------------------------------------------------------------------------- */
static void prepare_common_pri_functions(PISDN_ADAPTER IoAdapter)
{
	ADAPTER *a = &IoAdapter->a;

	/* Shared-memory access primitives */
	a->ram_in = mem_in;
	a->ram_inw = mem_inw;
	a->ram_in_buffer = mem_in_buffer;
	a->ram_look_ahead = mem_look_ahead;
	a->ram_out = mem_out;
	a->ram_outw = mem_outw;
	a->ram_out_buffer = mem_out_buffer;
	a->ram_inc = mem_inc;
	a->ram_offset = pri_ram_offset;
	a->ram_out_dw = mem_out_dw;
	a->ram_in_dw = mem_in_dw;
	a->istream_wakeup = pr_stream;
	/* Adapter-level entry points */
	IoAdapter->out = pr_out;
	IoAdapter->dpc = pr_dpc;
	IoAdapter->tst_irq = scom_test_int;
	IoAdapter->clr_irq = scom_clear_int;
	IoAdapter->pcm = (struct pc_maint *)(MIPS_MAINT_OFFS
					     - MP_SHARED_RAM_OFFSET);
	IoAdapter->load = load_pri_hardware;
	IoAdapter->disIrq = disable_pri_interrupt;
	IoAdapter->rstFnc = reset_pri_hardware;
	IoAdapter->stop = stop_pri_hardware;
	IoAdapter->trapFnc = pri_cpu_trapped;
	IoAdapter->diva_isr_handler = pri_ISR;
}

/* -------------------------------------------------------------------------
   Install entry points for PRI Adapter
   ------------------------------------------------------------------------- */
void prepare_pri_functions(PISDN_ADAPTER IoAdapter)
{
	IoAdapter->MemorySize = MP_MEMORY_SIZE;
	prepare_common_pri_functions(IoAdapter);
	diva_os_prepare_pri_functions(IoAdapter);
}

/* -------------------------------------------------------------------------
   Install entry points for PRI Rev.2 Adapter
   ------------------------------------------------------------------------- */
void prepare_pri2_functions(PISDN_ADAPTER IoAdapter)
{
	IoAdapter->MemorySize = MP2_MEMORY_SIZE;
	prepare_common_pri_functions(IoAdapter);
	diva_os_prepare_pri2_functions(IoAdapter);
}

/* ------------------------------------------------------------------------- */
gpl-2.0
shoey63/kernel-copyleft
arch/powerpc/oprofile/backtrace.c
10509
2953
/**
 * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
**/

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/compat.h>

#define STACK_SP(STACK)		*(STACK)

#define STACK_LR64(STACK)	*((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK)	*((unsigned int *)(STACK) + 1)

#ifdef CONFIG_PPC64
#define STACK_LR(STACK)		STACK_LR64(STACK)
#else
#define STACK_LR(STACK)		STACK_LR32(STACK)
#endif

/*
 * Record the LR of one 32-bit user stack frame and return the SP of
 * the caller's frame, or 0 when the walk must stop.
 */
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
	unsigned int frame[2];
	void __user *uaddr = compat_ptr(sp);

	if (!access_ok(VERIFY_READ, uaddr, sizeof(frame)))
		return 0;

	/*
	 * The most likely reason for this is that we returned -EFAULT,
	 * which means that we've done all that we can do from
	 * interrupt context.
	 */
	if (__copy_from_user_inatomic(frame, uaddr, sizeof(frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR32(frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we may transition to a different stack, eg a signal handler.
	 */
	return STACK_SP(frame);
}

#ifdef CONFIG_PPC64
/* 64-bit user-space counterpart of user_getsp32(). */
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
	unsigned long frame[3];
	void __user *uaddr = (void __user *)sp;

	if (!access_ok(VERIFY_READ, uaddr, sizeof(frame)))
		return 0;

	if (__copy_from_user_inatomic(frame, uaddr, sizeof(frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR64(frame));

	return STACK_SP(frame);
}
#endif

/* Kernel-stack counterpart; frames are directly addressable. */
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
	unsigned long *frame = (unsigned long *)sp;

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR(frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we might be transitioning from an interrupt stack to a kernel
	 * stack. validate_sp() is designed to understand this, so just
	 * use it.
	 */
	return STACK_SP(frame);
}

/*
 * Walk up to 'depth' stack frames starting at regs->gpr[1] and feed
 * the saved link registers to oprofile, choosing the kernel, 64-bit
 * user or 32-bit user walker as appropriate.
 */
void op_powerpc_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	unsigned long sp = regs->gpr[1];
	int first = 1;

	/* We ditch the top stackframe so need to loop through an extra time */
	depth += 1;

	if (!user_mode(regs)) {
		while (depth--) {
			sp = kernel_getsp(sp, first);
			if (!sp)
				return;
			first = 0;
		}
		return;
	}

#ifdef CONFIG_PPC64
	if (!is_32bit_task()) {
		while (depth--) {
			sp = user_getsp64(sp, first);
			if (!sp)
				return;
			first = 0;
		}
		return;
	}
#endif

	while (depth--) {
		sp = user_getsp32(sp, first);
		if (!sp)
			return;
		first = 0;
	}
}
gpl-2.0
jonsmirl/mpc5200
fs/befs/debug.c
12557
7739
/* * linux/fs/befs/debug.c * * Copyright (C) 2001 Will Dyson (will_dyson at pobox.com) * * With help from the ntfs-tng driver by Anton Altparmakov * * Copyright (C) 1999 Makoto Kato (m_kato@ga2.so-net.ne.jp) * * debug functions */ #ifdef __KERNEL__ #include <stdarg.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #endif /* __KERNEL__ */ #include "befs.h" #define ERRBUFSIZE 1024 void befs_error(const struct super_block *sb, const char *fmt, ...) { va_list args; char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); if (err_buf == NULL) { printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); return; } va_start(args, fmt); vsnprintf(err_buf, ERRBUFSIZE, fmt, args); va_end(args); printk(KERN_ERR "BeFS(%s): %s\n", sb->s_id, err_buf); kfree(err_buf); } void befs_warning(const struct super_block *sb, const char *fmt, ...) { va_list args; char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); if (err_buf == NULL) { printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); return; } va_start(args, fmt); vsnprintf(err_buf, ERRBUFSIZE, fmt, args); va_end(args); printk(KERN_WARNING "BeFS(%s): %s\n", sb->s_id, err_buf); kfree(err_buf); } void befs_debug(const struct super_block *sb, const char *fmt, ...) 
{ #ifdef CONFIG_BEFS_DEBUG va_list args; char *err_buf = NULL; if (BEFS_SB(sb)->mount_opts.debug) { err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); if (err_buf == NULL) { printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); return; } va_start(args, fmt); vsnprintf(err_buf, ERRBUFSIZE, fmt, args); va_end(args); printk(KERN_DEBUG "BeFS(%s): %s\n", sb->s_id, err_buf); kfree(err_buf); } #endif //CONFIG_BEFS_DEBUG } void befs_dump_inode(const struct super_block *sb, befs_inode * inode) { #ifdef CONFIG_BEFS_DEBUG befs_block_run tmp_run; befs_debug(sb, "befs_inode information"); befs_debug(sb, " magic1 %08x", fs32_to_cpu(sb, inode->magic1)); tmp_run = fsrun_to_cpu(sb, inode->inode_num); befs_debug(sb, " inode_num %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " uid %u", fs32_to_cpu(sb, inode->uid)); befs_debug(sb, " gid %u", fs32_to_cpu(sb, inode->gid)); befs_debug(sb, " mode %08x", fs32_to_cpu(sb, inode->mode)); befs_debug(sb, " flags %08x", fs32_to_cpu(sb, inode->flags)); befs_debug(sb, " create_time %Lu", fs64_to_cpu(sb, inode->create_time)); befs_debug(sb, " last_modified_time %Lu", fs64_to_cpu(sb, inode->last_modified_time)); tmp_run = fsrun_to_cpu(sb, inode->parent); befs_debug(sb, " parent [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); tmp_run = fsrun_to_cpu(sb, inode->attributes); befs_debug(sb, " attributes [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " type %08x", fs32_to_cpu(sb, inode->type)); befs_debug(sb, " inode_size %u", fs32_to_cpu(sb, inode->inode_size)); if (S_ISLNK(fs32_to_cpu(sb, inode->mode))) { befs_debug(sb, " Symbolic link [%s]", inode->data.symlink); } else { int i; for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; i++) { tmp_run = fsrun_to_cpu(sb, inode->data.datastream.direct[i]); befs_debug(sb, " direct %d [%u, %hu, %hu]", i, tmp_run.allocation_group, tmp_run.start, tmp_run.len); } befs_debug(sb, " max_direct_range %Lu", fs64_to_cpu(sb, 
inode->data.datastream. max_direct_range)); tmp_run = fsrun_to_cpu(sb, inode->data.datastream.indirect); befs_debug(sb, " indirect [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " max_indirect_range %Lu", fs64_to_cpu(sb, inode->data.datastream. max_indirect_range)); tmp_run = fsrun_to_cpu(sb, inode->data.datastream.double_indirect); befs_debug(sb, " double indirect [%u, %hu, %hu]", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " max_double_indirect_range %Lu", fs64_to_cpu(sb, inode->data.datastream. max_double_indirect_range)); befs_debug(sb, " size %Lu", fs64_to_cpu(sb, inode->data.datastream.size)); } #endif //CONFIG_BEFS_DEBUG } /* * Display super block structure for debug. */ void befs_dump_super_block(const struct super_block *sb, befs_super_block * sup) { #ifdef CONFIG_BEFS_DEBUG befs_block_run tmp_run; befs_debug(sb, "befs_super_block information"); befs_debug(sb, " name %s", sup->name); befs_debug(sb, " magic1 %08x", fs32_to_cpu(sb, sup->magic1)); befs_debug(sb, " fs_byte_order %08x", fs32_to_cpu(sb, sup->fs_byte_order)); befs_debug(sb, " block_size %u", fs32_to_cpu(sb, sup->block_size)); befs_debug(sb, " block_shift %u", fs32_to_cpu(sb, sup->block_shift)); befs_debug(sb, " num_blocks %Lu", fs64_to_cpu(sb, sup->num_blocks)); befs_debug(sb, " used_blocks %Lu", fs64_to_cpu(sb, sup->used_blocks)); befs_debug(sb, " magic2 %08x", fs32_to_cpu(sb, sup->magic2)); befs_debug(sb, " blocks_per_ag %u", fs32_to_cpu(sb, sup->blocks_per_ag)); befs_debug(sb, " ag_shift %u", fs32_to_cpu(sb, sup->ag_shift)); befs_debug(sb, " num_ags %u", fs32_to_cpu(sb, sup->num_ags)); befs_debug(sb, " flags %08x", fs32_to_cpu(sb, sup->flags)); tmp_run = fsrun_to_cpu(sb, sup->log_blocks); befs_debug(sb, " log_blocks %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); befs_debug(sb, " log_start %Ld", fs64_to_cpu(sb, sup->log_start)); befs_debug(sb, " log_end %Ld", fs64_to_cpu(sb, sup->log_end)); befs_debug(sb, " 
magic3 %08x", fs32_to_cpu(sb, sup->magic3)); tmp_run = fsrun_to_cpu(sb, sup->root_dir); befs_debug(sb, " root_dir %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); tmp_run = fsrun_to_cpu(sb, sup->indices); befs_debug(sb, " indices %u, %hu, %hu", tmp_run.allocation_group, tmp_run.start, tmp_run.len); #endif //CONFIG_BEFS_DEBUG } #if 0 /* unused */ void befs_dump_small_data(const struct super_block *sb, befs_small_data * sd) { } /* unused */ void befs_dump_run(const struct super_block *sb, befs_disk_block_run run) { #ifdef CONFIG_BEFS_DEBUG befs_block_run n = fsrun_to_cpu(sb, run); befs_debug(sb, "[%u, %hu, %hu]", n.allocation_group, n.start, n.len); #endif //CONFIG_BEFS_DEBUG } #endif /* 0 */ void befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * super) { #ifdef CONFIG_BEFS_DEBUG befs_debug(sb, "Btree super structure"); befs_debug(sb, " magic %08x", fs32_to_cpu(sb, super->magic)); befs_debug(sb, " node_size %u", fs32_to_cpu(sb, super->node_size)); befs_debug(sb, " max_depth %08x", fs32_to_cpu(sb, super->max_depth)); befs_debug(sb, " data_type %08x", fs32_to_cpu(sb, super->data_type)); befs_debug(sb, " root_node_pointer %016LX", fs64_to_cpu(sb, super->root_node_ptr)); befs_debug(sb, " free_node_pointer %016LX", fs64_to_cpu(sb, super->free_node_ptr)); befs_debug(sb, " maximum size %016LX", fs64_to_cpu(sb, super->max_size)); #endif //CONFIG_BEFS_DEBUG } void befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead * node) { #ifdef CONFIG_BEFS_DEBUG befs_debug(sb, "Btree node structure"); befs_debug(sb, " left %016LX", fs64_to_cpu(sb, node->left)); befs_debug(sb, " right %016LX", fs64_to_cpu(sb, node->right)); befs_debug(sb, " overflow %016LX", fs64_to_cpu(sb, node->overflow)); befs_debug(sb, " all_key_count %hu", fs16_to_cpu(sb, node->all_key_count)); befs_debug(sb, " all_key_length %hu", fs16_to_cpu(sb, node->all_key_length)); #endif //CONFIG_BEFS_DEBUG }
gpl-2.0
redstar3894/android-gcc-4.6
libgfortran/generated/pack_r8.c
14
7638
/* Specific implementation of the PACK intrinsic Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Contributed by Paul Brook <paul@nowt.org> This file is part of the GNU Fortran 95 runtime library (libgfortran). Libgfortran is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. Ligbfortran is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "libgfortran.h" #include <stdlib.h> #include <assert.h> #include <string.h> #if defined (HAVE_GFC_REAL_8) /* PACK is specified as follows: 13.14.80 PACK (ARRAY, MASK, [VECTOR]) Description: Pack an array into an array of rank one under the control of a mask. Class: Transformational function. Arguments: ARRAY may be of any type. It shall not be scalar. MASK shall be of type LOGICAL. It shall be conformable with ARRAY. VECTOR (optional) shall be of the same type and type parameters as ARRAY. VECTOR shall have at least as many elements as there are true elements in MASK. If MASK is a scalar with the value true, VECTOR shall have at least as many elements as there are in ARRAY. Result Characteristics: The result is an array of rank one with the same type and type parameters as ARRAY. 
If VECTOR is present, the result size is that of VECTOR; otherwise, the result size is the number /t/ of true elements in MASK unless MASK is scalar with the value true, in which case the result size is the size of ARRAY. Result Value: Element /i/ of the result is the element of ARRAY that corresponds to the /i/th true element of MASK, taking elements in array element order, for /i/ = 1, 2, ..., /t/. If VECTOR is present and has size /n/ > /t/, element /i/ of the result has the value VECTOR(/i/), for /i/ = /t/ + 1, ..., /n/. Examples: The nonzero elements of an array M with the value | 0 0 0 | | 9 0 0 | may be "gathered" by the function PACK. The result of | 0 0 7 | PACK (M, MASK = M.NE.0) is [9,7] and the result of PACK (M, M.NE.0, VECTOR = (/ 2,4,6,8,10,12 /)) is [9,7,6,8,10,12]. There are two variants of the PACK intrinsic: one, where MASK is array valued, and the other one where MASK is scalar. */ void pack_r8 (gfc_array_r8 *ret, const gfc_array_r8 *array, const gfc_array_l1 *mask, const gfc_array_r8 *vector) { /* r.* indicates the return array. */ index_type rstride0; GFC_REAL_8 * restrict rptr; /* s.* indicates the source array. */ index_type sstride[GFC_MAX_DIMENSIONS]; index_type sstride0; const GFC_REAL_8 *sptr; /* m.* indicates the mask array. */ index_type mstride[GFC_MAX_DIMENSIONS]; index_type mstride0; const GFC_LOGICAL_1 *mptr; index_type count[GFC_MAX_DIMENSIONS]; index_type extent[GFC_MAX_DIMENSIONS]; int zero_sized; index_type n; index_type dim; index_type nelem; index_type total; int mask_kind; dim = GFC_DESCRIPTOR_RANK (array); mptr = mask->data; /* Use the same loop for all logical types, by using GFC_LOGICAL_1 and using shifting to address size and endian issues. */ mask_kind = GFC_DESCRIPTOR_SIZE (mask); if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 #ifdef HAVE_GFC_LOGICAL_16 || mask_kind == 16 #endif ) { /* Do not convert a NULL pointer as we use test for NULL below. 
*/ if (mptr) mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind); } else runtime_error ("Funny sized logical array"); zero_sized = 0; for (n = 0; n < dim; n++) { count[n] = 0; extent[n] = GFC_DESCRIPTOR_EXTENT(array,n); if (extent[n] <= 0) zero_sized = 1; sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n); mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n); } if (sstride[0] == 0) sstride[0] = 1; if (mstride[0] == 0) mstride[0] = mask_kind; if (zero_sized) sptr = NULL; else sptr = array->data; if (ret->data == NULL || unlikely (compile_options.bounds_check)) { /* Count the elements, either for allocating memory or for bounds checking. */ if (vector != NULL) { /* The return array will have as many elements as there are in VECTOR. */ total = GFC_DESCRIPTOR_EXTENT(vector,0); if (total < 0) { total = 0; vector = NULL; } } else { /* We have to count the true elements in MASK. */ total = count_0 (mask); } if (ret->data == NULL) { /* Setup the array descriptor. */ GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; if (total == 0) { /* In this case, nothing remains to be done. */ ret->data = internal_malloc_size (1); return; } else ret->data = internal_malloc_size (sizeof (GFC_REAL_8) * total); } else { /* We come here because of range checking. */ index_type ret_extent; ret_extent = GFC_DESCRIPTOR_EXTENT(ret,0); if (total != ret_extent) runtime_error ("Incorrect extent in return value of PACK intrinsic;" " is %ld, should be %ld", (long int) total, (long int) ret_extent); } } rstride0 = GFC_DESCRIPTOR_STRIDE(ret,0); if (rstride0 == 0) rstride0 = 1; sstride0 = sstride[0]; mstride0 = mstride[0]; rptr = ret->data; while (sptr && mptr) { /* Test this element. */ if (*mptr) { /* Add it. */ *rptr = *sptr; rptr += rstride0; } /* Advance to the next element. */ sptr += sstride0; mptr += mstride0; count[0]++; n = 0; while (count[n] == extent[n]) { /* When we get to the end of a dimension, reset it and increment the next dimension. 
*/ count[n] = 0; /* We could precalculate these products, but this is a less frequently used path so probably not worth it. */ sptr -= sstride[n] * extent[n]; mptr -= mstride[n] * extent[n]; n++; if (n >= dim) { /* Break out of the loop. */ sptr = NULL; break; } else { count[n]++; sptr += sstride[n]; mptr += mstride[n]; } } } /* Add any remaining elements from VECTOR. */ if (vector) { n = GFC_DESCRIPTOR_EXTENT(vector,0); nelem = ((rptr - ret->data) / rstride0); if (n > nelem) { sstride0 = GFC_DESCRIPTOR_STRIDE(vector,0); if (sstride0 == 0) sstride0 = 1; sptr = vector->data + sstride0 * nelem; n -= nelem; while (n--) { *rptr = *sptr; rptr += rstride0; sptr += sstride0; } } } } #endif
gpl-2.0
bq/aquaris-M4.5
fs/ext4/balloc.c
14
26083
/* * linux/fs/ext4/balloc.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/time.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include "ext4.h" #include "ext4_jbd2.h" #include "mballoc.h" #include <trace/events/ext4.h> static unsigned ext4_num_base_meta_clusters(struct super_block *sb, ext4_group_t block_group); /* * balloc.c contains the blocks allocation and deallocation routines */ /* * Calculate block group number for a given block number */ ext4_group_t ext4_get_group_number(struct super_block *sb, ext4_fsblk_t block) { ext4_group_t group; if (test_opt2(sb, STD_GROUP_SIZE)) group = (block - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >> (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3); else ext4_get_group_no_and_offset(sb, block, &group, NULL); return group; } /* * Calculate the block group number and offset into the block/cluster * allocation bitmap, given a block number */ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; ext4_grpblk_t offset; blocknr = blocknr - le32_to_cpu(es->s_first_data_block); offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >> EXT4_SB(sb)->s_cluster_bits; if (offsetp) *offsetp = offset; if (blockgrpp) *blockgrpp = blocknr; } /* * Check whether the 'block' lives within the 'block_group'. Returns 1 if so * and 0 otherwise. 
*/ static inline int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block, ext4_group_t block_group) { ext4_group_t actual_group; actual_group = ext4_get_group_number(sb, block); return (actual_group == block_group) ? 1 : 0; } /* Return the number of clusters used for file system metadata; this * represents the overhead needed by the file system. */ static unsigned ext4_num_overhead_clusters(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp) { unsigned num_clusters; int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c; ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group); ext4_fsblk_t itbl_blk; struct ext4_sb_info *sbi = EXT4_SB(sb); /* This is the number of clusters used by the superblock, * block group descriptors, and reserved block group * descriptor blocks */ num_clusters = ext4_num_base_meta_clusters(sb, block_group); /* * For the allocation bitmaps and inode table, we first need * to check to see if the block is in the block group. If it * is, then check to see if the cluster is already accounted * for in the clusters used for the base metadata cluster, or * if we can increment the base metadata cluster to include * that block. Otherwise, we will have to track the cluster * used for the allocation bitmap or inode table explicitly. * Normally all of these blocks are contiguous, so the special * case handling shouldn't be necessary except for *very* * unusual file system layouts. 
*/ if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { block_cluster = EXT4_B2C(sbi, ext4_block_bitmap(sb, gdp) - start); if (block_cluster < num_clusters) block_cluster = -1; else if (block_cluster == num_clusters) { num_clusters++; block_cluster = -1; } } if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { inode_cluster = EXT4_B2C(sbi, ext4_inode_bitmap(sb, gdp) - start); if (inode_cluster < num_clusters) inode_cluster = -1; else if (inode_cluster == num_clusters) { num_clusters++; inode_cluster = -1; } } itbl_blk = ext4_inode_table(sb, gdp); for (i = 0; i < sbi->s_itb_per_group; i++) { if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { c = EXT4_B2C(sbi, itbl_blk + i - start); if ((c < num_clusters) || (c == inode_cluster) || (c == block_cluster) || (c == itbl_cluster)) continue; if (c == num_clusters) { num_clusters++; continue; } num_clusters++; itbl_cluster = c; } } if (block_cluster != -1) num_clusters++; if (inode_cluster != -1) num_clusters++; return num_clusters; } static unsigned int num_clusters_in_group(struct super_block *sb, ext4_group_t block_group) { unsigned int blocks; if (block_group == ext4_get_groups_count(sb) - 1) { /* * Even though mke2fs always initializes the first and * last group, just in case some other tool was used, * we need to make sure we calculate the right free * blocks. 
*/ blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) - ext4_group_first_block_no(sb, block_group); } else blocks = EXT4_BLOCKS_PER_GROUP(sb); return EXT4_NUM_B2C(EXT4_SB(sb), blocks); } /* Initializes an uninitialized block bitmap */ static int ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, ext4_group_t block_group, struct ext4_group_desc *gdp) { unsigned int bit, bit_max; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t start, tmp; int flex_bg = 0; struct ext4_group_info *grp; J_ASSERT_BH(bh, buffer_locked(bh)); /* If checksum is bad mark all blocks used to prevent allocation * essentially implementing a per-group read-only flag. */ if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { grp = ext4_get_group_info(sb, block_group); if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) percpu_counter_sub(&sbi->s_freeclusters_counter, grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { int count; count = ext4_free_inodes_count(sb, gdp); percpu_counter_sub(&sbi->s_freeinodes_counter, count); } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); return -EIO; } memset(bh->b_data, 0, sb->s_blocksize); bit_max = ext4_num_base_meta_clusters(sb, block_group); for (bit = 0; bit < bit_max; bit++) ext4_set_bit(bit, bh->b_data); start = ext4_group_first_block_no(sb, block_group); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) flex_bg = 1; /* Set bits for block and inode bitmaps, and inode table */ tmp = ext4_block_bitmap(sb, gdp); if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_bitmap(sb, gdp); if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_table(sb, gdp); for (; tmp < ext4_inode_table(sb, gdp) + sbi->s_itb_per_group; tmp++) { if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, 
tmp - start), bh->b_data); } /* * Also if the number of blocks within the group is less than * the blocksize * 8 ( which is the size of bitmap ), set rest * of the block bitmap to 1 */ ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), sb->s_blocksize * 8, bh->b_data); ext4_block_bitmap_csum_set(sb, block_group, gdp, bh); ext4_group_desc_csum_set(sb, block_group, gdp); return 0; } /* Return the number of free blocks in a block group. It is used when * the block bitmap is uninitialized, so we can't just count the bits * in the bitmap. */ unsigned ext4_free_clusters_after_init(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp) { return num_clusters_in_group(sb, block_group) - ext4_num_overhead_clusters(sb, block_group, gdp); } /* * The free blocks are managed by bitmaps. A file system contains several * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap * block for inodes, N blocks for the inode table and data blocks. * * The file system contains group descriptors which are located after the * super block. Each descriptor contains the number of the bitmap block and * the free blocks count in the block. The descriptors are loaded in memory * when a file system is mounted (see ext4_fill_super). 
*/ /** * ext4_get_group_desc() -- load group descriptor from disk * @sb: super block * @block_group: given block group * @bh: pointer to the buffer head to store the block * group descriptor */ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, ext4_group_t block_group, struct buffer_head **bh) { unsigned int group_desc; unsigned int offset; ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); if (block_group >= ngroups) { ext4_error(sb, "block_group >= groups_count - block_group = %u," " groups_count = %u", block_group, ngroups); return NULL; } group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); if (!sbi->s_group_desc[group_desc]) { ext4_error(sb, "Group descriptor not loaded - " "block_group = %u, group_desc = %u, desc = %u", block_group, group_desc, offset); return NULL; } desc = (struct ext4_group_desc *)( (__u8 *)sbi->s_group_desc[group_desc]->b_data + offset * EXT4_DESC_SIZE(sb)); if (bh) *bh = sbi->s_group_desc[group_desc]; return desc; } /* * Return the block number which was discovered to be invalid, or 0 if * the block bitmap is valid. */ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, struct ext4_group_desc *desc, ext4_group_t block_group, struct buffer_head *bh) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; ext4_fsblk_t blk; ext4_fsblk_t group_first_block; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { /* with FLEX_BG, the inode/block bitmaps and itable * blocks may not be in the group at all * so the bitmap validation will be skipped for those groups * or it has to also read the block group where the bitmaps * are located to verify they are set. 
*/ return 0; } group_first_block = ext4_group_first_block_no(sb, block_group); /* check whether block bitmap block number is set */ blk = ext4_block_bitmap(sb, desc); offset = blk - group_first_block; if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; /* check whether the inode bitmap block number is set */ blk = ext4_inode_bitmap(sb, desc); offset = blk - group_first_block; if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; /* check whether the inode table block number is set */ blk = ext4_inode_table(sb, desc); offset = blk - group_first_block; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group), EXT4_B2C(sbi, offset)); if (next_zero_bit < EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group)) /* bad bitmap for inode tables */ return blk; return 0; } static void ext4_validate_block_bitmap(struct super_block *sb, struct ext4_group_desc *desc, ext4_group_t block_group, struct buffer_head *bh) { ext4_fsblk_t blk; struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); struct ext4_sb_info *sbi = EXT4_SB(sb); if (buffer_verified(bh)) return; ext4_lock_group(sb, block_group); blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); if (unlikely(blk != 0)) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: block %llu: invalid block bitmap", block_group, blk); if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) percpu_counter_sub(&sbi->s_freeclusters_counter, grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); return; } if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, desc, bh))) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) percpu_counter_sub(&sbi->s_freeclusters_counter, grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); return; } set_buffer_verified(bh); 
ext4_unlock_group(sb, block_group); } /** * ext4_read_block_bitmap_nowait() * @sb: super block * @block_group: given block group * * Read the bitmap for a given block_group,and validate the * bits for block/inode/inode tables are set in the bitmaps * * Return buffer_head on success or NULL in case of failure. */ struct buffer_head * ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) { struct ext4_group_desc *desc; struct buffer_head *bh; ext4_fsblk_t bitmap_blk; desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return NULL; bitmap_blk = ext4_block_bitmap(sb, desc); bh = sb_getblk(sb, bitmap_blk); if (unlikely(!bh)) { ext4_error(sb, "Cannot get buffer for block bitmap - " "block_group = %u, block_bitmap = %llu", block_group, bitmap_blk); return NULL; } if (bitmap_uptodate(bh)) goto verify; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); goto verify; } ext4_lock_group(sb, block_group); if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { int err; err = ext4_init_block_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); if (err) ext4_error(sb, "Checksum bad for grp %u", block_group); return bh; } ext4_unlock_group(sb, block_group); if (buffer_uptodate(bh)) { /* * if not uninit if bh is uptodate, * bitmap is also uptodate */ set_bitmap_uptodate(bh); unlock_buffer(bh); goto verify; } /* * submit the buffer_head for reading */ set_buffer_new(bh); trace_ext4_read_block_bitmap_load(sb, block_group); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); submit_bh(READ | REQ_META | REQ_PRIO, bh); return bh; verify: ext4_validate_block_bitmap(sb, desc, block_group, bh); if (buffer_verified(bh)) return bh; put_bh(bh); return NULL; } /* Returns 0 on success, 1 on error */ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh) { struct ext4_group_desc *desc; if (!buffer_new(bh)) return 0; 
desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return 1; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { ext4_error(sb, "Cannot read block bitmap - " "block_group = %u, block_bitmap = %llu", block_group, (unsigned long long) bh->b_blocknr); return 1; } clear_buffer_new(bh); /* Panic or remount fs read-only if block bitmap is invalid */ ext4_validate_block_bitmap(sb, desc, block_group, bh); /* ...but check for error just in case errors=continue. */ return !buffer_verified(bh); } struct buffer_head * ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) { struct buffer_head *bh; bh = ext4_read_block_bitmap_nowait(sb, block_group); if (!bh) return NULL; if (ext4_wait_block_bitmap(sb, block_group, bh)) { put_bh(bh); return NULL; } return bh; } /** * ext4_has_free_clusters() * @sbi: in-core super block structure. * @nclusters: number of needed blocks * @flags: flags from ext4_mb_new_blocks() * * Check if filesystem has nclusters free & available for allocation. * On success return 1, return 0 on failure. */ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, s64 nclusters, unsigned int flags) { s64 free_clusters, dirty_clusters, rsv, resv_clusters; struct percpu_counter *fcc = &sbi->s_freeclusters_counter; struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter; free_clusters = percpu_counter_read_positive(fcc); dirty_clusters = percpu_counter_read_positive(dcc); resv_clusters = atomic64_read(&sbi->s_resv_clusters); /* * r_blocks_count should always be multiple of the cluster ratio so * we are safe to do a plane bit shift only. */ rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) + resv_clusters; if (free_clusters - (nclusters + rsv + dirty_clusters) < EXT4_FREECLUSTERS_WATERMARK) { free_clusters = percpu_counter_sum_positive(fcc); dirty_clusters = percpu_counter_sum_positive(dcc); } /* Check whether we have space after accounting for current * dirty clusters & root reserved clusters. 
*/ if (free_clusters >= (rsv + nclusters + dirty_clusters)) return 1; /* Hm, nope. Are (enough) root reserved clusters available? */ if (uid_gte(sbi->s_resuid, current_fsuid()) || (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || capable(CAP_SYS_RESOURCE) || (flags & EXT4_MB_USE_ROOT_BLOCKS)) { if (free_clusters >= (nclusters + dirty_clusters + resv_clusters)) return 1; } /* No free blocks. Let's see if we can dip into reserved pool */ if (flags & EXT4_MB_USE_RESERVED) { if (free_clusters >= (nclusters + dirty_clusters)) return 1; } return 0; } int ext4_claim_free_clusters(struct ext4_sb_info *sbi, s64 nclusters, unsigned int flags) { if (ext4_has_free_clusters(sbi, nclusters, flags)) { percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters); return 0; } else return -ENOSPC; } /** * ext4_should_retry_alloc() * @sb: super block * @retries number of attemps has been made * * ext4_should_retry_alloc() is called when ENOSPC is returned, and if * it is profitable to retry the operation, this function will wait * for the current or committing transaction to complete, and then * return TRUE. * * if the total number of retries exceed three times, return FALSE. 
*/ int ext4_should_retry_alloc(struct super_block *sb, int *retries) { if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || (*retries)++ > 3 || !EXT4_SB(sb)->s_journal) return 0; jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); } /* * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks * * @handle: handle to this transaction * @inode: file inode * @goal: given target block(filesystem wide) * @count: pointer to total number of clusters needed * @errp: error code * * Return 1st allocated block number on success, *count stores total account * error stores in errp pointer */ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, ext4_fsblk_t goal, unsigned int flags, unsigned long *count, int *errp) { struct ext4_allocation_request ar; ext4_fsblk_t ret; memset(&ar, 0, sizeof(ar)); /* Fill with neighbour allocated blocks */ ar.inode = inode; ar.goal = goal; ar.len = count ? *count : 1; ar.flags = flags; ret = ext4_mb_new_blocks(handle, &ar, errp); if (count) *count = ar.len; /* * Account for the allocated meta blocks. We will never * fail EDQUOT for metdata, but we do account for it. */ if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) { spin_lock(&EXT4_I(inode)->i_block_reservation_lock); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); dquot_alloc_block_nofail(inode, EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); } return ret; } /** * ext4_count_free_clusters() -- count filesystem free clusters * @sb: superblock * * Adds up the number of free clusters from each block group. 
*/ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) { ext4_fsblk_t desc_count; struct ext4_group_desc *gdp; ext4_group_t i; ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_group_info *grp; #ifdef EXT4FS_DEBUG struct ext4_super_block *es; ext4_fsblk_t bitmap_count; unsigned int x; struct buffer_head *bitmap_bh = NULL; es = EXT4_SB(sb)->s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; grp = NULL; if (EXT4_SB(sb)->s_group_info) grp = ext4_get_group_info(sb, i); if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) desc_count += ext4_free_group_clusters(sb, gdp); brelse(bitmap_bh); bitmap_bh = ext4_read_block_bitmap(sb, i); if (bitmap_bh == NULL) continue; x = ext4_count_free(bitmap_bh->b_data, EXT4_CLUSTERS_PER_GROUP(sb) / 8); printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n", i, ext4_free_group_clusters(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" ", computed = %llu, %llu\n", EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), desc_count, bitmap_count); return bitmap_count; #else desc_count = 0; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; grp = NULL; if (EXT4_SB(sb)->s_group_info) grp = ext4_get_group_info(sb, i); if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) desc_count += ext4_free_group_clusters(sb, gdp); } return desc_count; #endif } static inline int test_root(ext4_group_t a, int b) { while (1) { if (a < b) return 0; if (a == b) return 1; if ((a % b) != 0) return 0; a = a / b; } } /** * ext4_bg_has_super - number of blocks used by the superblock in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the superblock (primary or backup) * in this group. Currently this will be only 0 or 1. 
*/ int ext4_bg_has_super(struct super_block *sb, ext4_group_t group) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (group == 0) return 1; if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) { if (group == le32_to_cpu(es->s_backup_bgs[0]) || group == le32_to_cpu(es->s_backup_bgs[1])) return 1; return 0; } if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) return 1; if (!(group & 1)) return 0; if (test_root(group, 3) || (test_root(group, 5)) || test_root(group, 7)) return 1; return 0; } static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, ext4_group_t group) { unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb); ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1; if (group == first || group == first + 1 || group == last) return 1; return 0; } static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, ext4_group_t group) { if (!ext4_bg_has_super(sb, group)) return 0; if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG)) return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); else return EXT4_SB(sb)->s_gdb_count; } /** * ext4_bg_num_gdb - number of blocks used by the group table in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the group descriptor table * (primary or backup) in this group. In the future there may be a * different number of descriptor blocks in each group. 
*/ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) { unsigned long first_meta_bg = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) || metagroup < first_meta_bg) return ext4_bg_num_gdb_nometa(sb, group); return ext4_bg_num_gdb_meta(sb,group); } /* * This function returns the number of file system metadata clusters at * the beginning of a block group, including the reserved gdt blocks. */ static unsigned ext4_num_base_meta_clusters(struct super_block *sb, ext4_group_t block_group) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned num; /* Check for superblock and gdt backups in this group */ num = ext4_bg_has_super(sb, block_group); if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) * sbi->s_desc_per_block) { if (num) { num += ext4_bg_num_gdb(sb, block_group); num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); } } else { /* For META_BG_BLOCK_GROUPS */ num += ext4_bg_num_gdb(sb, block_group); } return EXT4_NUM_B2C(sbi, num); } /** * ext4_inode_to_goal_block - return a hint for block allocation * @inode: inode for block allocation * * Return the ideal location to start allocating blocks for a * newly created inode. */ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); ext4_group_t block_group; ext4_grpblk_t colour; int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); ext4_fsblk_t bg_start; ext4_fsblk_t last_block; block_group = ei->i_block_group; if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { /* * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME * block groups per flexgroup, reserve the first block * group for directories and special files. Regular * files will start at the second block group. This * tends to speed up directory access and improves * fsck times. 
*/ block_group &= ~(flex_size-1); if (S_ISREG(inode->i_mode)) block_group++; } bg_start = ext4_group_first_block_no(inode->i_sb, block_group); last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; /* * If we are doing delayed allocation, we don't need take * colour into account. */ if (test_opt(inode->i_sb, DELALLOC)) return bg_start; if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) colour = (current->pid % 16) * (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); else colour = (current->pid % 16) * ((last_block - bg_start) / 16); return bg_start + colour; }
gpl-2.0
basanta078/linux-370
arch/mips/kernel/asm-offsets.c
14
13767
/*
 * offset.c: Calculate pt_regs and task_struct offsets.
 *
 * Copyright (C) 1996 David S. Miller
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 *
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/ptrace.h>
#include <asm/processor.h>

/*
 * Each helper below forces the compiler to emit a "@@@"-tagged line
 * into the generated assembly.  The build machinery greps those tagged
 * lines back out to produce a header of #define constants that the
 * assembler sources can use.  Nothing here runs at kernel runtime.
 */

/* Emit a literal line of text. */
#define text(t) __asm__("\n@@@" t)

/* Byte offset of 'member' inside 'type', computed without an object. */
#define _offset(type, member) (&(((type *)NULL)->member))

/* Emit "<string><offset of member in *ptr>". */
#define offset(string, ptr, member) \
	__asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))

/* Emit "<string><value of member>" (hex, via the %X operand modifier). */
#define constant(string, member) \
	__asm__("\n@@@" string "%X0" : : "ri" (member))

/* Emit "<string><sizeof(size)>". */
#define size(string, size) \
	__asm__("\n@@@" string "%0" : : "i" (sizeof(size)))

/* Emit an empty line, purely for readability of the generated header. */
#define linefeed text("")

void output_ptreg_defines(void)
{
	text("/* MIPS pt_regs offsets. */");
	offset("#define PT_R0 ", struct pt_regs, regs[0]);
	offset("#define PT_R1 ", struct pt_regs, regs[1]);
	offset("#define PT_R2 ", struct pt_regs, regs[2]);
	offset("#define PT_R3 ", struct pt_regs, regs[3]);
	offset("#define PT_R4 ", struct pt_regs, regs[4]);
	offset("#define PT_R5 ", struct pt_regs, regs[5]);
	offset("#define PT_R6 ", struct pt_regs, regs[6]);
	offset("#define PT_R7 ", struct pt_regs, regs[7]);
	offset("#define PT_R8 ", struct pt_regs, regs[8]);
	offset("#define PT_R9 ", struct pt_regs, regs[9]);
	offset("#define PT_R10 ", struct pt_regs, regs[10]);
	offset("#define PT_R11 ", struct pt_regs, regs[11]);
	offset("#define PT_R12 ", struct pt_regs, regs[12]);
	offset("#define PT_R13 ", struct pt_regs, regs[13]);
	offset("#define PT_R14 ", struct pt_regs, regs[14]);
	offset("#define PT_R15 ", struct pt_regs, regs[15]);
	offset("#define PT_R16 ", struct pt_regs, regs[16]);
	offset("#define PT_R17 ", struct pt_regs, regs[17]);
	offset("#define PT_R18 ", struct pt_regs, regs[18]);
	offset("#define PT_R19 ", struct pt_regs, regs[19]);
	offset("#define PT_R20 ", struct pt_regs, regs[20]);
	offset("#define PT_R21 ", struct pt_regs, regs[21]);
	offset("#define PT_R22 ", struct pt_regs, regs[22]);
	offset("#define PT_R23 ", struct pt_regs, regs[23]);
	offset("#define PT_R24 ", struct pt_regs, regs[24]);
	offset("#define PT_R25 ", struct pt_regs, regs[25]);
	offset("#define PT_R26 ", struct pt_regs, regs[26]);
	offset("#define PT_R27 ", struct pt_regs, regs[27]);
	offset("#define PT_R28 ", struct pt_regs, regs[28]);
	offset("#define PT_R29 ", struct pt_regs, regs[29]);
	offset("#define PT_R30 ", struct pt_regs, regs[30]);
	offset("#define PT_R31 ", struct pt_regs, regs[31]);
	offset("#define PT_LO ", struct pt_regs, lo);
	offset("#define PT_HI ", struct pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	offset("#define PT_ACX ", struct pt_regs, acx);
#endif
	offset("#define PT_EPC ", struct pt_regs, cp0_epc);
	offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
	offset("#define PT_STATUS ", struct pt_regs, cp0_status);
	offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
	offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
	size("#define PT_SIZE ", struct pt_regs);
	linefeed;
}

void output_task_defines(void)
{
	text("/* MIPS task_struct offsets. */");
	offset("#define TASK_STATE ", struct task_struct, state);
	offset("#define TASK_THREAD_INFO ", struct task_struct, stack);
	offset("#define TASK_FLAGS ", struct task_struct, flags);
	offset("#define TASK_MM ", struct task_struct, mm);
	offset("#define TASK_PID ", struct task_struct, pid);
	size("#define TASK_STRUCT_SIZE ", struct task_struct);
	linefeed;
}

void output_thread_info_defines(void)
{
	text("/* MIPS thread_info offsets. */");
	offset("#define TI_TASK ", struct thread_info, task);
	offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain);
	offset("#define TI_FLAGS ", struct thread_info, flags);
	offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
	offset("#define TI_CPU ", struct thread_info, cpu);
	offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
	offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
	offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
	offset("#define TI_REGS ", struct thread_info, regs);
	constant("#define _THREAD_SIZE ", THREAD_SIZE);
	constant("#define _THREAD_MASK ", THREAD_MASK);
	linefeed;
}

void output_thread_defines(void)
{
	text("/* MIPS specific thread_struct offsets. */");
	offset("#define THREAD_REG16 ", struct task_struct, thread.reg16);
	offset("#define THREAD_REG17 ", struct task_struct, thread.reg17);
	offset("#define THREAD_REG18 ", struct task_struct, thread.reg18);
	offset("#define THREAD_REG19 ", struct task_struct, thread.reg19);
	offset("#define THREAD_REG20 ", struct task_struct, thread.reg20);
	offset("#define THREAD_REG21 ", struct task_struct, thread.reg21);
	offset("#define THREAD_REG22 ", struct task_struct, thread.reg22);
	offset("#define THREAD_REG23 ", struct task_struct, thread.reg23);
	offset("#define THREAD_REG29 ", struct task_struct, thread.reg29);
	offset("#define THREAD_REG30 ", struct task_struct, thread.reg30);
	offset("#define THREAD_REG31 ", struct task_struct, thread.reg31);
	offset("#define THREAD_STATUS ", struct task_struct, thread.cp0_status);
	offset("#define THREAD_FPU ", struct task_struct, thread.fpu);
	offset("#define THREAD_BVADDR ", struct task_struct,
	       thread.cp0_badvaddr);
	offset("#define THREAD_BUADDR ", struct task_struct,
	       thread.cp0_baduaddr);
	offset("#define THREAD_ECODE ", struct task_struct,
	       thread.error_code);
	offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
	offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags);
	offset("#define THREAD_TRAMP ", struct task_struct,
	       thread.irix_trampoline);
	offset("#define THREAD_OLDCTX ", struct task_struct,
	       thread.irix_oldctx);
	linefeed;
}

void output_thread_fpu_defines(void)
{
	offset("#define THREAD_FPR0 ", struct task_struct, thread.fpu.fpr[0]);
	offset("#define THREAD_FPR1 ", struct task_struct, thread.fpu.fpr[1]);
	offset("#define THREAD_FPR2 ", struct task_struct, thread.fpu.fpr[2]);
	offset("#define THREAD_FPR3 ", struct task_struct, thread.fpu.fpr[3]);
	offset("#define THREAD_FPR4 ", struct task_struct, thread.fpu.fpr[4]);
	offset("#define THREAD_FPR5 ", struct task_struct, thread.fpu.fpr[5]);
	offset("#define THREAD_FPR6 ", struct task_struct, thread.fpu.fpr[6]);
	offset("#define THREAD_FPR7 ", struct task_struct, thread.fpu.fpr[7]);
	offset("#define THREAD_FPR8 ", struct task_struct, thread.fpu.fpr[8]);
	offset("#define THREAD_FPR9 ", struct task_struct, thread.fpu.fpr[9]);
	offset("#define THREAD_FPR10 ", struct task_struct, thread.fpu.fpr[10]);
	offset("#define THREAD_FPR11 ", struct task_struct, thread.fpu.fpr[11]);
	offset("#define THREAD_FPR12 ", struct task_struct, thread.fpu.fpr[12]);
	offset("#define THREAD_FPR13 ", struct task_struct, thread.fpu.fpr[13]);
	offset("#define THREAD_FPR14 ", struct task_struct, thread.fpu.fpr[14]);
	offset("#define THREAD_FPR15 ", struct task_struct, thread.fpu.fpr[15]);
	offset("#define THREAD_FPR16 ", struct task_struct, thread.fpu.fpr[16]);
	offset("#define THREAD_FPR17 ", struct task_struct, thread.fpu.fpr[17]);
	offset("#define THREAD_FPR18 ", struct task_struct, thread.fpu.fpr[18]);
	offset("#define THREAD_FPR19 ", struct task_struct, thread.fpu.fpr[19]);
	offset("#define THREAD_FPR20 ", struct task_struct, thread.fpu.fpr[20]);
	offset("#define THREAD_FPR21 ", struct task_struct, thread.fpu.fpr[21]);
	offset("#define THREAD_FPR22 ", struct task_struct, thread.fpu.fpr[22]);
	offset("#define THREAD_FPR23 ", struct task_struct, thread.fpu.fpr[23]);
	offset("#define THREAD_FPR24 ", struct task_struct, thread.fpu.fpr[24]);
	offset("#define THREAD_FPR25 ", struct task_struct, thread.fpu.fpr[25]);
	offset("#define THREAD_FPR26 ", struct task_struct, thread.fpu.fpr[26]);
	offset("#define THREAD_FPR27 ", struct task_struct, thread.fpu.fpr[27]);
	offset("#define THREAD_FPR28 ", struct task_struct, thread.fpu.fpr[28]);
	offset("#define THREAD_FPR29 ", struct task_struct, thread.fpu.fpr[29]);
	offset("#define THREAD_FPR30 ", struct task_struct, thread.fpu.fpr[30]);
	offset("#define THREAD_FPR31 ", struct task_struct, thread.fpu.fpr[31]);
	offset("#define THREAD_FCR31 ", struct task_struct, thread.fpu.fcr31);
	linefeed;
}

void output_mm_defines(void)
{
	text("/* Size of struct page */");
	size("#define STRUCT_PAGE_SIZE ", struct page);
	linefeed;
	text("/* Linux mm_struct offsets. */");
	offset("#define MM_USERS ", struct mm_struct, mm_users);
	offset("#define MM_PGD ", struct mm_struct, pgd);
	offset("#define MM_CONTEXT ", struct mm_struct, context);
	linefeed;
	constant("#define _PAGE_SIZE ", PAGE_SIZE);
	constant("#define _PAGE_SHIFT ", PAGE_SHIFT);
	linefeed;
	constant("#define _PGD_T_SIZE ", sizeof(pgd_t));
	constant("#define _PMD_T_SIZE ", sizeof(pmd_t));
	constant("#define _PTE_T_SIZE ", sizeof(pte_t));
	linefeed;
	constant("#define _PGD_T_LOG2 ", PGD_T_LOG2);
	constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
	constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
	linefeed;
	constant("#define _PMD_SHIFT ", PMD_SHIFT);
	constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
	linefeed;
	constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
	constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
	constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
	linefeed;
}

#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
	text("/* Linux sigcontext offsets. */");
	offset("#define SC_REGS ", struct sigcontext, sc_regs);
	offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
	offset("#define SC_ACX ", struct sigcontext, sc_acx);
	offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
	offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
	offset("#define SC_PC ", struct sigcontext, sc_pc);
	offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
	offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
	offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
	offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
	offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
	offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
	offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
	offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
	linefeed;
}
#endif

#ifdef CONFIG_64BIT
void output_sc_defines(void)
{
	text("/* Linux sigcontext offsets. */");
	offset("#define SC_REGS ", struct sigcontext, sc_regs);
	offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
	offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
	offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
	offset("#define SC_PC ", struct sigcontext, sc_pc);
	offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
	linefeed;
}
#endif

#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
{
	text("/* Linux 32-bit sigcontext offsets. */");
	offset("#define SC32_FPREGS ", struct sigcontext32, sc_fpregs);
	offset("#define SC32_FPC_CSR ", struct sigcontext32, sc_fpc_csr);
	offset("#define SC32_FPC_EIR ", struct sigcontext32, sc_fpc_eir);
	linefeed;
}
#endif

void output_signal_defined(void)
{
	text("/* Linux signal numbers. */");
	constant("#define _SIGHUP ", SIGHUP);
	constant("#define _SIGINT ", SIGINT);
	constant("#define _SIGQUIT ", SIGQUIT);
	constant("#define _SIGILL ", SIGILL);
	constant("#define _SIGTRAP ", SIGTRAP);
	constant("#define _SIGIOT ", SIGIOT);
	constant("#define _SIGABRT ", SIGABRT);
	constant("#define _SIGEMT ", SIGEMT);
	constant("#define _SIGFPE ", SIGFPE);
	constant("#define _SIGKILL ", SIGKILL);
	constant("#define _SIGBUS ", SIGBUS);
	constant("#define _SIGSEGV ", SIGSEGV);
	constant("#define _SIGSYS ", SIGSYS);
	constant("#define _SIGPIPE ", SIGPIPE);
	constant("#define _SIGALRM ", SIGALRM);
	constant("#define _SIGTERM ", SIGTERM);
	constant("#define _SIGUSR1 ", SIGUSR1);
	constant("#define _SIGUSR2 ", SIGUSR2);
	constant("#define _SIGCHLD ", SIGCHLD);
	constant("#define _SIGPWR ", SIGPWR);
	constant("#define _SIGWINCH ", SIGWINCH);
	constant("#define _SIGURG ", SIGURG);
	constant("#define _SIGIO ", SIGIO);
	constant("#define _SIGSTOP ", SIGSTOP);
	constant("#define _SIGTSTP ", SIGTSTP);
	constant("#define _SIGCONT ", SIGCONT);
	constant("#define _SIGTTIN ", SIGTTIN);
	constant("#define _SIGTTOU ", SIGTTOU);
	constant("#define _SIGVTALRM ", SIGVTALRM);
	constant("#define _SIGPROF ", SIGPROF);
	constant("#define _SIGXCPU ", SIGXCPU);
	constant("#define _SIGXFSZ ", SIGXFSZ);
	linefeed;
}

void output_irq_cpustat_t_defines(void)
{
	text("/* Linux irq_cpustat_t offsets. */");
	offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
	size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t);
	linefeed;
}
gpl-2.0
FEDEVEL/openrex-linux-3.14
drivers/video/omap2/dss/hdmi_phy.c
270
4231
/* * HDMI PHY * * Copyright (C) 2013 Texas Instruments Incorporated * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <video/omapdss.h> #include "dss.h" #include "hdmi.h" void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s) { #define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\ hdmi_read_reg(phy->base, r)) DUMPPHY(HDMI_TXPHY_TX_CTRL); DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL); DUMPPHY(HDMI_TXPHY_POWER_CTRL); DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL); } static irqreturn_t hdmi_irq_handler(int irq, void *data) { struct hdmi_wp_data *wp = data; u32 irqstatus; irqstatus = hdmi_wp_get_irqstatus(wp); hdmi_wp_set_irqstatus(wp, irqstatus); if ((irqstatus & HDMI_IRQ_LINK_CONNECT) && irqstatus & HDMI_IRQ_LINK_DISCONNECT) { /* * If we get both connect and disconnect interrupts at the same * time, turn off the PHY, clear interrupts, and restart, which * raises connect interrupt if a cable is connected, or nothing * if cable is not connected. 
*/ hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF); hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT); hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) { hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON); } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) { hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); } return IRQ_HANDLED; } int hdmi_phy_enable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp, struct hdmi_config *cfg) { u16 r = 0; u32 irqstatus; hdmi_wp_clear_irqenable(wp, 0xffffffff); irqstatus = hdmi_wp_get_irqstatus(wp); hdmi_wp_set_irqstatus(wp, irqstatus); r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); if (r) return r; /* * Read address 0 in order to get the SCP reset done completed * Dummy access performed to make sure reset is done */ hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL); /* * Write to phy address 0 to configure the clock * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field */ REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, 0x1, 31, 30); /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */ hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000); /* Setup max LDO voltage */ REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0); /* Write to phy address 3 to change the polarity control */ REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); r = request_threaded_irq(phy->irq, NULL, hdmi_irq_handler, IRQF_ONESHOT, "OMAP HDMI", wp); if (r) { DSSERR("HDMI IRQ request failed\n"); hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF); return r; } hdmi_wp_set_irqenable(wp, HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT); return 0; } void hdmi_phy_disable(struct hdmi_phy_data *phy, struct hdmi_wp_data *wp) { free_irq(phy->irq, wp); hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF); } #define PHY_OFFSET 0x300 #define PHY_SIZE 0x100 int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy) { struct resource *res; struct resource temp_res; res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); if (!res) { DSSDBG("can't get PHY mem resource by name\n"); /* * if hwmod/DT doesn't have the memory resource information * split into HDMI sub blocks by name, we try again by getting * the platform's first resource. this code will be removed when * the driver can get the mem resources by name */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { DSSERR("can't get PHY mem resource\n"); return -EINVAL; } temp_res.start = res->start + PHY_OFFSET; temp_res.end = temp_res.start + PHY_SIZE - 1; res = &temp_res; } phy->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!phy->base) { DSSERR("can't ioremap TX PHY\n"); return -ENOMEM; } phy->irq = platform_get_irq(pdev, 0); if (phy->irq < 0) { DSSERR("platform_get_irq failed\n"); return -ENODEV; } return 0; }
gpl-2.0
rudij7/green_machine_bacon
drivers/platform/msm/qpnp-pwm.c
526
54847
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Qualcomm QPNP Pulse Width Modulation (PWM) driver * * The HW module is also called LPG (Light Pattern Generator). */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/spmi.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/radix-tree.h> #include <linux/qpnp/pwm.h> #define QPNP_LPG_DRIVER_NAME "qcom,qpnp-pwm" #define QPNP_LPG_CHANNEL_BASE "qpnp-lpg-channel-base" #define QPNP_LPG_LUT_BASE "qpnp-lpg-lut-base" #define QPNP_PWM_MODE_ONLY_SUB_TYPE 0x0B /* LPG Control for LPG_PATTERN_CONFIG */ #define QPNP_RAMP_DIRECTION_SHIFT 4 #define QPNP_RAMP_DIRECTION_MASK 0x10 #define QPNP_PATTERN_REPEAT_SHIFT 3 #define QPNP_PATTERN_REPEAT_MASK 0x08 #define QPNP_RAMP_TOGGLE_SHIFT 2 #define QPNP_RAMP_TOGGLE_MASK 0x04 #define QPNP_EN_PAUSE_HI_SHIFT 1 #define QPNP_EN_PAUSE_HI_MASK 0x02 #define QPNP_EN_PAUSE_LO_MASK 0x01 /* LPG Control for LPG_PWM_SIZE_CLK */ #define QPNP_PWM_SIZE_SHIFT_SUB_TYPE 2 #define QPNP_PWM_SIZE_MASK_SUB_TYPE 0x4 #define QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE 0x03 #define QPNP_PWM_SIZE_9_BIT_SUB_TYPE 0x01 #define QPNP_SET_PWM_CLK_SUB_TYPE(val, clk, pwm_size) \ do { \ val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE; \ val |= (((pwm_size > 6 ? 
QPNP_PWM_SIZE_9_BIT_SUB_TYPE : 0) << \ QPNP_PWM_SIZE_SHIFT_SUB_TYPE) & QPNP_PWM_SIZE_MASK_SUB_TYPE); \ } while (0) #define QPNP_GET_PWM_SIZE_SUB_TYPE(reg) ((reg & QPNP_PWM_SIZE_MASK_SUB_TYPE) \ >> QPNP_PWM_SIZE_SHIFT_SUB_TYPE) #define QPNP_PWM_SIZE_SHIFT 4 #define QPNP_PWM_SIZE_MASK 0x30 #define QPNP_PWM_FREQ_CLK_SELECT_MASK 0x03 #define QPNP_MIN_PWM_BIT_SIZE 6 #define QPNP_MAX_PWM_BIT_SIZE 9 #define QPNP_SET_PWM_CLK(val, clk, pwm_size) \ do { \ val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK; \ val |= (((pwm_size - QPNP_MIN_PWM_BIT_SIZE) << \ QPNP_PWM_SIZE_SHIFT) & QPNP_PWM_SIZE_MASK); \ } while (0) #define QPNP_GET_PWM_SIZE(reg) ((reg & QPNP_PWM_SIZE_MASK) \ >> QPNP_PWM_SIZE_SHIFT) /* LPG Control for LPG_PWM_FREQ_PREDIV_CLK */ #define QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT 5 #define QPNP_PWM_FREQ_PRE_DIVIDE_MASK 0x60 #define QPNP_PWM_FREQ_EXP_MASK 0x07 #define QPNP_SET_PWM_FREQ_PREDIV(val, pre_div, pre_div_exp) \ do { \ val = (pre_div << QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT) & \ QPNP_PWM_FREQ_PRE_DIVIDE_MASK; \ val |= (pre_div_exp & QPNP_PWM_FREQ_EXP_MASK); \ } while (0) /* LPG Control for LPG_PWM_TYPE_CONFIG */ #define QPNP_EN_GLITCH_REMOVAL_SHIFT 5 #define QPNP_EN_GLITCH_REMOVAL_MASK 0x20 #define QPNP_EN_FULL_SCALE_SHIFT 3 #define QPNP_EN_FULL_SCALE_MASK 0x08 #define QPNP_EN_PHASE_STAGGER_SHIFT 2 #define QPNP_EN_PHASE_STAGGER_MASK 0x04 #define QPNP_PHASE_STAGGER_MASK 0x03 /* LPG Control for PWM_VALUE_LSB */ #define QPNP_PWM_VALUE_LSB_MASK 0xFF /* LPG Control for PWM_VALUE_MSB */ #define QPNP_PWM_VALUE_MSB_SHIFT 8 #define QPNP_PWM_VALUE_MSB_MASK 0x01 /* LPG Control for ENABLE_CONTROL */ #define QPNP_EN_PWM_HIGH_SHIFT 7 #define QPNP_EN_PWM_HIGH_MASK 0x80 #define QPNP_EN_PWM_LO_SHIFT 6 #define QPNP_EN_PWM_LO_MASK 0x40 #define QPNP_EN_PWM_OUTPUT_SHIFT 5 #define QPNP_EN_PWM_OUTPUT_MASK 0x20 #define QPNP_PWM_SRC_SELECT_SHIFT 2 #define QPNP_PWM_SRC_SELECT_MASK 0x04 #define QPNP_PWM_EN_RAMP_GEN_SHIFT 1 #define QPNP_PWM_EN_RAMP_GEN_MASK 0x02 #define QPNP_ENABLE_PWM(value) \ (value |= 
(1 << QPNP_EN_PWM_OUTPUT_SHIFT) & QPNP_EN_PWM_OUTPUT_MASK) #define QPNP_DISABLE_PWM(value) (value &= ~QPNP_EN_PWM_OUTPUT_MASK) /* LPG Control for PWM_SYNC */ #define QPNP_PWM_SYNC_VALUE 0x01 #define QPNP_PWM_SYNC_MASK 0x01 /* LPG Control for RAMP_CONTROL */ #define QPNP_RAMP_START_MASK 0x01 #define QPNP_RAMP_CONTROL_SHIFT 8 #define QPNP_ENABLE_LUT_V0(value) (value |= QPNP_RAMP_START_MASK) #define QPNP_DISABLE_LUT_V0(value) (value &= ~QPNP_RAMP_START_MASK) #define QPNP_ENABLE_LUT_V1(value, id) \ do { \ (id < 8) ? (value |= BIT(id)) : \ (value |= (BIT(id) >> QPNP_RAMP_CONTROL_SHIFT)); \ } while (0) #define QPNP_DISABLE_LUT_V1(value, id) \ do { \ (id < 8) ? (value &= ~BIT(id)) : \ (value &= (~BIT(id) >> QPNP_RAMP_CONTROL_SHIFT)); \ } while (0) /* LPG Control for RAMP_STEP_DURATION_LSB */ #define QPNP_RAMP_STEP_DURATION_LSB_MASK 0xFF /* LPG Control for RAMP_STEP_DURATION_MSB */ #define QPNP_RAMP_STEP_DURATION_MSB_SHIFT 8 #define QPNP_RAMP_STEP_DURATION_MSB_MASK 0x01 #define QPNP_PWM_1KHZ 1024 #define QPNP_GET_RAMP_STEP_DURATION(ramp_time_ms) \ ((ramp_time_ms * QPNP_PWM_1KHZ) / 1000) /* LPG Control for PAUSE_HI_MULTIPLIER_LSB */ #define QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK 0xFF /* LPG Control for PAUSE_HI_MULTIPLIER_MSB */ #define QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT 8 #define QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK 0x1F /* LPG Control for PAUSE_LO_MULTIPLIER_LSB */ #define QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK 0xFF /* LPG Control for PAUSE_LO_MULTIPLIER_MSB */ #define QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT 8 #define QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK 0x1F /* LPG Control for HI_INDEX */ #define QPNP_HI_INDEX_MASK 0x3F /* LPG Control for LO_INDEX */ #define QPNP_LO_INDEX_MASK 0x3F #define NUM_CLOCKS 3 #define QPNP_PWM_M_MAX 7 #define NSEC_1024HZ (NSEC_PER_SEC / 1024) #define NSEC_32768HZ (NSEC_PER_SEC / 32768) #define NSEC_19P2MHZ (NSEC_PER_SEC / 19200000) #define NUM_LPG_PRE_DIVIDE 4 #define PRE_DIVIDE_1 1 #define PRE_DIVIDE_3 3 #define PRE_DIVIDE_5 5 #define PRE_DIVIDE_6 6 #define 
SPMI_LPG_REG_BASE_OFFSET 0x40 #define SPMI_LPG_REVISION2_OFFSET 0x1 #define SPMI_LPG_REV1_RAMP_CONTROL_OFFSET 0x86 #define SPMI_LPG_SUB_TYPE_OFFSET 0x5 #define SPMI_LPG_PWM_SYNC 0x7 #define SPMI_LPG_REG_ADDR(b, n) (b + SPMI_LPG_REG_BASE_OFFSET + (n)) #define SPMI_MAX_BUF_LEN 8 #define QPNP_GPLED_LPG_CHANNEL_RANGE_START 8 #define QPNP_GPLED_LPG_CHANNEL_RANGE_END 11 #define qpnp_check_gpled_lpg_channel(id) \ (id >= QPNP_GPLED_LPG_CHANNEL_RANGE_START && \ id <= QPNP_GPLED_LPG_CHANNEL_RANGE_END) #define QPNP_PWM_LUT_NOT_SUPPORTED 0x1 /* Supported PWM sizes */ #define QPNP_PWM_SIZE_6_BIT 6 #define QPNP_PWM_SIZE_7_BIT 7 #define QPNP_PWM_SIZE_8_BIT 8 #define QPNP_PWM_SIZE_9_BIT 9 /* Supported time levels */ enum time_level { LVL_NSEC, LVL_USEC, }; /* LPG revisions */ enum qpnp_lpg_revision { QPNP_LPG_REVISION_0 = 0x0, QPNP_LPG_REVISION_1 = 0x1, }; /* LPG LUT MODE STATE */ enum qpnp_lut_state { QPNP_LUT_ENABLE = 0x0, QPNP_LUT_DISABLE = 0x1, }; /* PWM MODE STATE */ enum qpnp_pwm_state { QPNP_PWM_ENABLE = 0x0, QPNP_PWM_DISABLE = 0x1, }; /* SPMI LPG registers */ enum qpnp_lpg_registers_list { QPNP_LPG_PATTERN_CONFIG, QPNP_LPG_PWM_SIZE_CLK, QPNP_LPG_PWM_FREQ_PREDIV_CLK, QPNP_LPG_PWM_TYPE_CONFIG, QPNP_PWM_VALUE_LSB, QPNP_PWM_VALUE_MSB, QPNP_ENABLE_CONTROL, QPNP_RAMP_CONTROL, QPNP_RAMP_STEP_DURATION_LSB = QPNP_RAMP_CONTROL + 9, QPNP_RAMP_STEP_DURATION_MSB, QPNP_PAUSE_HI_MULTIPLIER_LSB, QPNP_PAUSE_HI_MULTIPLIER_MSB, QPNP_PAUSE_LO_MULTIPLIER_LSB, QPNP_PAUSE_LO_MULTIPLIER_MSB, QPNP_HI_INDEX, QPNP_LO_INDEX, QPNP_TOTAL_LPG_SPMI_REGISTERS }; /* * Formula from HSID, * pause_time (hi/lo) = (pause_cnt- 1)*(ramp_ms) * OR, * pause_cnt = (pause_time / ramp_ms) + 1 */ #define QPNP_SET_PAUSE_CNT(to_pause_cnt, from_pause, ramp_ms) \ (to_pause_cnt = (from_pause / (ramp_ms ? 
ramp_ms : 1)) + 1) static unsigned int pt_t[NUM_LPG_PRE_DIVIDE][NUM_CLOCKS] = { { PRE_DIVIDE_1 * NSEC_1024HZ, PRE_DIVIDE_1 * NSEC_32768HZ, PRE_DIVIDE_1 * NSEC_19P2MHZ, }, { PRE_DIVIDE_3 * NSEC_1024HZ, PRE_DIVIDE_3 * NSEC_32768HZ, PRE_DIVIDE_3 * NSEC_19P2MHZ, }, { PRE_DIVIDE_5 * NSEC_1024HZ, PRE_DIVIDE_5 * NSEC_32768HZ, PRE_DIVIDE_5 * NSEC_19P2MHZ, }, { PRE_DIVIDE_6 * NSEC_1024HZ, PRE_DIVIDE_6 * NSEC_32768HZ, PRE_DIVIDE_6 * NSEC_19P2MHZ, }, }; static RADIX_TREE(lpg_dev_tree, GFP_KERNEL); struct qpnp_lut_config { u8 *duty_pct_list; int list_len; int lo_index; int hi_index; int lut_pause_hi_cnt; int lut_pause_lo_cnt; int ramp_step_ms; bool ramp_direction; bool pattern_repeat; bool ramp_toggle; bool enable_pause_hi; bool enable_pause_lo; }; struct qpnp_lpg_config { struct qpnp_lut_config lut_config; u16 base_addr; u16 lut_base_addr; u16 lut_size; }; struct qpnp_pwm_config { int channel_id; bool in_use; const char *lable; int pwm_value; int pwm_period; /* in microseconds */ int pwm_duty; /* in microseconds */ struct pwm_period_config period; int force_pwm_size; }; /* Public facing structure */ struct pwm_device { struct qpnp_lpg_chip *chip; struct qpnp_pwm_config pwm_config; }; struct qpnp_lpg_chip { struct spmi_device *spmi_dev; struct pwm_device pwm_dev; spinlock_t lpg_lock; struct qpnp_lpg_config lpg_config; u8 qpnp_lpg_registers[QPNP_TOTAL_LPG_SPMI_REGISTERS]; enum qpnp_lpg_revision revision; u8 sub_type; u32 flags; }; /* Internal functions */ static inline void qpnp_set_pattern_config(u8 *val, struct qpnp_lut_config *lut_config) { *val = lut_config->enable_pause_lo & QPNP_EN_PAUSE_LO_MASK; *val |= (lut_config->enable_pause_hi << QPNP_EN_PAUSE_HI_SHIFT) & QPNP_EN_PAUSE_HI_MASK; *val |= (lut_config->ramp_toggle << QPNP_RAMP_TOGGLE_SHIFT) & QPNP_RAMP_TOGGLE_MASK; *val |= (lut_config->pattern_repeat << QPNP_PATTERN_REPEAT_SHIFT) & QPNP_PATTERN_REPEAT_MASK; *val |= (lut_config->ramp_direction << QPNP_RAMP_DIRECTION_SHIFT) & QPNP_RAMP_DIRECTION_MASK; } static inline void 
qpnp_set_pwm_type_config(u8 *val, bool glitch, bool full_scale, bool en_phase, bool phase) { *val = phase; *val |= (en_phase << QPNP_EN_PHASE_STAGGER_SHIFT) & QPNP_EN_PHASE_STAGGER_MASK; *val |= (full_scale << QPNP_EN_FULL_SCALE_SHIFT) & QPNP_EN_FULL_SCALE_MASK; *val |= (glitch << QPNP_EN_GLITCH_REMOVAL_SHIFT) & QPNP_EN_GLITCH_REMOVAL_MASK; } static int qpnp_set_control(bool pwm_hi, bool pwm_lo, bool pwm_out, bool pwm_src, bool ramp_gen) { return (ramp_gen << QPNP_PWM_EN_RAMP_GEN_SHIFT) | (pwm_src << QPNP_PWM_SRC_SELECT_SHIFT) | (pwm_out << QPNP_EN_PWM_OUTPUT_SHIFT) | (pwm_lo << QPNP_EN_PWM_LO_SHIFT) | (pwm_hi << QPNP_EN_PWM_HIGH_SHIFT); } #define QPNP_ENABLE_LUT_CONTROL qpnp_set_control(0, 0, 0, 0, 1) #define QPNP_ENABLE_PWM_CONTROL qpnp_set_control(0, 0, 0, 1, 0) #define QPNP_ENABLE_PWM_MODE qpnp_set_control(1, 1, 1, 1, 0) #define QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL qpnp_set_control(1, 1, 1, 1, 1) #define QPNP_ENABLE_LPG_MODE qpnp_set_control(1, 1, 1, 0, 1) #define QPNP_DISABLE_PWM_MODE qpnp_set_control(0, 0, 0, 1, 0) #define QPNP_DISABLE_LPG_MODE qpnp_set_control(0, 0, 0, 0, 1) #define QPNP_IS_PWM_CONFIG_SELECTED(val) (val & QPNP_PWM_SRC_SELECT_MASK) #define QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE 0x80 #define QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE 0x0 #define QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE 0x80 static inline void qpnp_convert_to_lut_flags(int *flags, struct qpnp_lut_config *l_config) { *flags = ((l_config->ramp_direction ? PM_PWM_LUT_RAMP_UP : 0) | (l_config->pattern_repeat ? PM_PWM_LUT_LOOP : 0)| (l_config->ramp_toggle ? PM_PWM_LUT_REVERSE : 0) | (l_config->enable_pause_hi ? PM_PWM_LUT_PAUSE_HI_EN : 0) | (l_config->enable_pause_lo ? 
PM_PWM_LUT_PAUSE_LO_EN : 0)); } static inline void qpnp_set_lut_params(struct lut_params *l_params, struct qpnp_lut_config *l_config, int s_idx, int size) { l_params->start_idx = s_idx; l_params->idx_len = size; l_params->lut_pause_hi = l_config->lut_pause_hi_cnt; l_params->lut_pause_lo = l_config->lut_pause_lo_cnt; l_params->ramp_step_ms = l_config->ramp_step_ms; qpnp_convert_to_lut_flags(&l_params->flags, l_config); } static void qpnp_lpg_save(u8 *u8p, u8 mask, u8 val) { *u8p &= ~mask; *u8p |= val & mask; } static int qpnp_lpg_save_and_write(u8 value, u8 mask, u8 *reg, u16 addr, u16 size, struct qpnp_lpg_chip *chip) { qpnp_lpg_save(reg, mask, value); return spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, addr, reg, size); } /* * PWM Frequency = Clock Frequency / (N * T) * or * PWM Period = Clock Period * (N * T) * where * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size * T = Pre-divide * 2^m, where m = 0..7 (exponent) * * This is the formula to figure out m for the best pre-divide and clock: * (PWM Period / N) = (Pre-divide * Clock Period) * 2^m */ static void qpnp_lpg_calc_period(enum time_level tm_lvl, unsigned int period_value, struct pwm_device *pwm) { int n, m, clk, div; int best_m, best_div, best_clk; unsigned int last_err, cur_err, min_err; unsigned int tmp_p, period_n; int id = pwm->pwm_config.channel_id; int force_pwm_size = pwm->pwm_config.force_pwm_size; struct qpnp_lpg_chip *chip = pwm->chip; struct pwm_period_config *period = &pwm->pwm_config.period; /* PWM Period / N */ if (qpnp_check_gpled_lpg_channel(id)) n = 7; else n = 6; if (tm_lvl == LVL_USEC) { if (period_value < ((unsigned)(-1) / NSEC_PER_USEC)) { period_n = (period_value * NSEC_PER_USEC) >> n; } else { if (qpnp_check_gpled_lpg_channel(id)) n = 8; else n = 9; period_n = (period_value >> n) * NSEC_PER_USEC; } } else { period_n = period_value >> n; } if (force_pwm_size != 0) { if (n < force_pwm_size) period_n = period_n >> (force_pwm_size - n); else period_n = period_n << (n - 
force_pwm_size); n = force_pwm_size; pr_info("LPG channel '%d' pwm size is forced to=%d\n", id, n); } min_err = last_err = (unsigned)(-1); best_m = 0; best_clk = 0; best_div = 0; for (clk = 0; clk < NUM_CLOCKS; clk++) { for (div = 0; div < NUM_LPG_PRE_DIVIDE; div++) { /* period_n = (PWM Period / N) */ /* tmp_p = (Pre-divide * Clock Period) * 2^m */ tmp_p = pt_t[div][clk]; for (m = 0; m <= QPNP_PWM_M_MAX; m++) { if (period_n > tmp_p) cur_err = period_n - tmp_p; else cur_err = tmp_p - period_n; if (cur_err < min_err) { min_err = cur_err; best_m = m; best_clk = clk; best_div = div; } if (m && cur_err > last_err) /* Break for bigger cur_err */ break; last_err = cur_err; tmp_p <<= 1; } } } /* Adapt to optimal pwm size, the higher the resolution the better */ if (!force_pwm_size) { if (qpnp_check_gpled_lpg_channel(id)) { if (n == 7 && best_m >= 1) { n += 1; best_m -= 1; } } else if (n == 6) { if (best_m >= 3) { n += 3; best_m -= 3; } else if (best_m >= 1 && chip->sub_type != QPNP_PWM_MODE_ONLY_SUB_TYPE) { n += 1; best_m -= 1; } } } period->pwm_size = n; period->clk = best_clk; period->pre_div = best_div; period->pre_div_exp = best_m; } static void qpnp_lpg_calc_pwm_value(struct pwm_device *pwm, unsigned int period_value, unsigned int duty_value) { unsigned int max_pwm_value, tmp; struct qpnp_pwm_config *pwm_config = &pwm->pwm_config; /* Figure out pwm_value with overflow handling */ tmp = 1 << (sizeof(tmp) * 8 - pwm_config->period.pwm_size); if (duty_value < tmp) { tmp = duty_value << pwm_config->period.pwm_size; pwm_config->pwm_value = tmp / period_value; } else { tmp = period_value >> pwm_config->period.pwm_size; pwm_config->pwm_value = duty_value / tmp; } max_pwm_value = (1 << pwm_config->period.pwm_size) - 1; if (pwm_config->pwm_value > max_pwm_value) pwm_config->pwm_value = max_pwm_value; } static int qpnp_lpg_change_table(struct pwm_device *pwm, int duty_pct[], int raw_value) { unsigned int pwm_value, max_pwm_value; struct qpnp_lpg_chip *chip = pwm->chip; struct 
qpnp_lut_config *lut = &chip->lpg_config.lut_config; int i, pwm_size, rc = 0; int burst_size = SPMI_MAX_BUF_LEN; int list_len = lut->list_len << 1; int offset = (lut->lo_index << 1) - 2; pwm_size = QPNP_GET_PWM_SIZE( chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) + QPNP_MIN_PWM_BIT_SIZE; max_pwm_value = (1 << pwm_size) - 1; if (unlikely(lut->list_len != (lut->hi_index - lut->lo_index + 1))) { pr_err("LUT internal Data structure corruption detected\n"); pr_err("LUT list size: %d\n", lut->list_len); pr_err("However, index size is: %d\n", (lut->hi_index - lut->lo_index + 1)); return -EINVAL; } for (i = 0; i < lut->list_len; i++) { if (raw_value) pwm_value = duty_pct[i]; else pwm_value = (duty_pct[i] << pwm_size) / 100; if (pwm_value > max_pwm_value) pwm_value = max_pwm_value; if (qpnp_check_gpled_lpg_channel(pwm->pwm_config.channel_id)) { lut->duty_pct_list[i] = pwm_value; } else { lut->duty_pct_list[i*2] = pwm_value; lut->duty_pct_list[(i*2)+1] = (pwm_value >> QPNP_PWM_VALUE_MSB_SHIFT) & QPNP_PWM_VALUE_MSB_MASK; } } /* * For the Keypad Backlight Lookup Table (KPDBL_LUT), * offset is lo_index. 
*/ if (qpnp_check_gpled_lpg_channel(pwm->pwm_config.channel_id)) offset = lut->lo_index; /* Write with max allowable burst mode, each entry is of two bytes */ for (i = 0; i < list_len; i += burst_size) { if (i + burst_size >= list_len) burst_size = list_len - i; rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->lpg_config.lut_base_addr + offset + i, lut->duty_pct_list + i, burst_size); } return rc; } static void qpnp_lpg_save_period(struct pwm_device *pwm) { u8 mask, val; struct qpnp_lpg_chip *chip = pwm->chip; struct qpnp_pwm_config *pwm_config = &pwm->pwm_config; if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) { QPNP_SET_PWM_CLK_SUB_TYPE(val, pwm_config->period.clk, pwm_config->period.pwm_size); mask = QPNP_PWM_SIZE_MASK_SUB_TYPE | QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE; } else { QPNP_SET_PWM_CLK(val, pwm_config->period.clk, pwm_config->period.pwm_size); mask = QPNP_PWM_SIZE_MASK | QPNP_PWM_FREQ_CLK_SELECT_MASK; } qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], mask, val); QPNP_SET_PWM_FREQ_PREDIV(val, pwm_config->period.pre_div, pwm_config->period.pre_div_exp); mask = QPNP_PWM_FREQ_PRE_DIVIDE_MASK | QPNP_PWM_FREQ_EXP_MASK; qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], mask, val); } static int qpnp_lpg_save_pwm_value(struct pwm_device *pwm) { unsigned int max_pwm_value; int pwm_size; u8 mask, value; struct qpnp_lpg_chip *chip = pwm->chip; struct qpnp_pwm_config *pwm_config = &pwm->pwm_config; struct qpnp_lpg_config *lpg_config = &chip->lpg_config; int rc; if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) pwm_size = QPNP_GET_PWM_SIZE_SUB_TYPE( chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) ? 
QPNP_MAX_PWM_BIT_SIZE : QPNP_MIN_PWM_BIT_SIZE; else pwm_size = QPNP_GET_PWM_SIZE( chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) + QPNP_MIN_PWM_BIT_SIZE; max_pwm_value = (1 << pwm_size) - 1; if (pwm_config->pwm_value > max_pwm_value) pwm_config->pwm_value = max_pwm_value; value = pwm_config->pwm_value; mask = QPNP_PWM_VALUE_LSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PWM_VALUE_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PWM_VALUE_LSB), 1, chip); if (rc) return rc; value = (pwm_config->pwm_value >> QPNP_PWM_VALUE_MSB_SHIFT) & QPNP_PWM_VALUE_MSB_MASK; mask = QPNP_PWM_VALUE_MSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PWM_VALUE_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PWM_VALUE_MSB), 1, chip); if (rc) return rc; if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) { value = QPNP_PWM_SYNC_VALUE & QPNP_PWM_SYNC_MASK; rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(lpg_config->base_addr, SPMI_LPG_PWM_SYNC), &value, 1); } return rc; } static int qpnp_lpg_configure_pattern(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lut_config *lut_config = &lpg_config->lut_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; qpnp_set_pattern_config(&value, lut_config); mask = QPNP_RAMP_DIRECTION_MASK | QPNP_PATTERN_REPEAT_MASK | QPNP_RAMP_TOGGLE_MASK | QPNP_EN_PAUSE_HI_MASK | QPNP_EN_PAUSE_LO_MASK; return qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_LPG_PATTERN_CONFIG], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PATTERN_CONFIG), 1, chip); } static int qpnp_lpg_configure_pwm(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lpg_chip *chip = pwm->chip; int rc; u8 value, mask; rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(lpg_config->base_addr, 
QPNP_LPG_PWM_SIZE_CLK), &chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1); if (rc) return rc; rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_FREQ_PREDIV_CLK), &chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1); if (rc) return rc; qpnp_set_pwm_type_config(&value, 1, 0, 0, 0); mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK | QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK; return qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_TYPE_CONFIG), 1, chip); } static int qpnp_configure_pwm_control(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) return 0; value = QPNP_ENABLE_PWM_CONTROL; mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK | QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK; return qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_ENABLE_CONTROL), 1, chip); } static int qpnp_configure_lpg_control(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; value = QPNP_ENABLE_LUT_CONTROL; mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK | QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK; return qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_ENABLE_CONTROL), 1, chip); } static int qpnp_lpg_configure_ramp_step_duration(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lut_config lut_config = lpg_config->lut_config; struct qpnp_lpg_chip *chip = 
pwm->chip; int rc, value; u8 val, mask; value = QPNP_GET_RAMP_STEP_DURATION(lut_config.ramp_step_ms); val = value & QPNP_RAMP_STEP_DURATION_LSB_MASK; mask = QPNP_RAMP_STEP_DURATION_LSB_MASK; rc = qpnp_lpg_save_and_write(val, mask, &pwm->chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_RAMP_STEP_DURATION_LSB), 1, chip); if (rc) return rc; val = (value >> QPNP_RAMP_STEP_DURATION_MSB_SHIFT) & QPNP_RAMP_STEP_DURATION_MSB_MASK; mask = QPNP_RAMP_STEP_DURATION_MSB_MASK; return qpnp_lpg_save_and_write(val, mask, &pwm->chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_RAMP_STEP_DURATION_MSB), 1, chip); } static int qpnp_lpg_configure_pause(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lut_config lut_config = lpg_config->lut_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; int rc = 0; if (lut_config.enable_pause_hi) { value = lut_config.lut_pause_hi_cnt; mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip); if (rc) return rc; value = (lut_config.lut_pause_hi_cnt >> QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT) & QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK; mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip); } else { value = 0; mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip); if (rc) return rc; mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, 
&pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip); if (rc) return rc; } if (lut_config.enable_pause_lo) { value = lut_config.lut_pause_lo_cnt; mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip); if (rc) return rc; value = (lut_config.lut_pause_lo_cnt >> QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT) & QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK; mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip); } else { value = 0; mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip); if (rc) return rc; mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip); return rc; } return rc; } static int qpnp_lpg_configure_index(struct pwm_device *pwm) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lut_config lut_config = lpg_config->lut_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; int rc = 0; value = lut_config.hi_index; mask = QPNP_HI_INDEX_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_HI_INDEX], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_HI_INDEX), 1, chip); if (rc) return rc; value = lut_config.lo_index; mask = QPNP_LO_INDEX_MASK; rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_LO_INDEX], 
SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LO_INDEX), 1, chip); return rc; } static int qpnp_lpg_change_lut(struct pwm_device *pwm) { int rc; rc = qpnp_lpg_configure_pattern(pwm); if (rc) { pr_err("Failed to configure LUT pattern"); return rc; } rc = qpnp_lpg_configure_pwm(pwm); if (rc) { pr_err("Failed to configure LUT pattern"); return rc; } rc = qpnp_configure_lpg_control(pwm); if (rc) { pr_err("Failed to configure pause registers"); return rc; } rc = qpnp_lpg_configure_ramp_step_duration(pwm); if (rc) { pr_err("Failed to configure duty time"); return rc; } rc = qpnp_lpg_configure_pause(pwm); if (rc) { pr_err("Failed to configure pause registers"); return rc; } rc = qpnp_lpg_configure_index(pwm); if (rc) { pr_err("Failed to configure index registers"); return rc; } return rc; } static int qpnp_lpg_configure_lut_state(struct pwm_device *pwm, enum qpnp_lut_state state) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value1, value2, mask1, mask2; u8 *reg1, *reg2; u16 addr, addr1; int rc; value1 = pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL]; reg1 = &pwm->chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL]; reg2 = &pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]; mask2 = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK | QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK; switch (chip->revision) { case QPNP_LPG_REVISION_0: if (state == QPNP_LUT_ENABLE) { QPNP_ENABLE_LUT_V0(value1); value2 = QPNP_ENABLE_LPG_MODE; } else { QPNP_DISABLE_LUT_V0(value1); value2 = QPNP_DISABLE_LPG_MODE; } mask1 = QPNP_RAMP_START_MASK; addr1 = SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_RAMP_CONTROL); break; case QPNP_LPG_REVISION_1: if (state == QPNP_LUT_ENABLE) { QPNP_ENABLE_LUT_V1(value1, pwm->pwm_config.channel_id); value2 = QPNP_ENABLE_LPG_MODE; } else { QPNP_DISABLE_LUT_V1(value1, pwm->pwm_config.channel_id); value2 = QPNP_DISABLE_LPG_MODE; } mask1 = value1; addr1 = lpg_config->lut_base_addr + 
SPMI_LPG_REV1_RAMP_CONTROL_OFFSET; break; default: pr_err("Invalid LPG revision\n"); return -EINVAL; } addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_ENABLE_CONTROL); rc = qpnp_lpg_save_and_write(value2, mask2, reg2, addr, 1, chip); if (rc) return rc; return qpnp_lpg_save_and_write(value1, mask1, reg1, addr1, 1, chip); } static inline int qpnp_enable_pwm_mode(struct qpnp_pwm_config *pwm_conf) { if (qpnp_check_gpled_lpg_channel(pwm_conf->channel_id)) return QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL; return QPNP_ENABLE_PWM_MODE; } static int qpnp_lpg_configure_pwm_state(struct pwm_device *pwm, enum qpnp_pwm_state state) { struct qpnp_lpg_config *lpg_config = &pwm->chip->lpg_config; struct qpnp_lpg_chip *chip = pwm->chip; u8 value, mask; int rc; if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) { if (state == QPNP_PWM_ENABLE) value = QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE; else value = QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE; mask = QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE; } else { if (state == QPNP_PWM_ENABLE) value = qpnp_enable_pwm_mode(&pwm->pwm_config); else value = QPNP_DISABLE_PWM_MODE; mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK | QPNP_EN_PWM_OUTPUT_MASK | QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK; } rc = qpnp_lpg_save_and_write(value, mask, &pwm->chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL], SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_ENABLE_CONTROL), 1, chip); if (rc) goto out; /* * Due to LPG hardware bug, in the PWM mode, having enabled PWM, * We have to write PWM values one more time. 
*/ if (state == QPNP_PWM_ENABLE) return qpnp_lpg_save_pwm_value(pwm); out: return rc; } static int _pwm_config(struct pwm_device *pwm, enum time_level tm_lvl, int duty_value, int period_value) { struct qpnp_pwm_config *pwm_config; struct qpnp_lpg_chip *chip; struct pwm_period_config *period; int period_us, duty_us; int rc; chip = pwm->chip; pwm_config = &pwm->pwm_config; period = &pwm_config->period; if (tm_lvl == LVL_USEC) { period_us = period_value; duty_us = duty_value; } else { period_us = period_value / NSEC_PER_USEC; duty_us = duty_value / NSEC_PER_USEC; } if (pwm_config->pwm_period != period_us) { qpnp_lpg_calc_period(tm_lvl, period_value, pwm); qpnp_lpg_save_period(pwm); pwm_config->pwm_period = period_us; } pwm_config->pwm_duty = duty_us; qpnp_lpg_calc_pwm_value(pwm, period_value, duty_value); rc = qpnp_lpg_save_pwm_value(pwm); if (rc) { pr_err("Could not update PWM value for channel %d rc=%d\n", pwm_config->channel_id, rc); return rc; } rc = qpnp_lpg_configure_pwm(pwm); if (rc) { pr_err("Could not configure PWM clock for\n"); pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc); return rc; } rc = qpnp_configure_pwm_control(pwm); if (rc) { pr_err("Could not update PWM control for"); pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc); return rc; } pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n", (unsigned)duty_us, (unsigned)period_us, (tm_lvl == LVL_USEC) ? 
"usec" : "nsec", pwm_config->pwm_value, 1 << period->pwm_size); return 0; } static int _pwm_lut_config(struct pwm_device *pwm, int period_us, int duty_pct[], struct lut_params lut_params) { struct qpnp_lpg_config *lpg_config; struct qpnp_lut_config *lut_config; struct pwm_period_config *period; struct qpnp_pwm_config *pwm_config; int start_idx = lut_params.start_idx; int len = lut_params.idx_len; int flags = lut_params.flags; int raw_lut, ramp_step_ms; int rc = 0; pwm_config = &pwm->pwm_config; lpg_config = &pwm->chip->lpg_config; lut_config = &lpg_config->lut_config; period = &pwm_config->period; if (pwm_config->pwm_period != period_us) { qpnp_lpg_calc_period(LVL_USEC, period_us, pwm); qpnp_lpg_save_period(pwm); pwm_config->pwm_period = period_us; } if (flags & PM_PWM_LUT_NO_TABLE) goto after_table_write; raw_lut = 0; if (flags & PM_PWM_LUT_USE_RAW_VALUE) raw_lut = 1; lut_config->list_len = len; lut_config->lo_index = start_idx + 1; lut_config->hi_index = start_idx + len; rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut); if (rc) { pr_err("qpnp_lpg_change_table: rc=%d\n", rc); return -EINVAL; } after_table_write: ramp_step_ms = lut_params.ramp_step_ms; if (ramp_step_ms > PM_PWM_LUT_RAMP_STEP_TIME_MAX) ramp_step_ms = PM_PWM_LUT_RAMP_STEP_TIME_MAX; QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt, lut_params.lut_pause_lo, ramp_step_ms); if (lut_config->lut_pause_lo_cnt > PM_PWM_MAX_PAUSE_CNT) lut_config->lut_pause_lo_cnt = PM_PWM_MAX_PAUSE_CNT; QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt, lut_params.lut_pause_hi, ramp_step_ms); if (lut_config->lut_pause_hi_cnt > PM_PWM_MAX_PAUSE_CNT) lut_config->lut_pause_hi_cnt = PM_PWM_MAX_PAUSE_CNT; lut_config->ramp_step_ms = ramp_step_ms; lut_config->ramp_direction = !!(flags & PM_PWM_LUT_RAMP_UP); lut_config->pattern_repeat = !!(flags & PM_PWM_LUT_LOOP); lut_config->ramp_toggle = !!(flags & PM_PWM_LUT_REVERSE); lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN); lut_config->enable_pause_lo = !!(flags & 
PM_PWM_LUT_PAUSE_LO_EN); rc = qpnp_lpg_change_lut(pwm); return rc; } static int _pwm_enable(struct pwm_device *pwm) { int rc = 0; struct qpnp_lpg_chip *chip; unsigned long flags; chip = pwm->chip; spin_lock_irqsave(&pwm->chip->lpg_lock, flags); if (QPNP_IS_PWM_CONFIG_SELECTED( chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) || chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) { rc = qpnp_lpg_configure_pwm_state(pwm, QPNP_PWM_ENABLE); } else if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) { rc = qpnp_lpg_configure_lut_state(pwm, QPNP_LUT_ENABLE); } spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to enable PWM channel: %d\n", pwm->pwm_config.channel_id); return rc; } /* APIs */ /** * pwm_request - request a PWM device * @channel_id: PWM id or channel * @lable: the label to identify the user */ struct pwm_device *pwm_request(int pwm_id, const char *lable) { struct qpnp_lpg_chip *chip; struct pwm_device *pwm; unsigned long flags; chip = radix_tree_lookup(&lpg_dev_tree, pwm_id); if (!chip) { pr_err("Could not find PWM Device for the\n"); pr_err("input pwm channel %d\n", pwm_id); return ERR_PTR(-EINVAL); } spin_lock_irqsave(&chip->lpg_lock, flags); pwm = &chip->pwm_dev; if (pwm->pwm_config.in_use) { pr_err("PWM device associated with the"); pr_err("input pwm id: %d is in use by %s", pwm_id, pwm->pwm_config.lable); pwm = ERR_PTR(-EBUSY); } else { pwm->pwm_config.in_use = 1; pwm->pwm_config.lable = lable; } spin_unlock_irqrestore(&chip->lpg_lock, flags); return pwm; } EXPORT_SYMBOL_GPL(pwm_request); /** * pwm_free - free a PWM device * @pwm: the PWM device */ void pwm_free(struct pwm_device *pwm) { struct qpnp_pwm_config *pwm_config; unsigned long flags; if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) { pr_err("Invalid pwm handle or no pwm_chip\n"); return; } spin_lock_irqsave(&pwm->chip->lpg_lock, flags); pwm_config = &pwm->pwm_config; if (pwm_config->in_use) { qpnp_lpg_configure_pwm_state(pwm, QPNP_PWM_DISABLE); if (!(pwm->chip->flags & 
QPNP_PWM_LUT_NOT_SUPPORTED)) qpnp_lpg_configure_lut_state(pwm, QPNP_LUT_DISABLE); pwm_config->in_use = 0; pwm_config->lable = NULL; } spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); } EXPORT_SYMBOL_GPL(pwm_free); /** * pwm_config - change a PWM device configuration * @pwm: the PWM device * @period_ns: period in nanoseconds * @duty_ns: duty cycle in nanoseconds */ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) { int rc; unsigned long flags; if (pwm == NULL || IS_ERR(pwm) || duty_ns > period_ns || (unsigned)period_ns < PM_PWM_PERIOD_MIN * NSEC_PER_USEC) { pr_err("Invalid pwm handle or parameters\n"); return -EINVAL; } if (!pwm->pwm_config.in_use) return -EINVAL; spin_lock_irqsave(&pwm->chip->lpg_lock, flags); rc = _pwm_config(pwm, LVL_NSEC, duty_ns, period_ns); spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to configure PWM mode\n"); return rc; } EXPORT_SYMBOL_GPL(pwm_config); /** * pwm_config_us - change a PWM device configuration * @pwm: the PWM device * @period_us: period in microseconds * @duty_us: duty cycle in microseconds */ int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us) { int rc; unsigned long flags; if (pwm == NULL || IS_ERR(pwm) || duty_us > period_us || (unsigned)period_us > PM_PWM_PERIOD_MAX || (unsigned)period_us < PM_PWM_PERIOD_MIN) { pr_err("Invalid pwm handle or parameters\n"); return -EINVAL; } if (!pwm->pwm_config.in_use) return -EINVAL; spin_lock_irqsave(&pwm->chip->lpg_lock, flags); rc = _pwm_config(pwm, LVL_USEC, duty_us, period_us); spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to configure PWM mode\n"); return rc; } EXPORT_SYMBOL_GPL(pwm_config_us); /** * pwm_enable - start a PWM output toggling * @pwm: the PWM device */ int pwm_enable(struct pwm_device *pwm) { struct qpnp_pwm_config *p_config; if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) { pr_err("Invalid pwm handle or no pwm_chip\n"); return -EINVAL; } p_config = 
&pwm->pwm_config; if (!p_config->in_use) { pr_err("channel_id: %d: stale handle?\n", p_config->channel_id); return -EINVAL; } return _pwm_enable(pwm); } EXPORT_SYMBOL_GPL(pwm_enable); /** * pwm_disable - stop a PWM output toggling * @pwm: the PWM device */ void pwm_disable(struct pwm_device *pwm) { struct qpnp_pwm_config *pwm_config; struct qpnp_lpg_chip *chip; unsigned long flags; int rc = 0; if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) { pr_err("Invalid pwm handle or no pwm_chip\n"); return; } spin_lock_irqsave(&pwm->chip->lpg_lock, flags); chip = pwm->chip; pwm_config = &pwm->pwm_config; if (pwm_config->in_use) { if (QPNP_IS_PWM_CONFIG_SELECTED( chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) || chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) { rc = qpnp_lpg_configure_pwm_state(pwm, QPNP_PWM_DISABLE); } else if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) { rc = qpnp_lpg_configure_lut_state(pwm, QPNP_LUT_DISABLE); } } spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to disable PWM channel: %d\n", pwm_config->channel_id); } EXPORT_SYMBOL_GPL(pwm_disable); /** * pwm_change_mode - Change the PWM mode configuration * @pwm: the PWM device * @mode: Mode selection value */ int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode) { int rc; unsigned long flags; if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) { pr_err("Invalid pwm handle or no pwm_chip\n"); return -EINVAL; } if (mode < PM_PWM_MODE_PWM || mode > PM_PWM_MODE_LPG) { pr_err("Invalid mode value\n"); return -EINVAL; } spin_lock_irqsave(&pwm->chip->lpg_lock, flags); if (mode) rc = qpnp_configure_lpg_control(pwm); else rc = qpnp_configure_pwm_control(pwm); spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to change the mode\n"); return rc; } EXPORT_SYMBOL_GPL(pwm_change_mode); /** * pwm_config_period - change PWM period * * @pwm: the PWM device * @pwm_p: period in struct qpnp_lpg_period */ int pwm_config_period(struct pwm_device *pwm, struct 
pwm_period_config *period) { struct qpnp_pwm_config *pwm_config; struct qpnp_lpg_config *lpg_config; struct qpnp_lpg_chip *chip; unsigned long flags; int rc = 0; if (pwm == NULL || IS_ERR(pwm) || period == NULL) return -EINVAL; if (pwm->chip == NULL) return -ENODEV; spin_lock_irqsave(&pwm->chip->lpg_lock, flags); chip = pwm->chip; pwm_config = &pwm->pwm_config; lpg_config = &chip->lpg_config; if (!pwm_config->in_use) { rc = -EINVAL; goto out_unlock; } pwm_config->period.pwm_size = period->pwm_size; pwm_config->period.clk = period->clk; pwm_config->period.pre_div = period->pre_div; pwm_config->period.pre_div_exp = period->pre_div_exp; qpnp_lpg_save_period(pwm); rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_SIZE_CLK), &chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1); if (rc) { pr_err("Write failed: QPNP_LPG_PWM_SIZE_CLK register, rc: %d\n", rc); goto out_unlock; } rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid, SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_FREQ_PREDIV_CLK), &chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1); if (rc) { pr_err("Failed to write to QPNP_LPG_PWM_FREQ_PREDIV_CLK\n"); pr_err("register, rc = %d\n", rc); } out_unlock: spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); return rc; } EXPORT_SYMBOL(pwm_config_period); /** * pwm_config_pwm_value - change a PWM device configuration * @pwm: the PWM device * @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size) */ int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value) { struct qpnp_lpg_config *lpg_config; struct qpnp_pwm_config *pwm_config; unsigned long flags; int rc = 0; if (pwm == NULL || IS_ERR(pwm)) { pr_err("Invalid parameter passed\n"); return -EINVAL; } if (pwm->chip == NULL) { pr_err("Invalid device handle\n"); return -ENODEV; } lpg_config = &pwm->chip->lpg_config; pwm_config = &pwm->pwm_config; spin_lock_irqsave(&pwm->chip->lpg_lock, flags); if 
(!pwm_config->in_use || !pwm_config->pwm_period) { rc = -EINVAL; pr_err("PWM channel isn't in use or period value missing\n"); goto out_unlock; } if (pwm_config->pwm_value == pwm_value) goto out_unlock; pwm_config->pwm_value = pwm_value; rc = qpnp_lpg_save_pwm_value(pwm); if (rc) pr_err("Could not update PWM value for channel %d rc=%d\n", pwm_config->channel_id, rc); out_unlock: spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); return rc; } EXPORT_SYMBOL_GPL(pwm_config_pwm_value); /** * pwm_lut_config - change LPG LUT device configuration * @pwm: the PWM device * @period_us: period in micro second * @duty_pct: array of duty cycles in percent, like 20, 50. * @lut_params: Lookup table parameters */ int pwm_lut_config(struct pwm_device *pwm, int period_us, int duty_pct[], struct lut_params lut_params) { unsigned long flags; int rc = 0; if (pwm == NULL || IS_ERR(pwm) || !lut_params.idx_len) { pr_err("Invalid pwm handle or idx_len=0\n"); return -EINVAL; } if (pwm->chip == NULL) return -ENODEV; if (pwm->chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) { pr_err("LUT mode isn't supported\n"); return -EINVAL; } if (!pwm->pwm_config.in_use) { pr_err("channel_id: %d: stale handle?\n", pwm->pwm_config.channel_id); return -EINVAL; } if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) { pr_err("Invalid duty_pct with flag\n"); return -EINVAL; } if ((lut_params.start_idx + lut_params.idx_len) > pwm->chip->lpg_config.lut_size) { pr_err("Exceed LUT limit\n"); return -EINVAL; } if ((unsigned)period_us > PM_PWM_PERIOD_MAX || (unsigned)period_us < PM_PWM_PERIOD_MIN) { pr_err("Period out of range\n"); return -EINVAL; } spin_lock_irqsave(&pwm->chip->lpg_lock, flags); rc = _pwm_lut_config(pwm, period_us, duty_pct, lut_params); spin_unlock_irqrestore(&pwm->chip->lpg_lock, flags); if (rc) pr_err("Failed to configure LUT\n"); return rc; } EXPORT_SYMBOL_GPL(pwm_lut_config); static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node, struct device_node *of_parent, struct 
qpnp_lpg_chip *chip) { int rc, period; struct pwm_device *pwm_dev = &chip->pwm_dev; rc = of_property_read_u32(of_parent, "qcom,period", (u32 *)&period); if (rc) { pr_err("node is missing PWM Period prop"); return rc; } rc = of_property_read_u32(of_pwm_node, "qcom,duty", &pwm_dev->pwm_config.pwm_duty); if (rc) { pr_err("node is missing PWM Duty prop"); return rc; } rc = _pwm_config(pwm_dev, LVL_USEC, pwm_dev->pwm_config.pwm_duty, period); return rc; } #define qpnp_check_optional_dt_bindings(func) \ do { \ rc = func; \ if (rc && rc != -EINVAL) \ goto out; \ rc = 0; \ } while (0); static int qpnp_parse_lpg_dt_config(struct device_node *of_lpg_node, struct device_node *of_parent, struct qpnp_lpg_chip *chip) { int rc, period, list_size, start_idx, *duty_pct_list; struct pwm_device *pwm_dev = &chip->pwm_dev; struct qpnp_lpg_config *lpg_config = &chip->lpg_config; struct qpnp_lut_config *lut_config = &lpg_config->lut_config; struct lut_params lut_params; rc = of_property_read_u32(of_parent, "qcom,period", &period); if (rc) { pr_err("node is missing PWM Period prop"); return rc; } if (!of_get_property(of_lpg_node, "qcom,duty-percents", &list_size)) { pr_err("node is missing duty-pct list"); return rc; } rc = of_property_read_u32(of_lpg_node, "cell-index", &start_idx); if (rc) { pr_err("Missing start index"); return rc; } list_size /= sizeof(u32); if (list_size + start_idx > lpg_config->lut_size) { pr_err("duty pct list size overflows\n"); return -EINVAL; } duty_pct_list = kzalloc(sizeof(u32) * list_size, GFP_KERNEL); if (!duty_pct_list) { pr_err("kzalloc failed on duty_pct_list\n"); return -ENOMEM; } rc = of_property_read_u32_array(of_lpg_node, "qcom,duty-percents", duty_pct_list, list_size); if (rc) { pr_err("invalid or missing property:\n"); pr_err("qcom,duty-pcts-list\n"); kfree(duty_pct_list); return rc; } /* Read optional properties */ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,ramp-step-duration", &lut_config->ramp_step_ms)); 
qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pause-hi", &lut_config->lut_pause_hi_cnt)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pause-lo", &lut_config->lut_pause_lo_cnt)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-ramp-direction", (u32 *)&lut_config->ramp_direction)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-pattern-repeat", (u32 *)&lut_config->pattern_repeat)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-ramp-toggle", (u32 *)&lut_config->ramp_toggle)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-enable-pause-hi", (u32 *)&lut_config->enable_pause_hi)); qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node, "qcom,lpg-lut-enable-pause-lo", (u32 *)&lut_config->enable_pause_lo)); qpnp_set_lut_params(&lut_params, lut_config, start_idx, list_size); _pwm_lut_config(pwm_dev, period, duty_pct_list, lut_params); out: kfree(duty_pct_list); return rc; } /* Fill in lpg device elements based on values found in device tree. */ static int qpnp_parse_dt_config(struct spmi_device *spmi, struct qpnp_lpg_chip *chip) { int rc, enable, lut_entry_size; const char *lable; struct resource *res; struct device_node *node; int found_pwm_subnode = 0; int found_lpg_subnode = 0; struct device_node *of_node = spmi->dev.of_node; struct pwm_device *pwm_dev = &chip->pwm_dev; struct qpnp_lpg_config *lpg_config = &chip->lpg_config; struct qpnp_lut_config *lut_config = &lpg_config->lut_config; int force_pwm_size = 0; rc = of_property_read_u32(of_node, "qcom,channel-id", &pwm_dev->pwm_config.channel_id); if (rc) { dev_err(&spmi->dev, "%s: node is missing LPG channel id\n", __func__); goto out; } /* * For cetrain LPG channels PWM size can be forced. 
So that * for every requested pwm period closest pwm frequency is * selected in qpnp_lpg_calc_period() for the forced pwm size. */ rc = of_property_read_u32(of_node, "qcom,force-pwm-size", &force_pwm_size); if (qpnp_check_gpled_lpg_channel(pwm_dev->pwm_config.channel_id)) { if (!(force_pwm_size == QPNP_PWM_SIZE_7_BIT || force_pwm_size == QPNP_PWM_SIZE_8_BIT)) force_pwm_size = 0; } else if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) { if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT || force_pwm_size == QPNP_PWM_SIZE_9_BIT)) force_pwm_size = 0; } else if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT || force_pwm_size == QPNP_PWM_SIZE_7_BIT || force_pwm_size == QPNP_PWM_SIZE_9_BIT)) force_pwm_size = 0; pwm_dev->pwm_config.force_pwm_size = force_pwm_size; res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM, QPNP_LPG_CHANNEL_BASE); if (!res) { dev_err(&spmi->dev, "%s: node is missing base address\n", __func__); return -EINVAL; } lpg_config->base_addr = res->start; res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM, QPNP_LPG_LUT_BASE); if (!res) { chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED; } else { lpg_config->lut_base_addr = res->start; /* Each entry of LUT is of 2 bytes for generic LUT and of 1 byte * for KPDBL/GLED LUT. 
*/ lpg_config->lut_size = resource_size(res) >> 1; lut_entry_size = sizeof(u16); if (qpnp_check_gpled_lpg_channel( pwm_dev->pwm_config.channel_id)) { lpg_config->lut_size = resource_size(res); lut_entry_size = sizeof(u8); } lut_config->duty_pct_list = kzalloc(lpg_config->lut_size * lut_entry_size, GFP_KERNEL); if (!lut_config->duty_pct_list) { pr_err("can not allocate duty pct list\n"); return -ENOMEM; } } for_each_child_of_node(of_node, node) { rc = of_property_read_string(node, "label", &lable); if (rc) { dev_err(&spmi->dev, "%s: Missing lable property\n", __func__); goto out; } if (!strncmp(lable, "pwm", 3)) { rc = qpnp_parse_pwm_dt_config(node, of_node, chip); if (rc) goto out; found_pwm_subnode = 1; } else if (!strncmp(lable, "lpg", 3) && !(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) { qpnp_parse_lpg_dt_config(node, of_node, chip); if (rc) goto out; found_lpg_subnode = 1; } else { dev_err(&spmi->dev, "%s: Invalid value for lable prop", __func__); } } rc = of_property_read_u32(of_node, "qcom,mode-select", &enable); if (rc) goto read_opt_props; if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) || (enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) { dev_err(&spmi->dev, "%s: Invalid mode select\n", __func__); rc = -EINVAL; goto out; } pwm_change_mode(pwm_dev, enable); _pwm_enable(pwm_dev); read_opt_props: /* Initialize optional config parameters from DT if provided */ of_property_read_string(node, "qcom,channel-owner", &pwm_dev->pwm_config.lable); return 0; out: kfree(lut_config->duty_pct_list); return rc; } static int __devinit qpnp_pwm_probe(struct spmi_device *spmi) { struct qpnp_lpg_chip *chip; int rc, id; chip = kzalloc(sizeof *chip, GFP_KERNEL); if (chip == NULL) { pr_err("kzalloc() failed.\n"); return -ENOMEM; } spin_lock_init(&chip->lpg_lock); chip->spmi_dev = spmi; chip->pwm_dev.chip = chip; dev_set_drvdata(&spmi->dev, chip); rc = qpnp_parse_dt_config(spmi, chip); if (rc) goto failed_config; id = chip->pwm_dev.pwm_config.channel_id; 
spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->lpg_config.base_addr + SPMI_LPG_REVISION2_OFFSET, (u8 *) &chip->revision, 1); if (chip->revision < QPNP_LPG_REVISION_0 || chip->revision > QPNP_LPG_REVISION_1) { pr_err("Unknown LPG revision detected, rev:%d\n", chip->revision); rc = -EINVAL; goto failed_insert; } spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid, chip->lpg_config.base_addr + SPMI_LPG_SUB_TYPE_OFFSET, &chip->sub_type, 1); rc = radix_tree_insert(&lpg_dev_tree, id, chip); if (rc) { dev_err(&spmi->dev, "%s: Failed to register LPG Channel %d\n", __func__, id); goto failed_insert; } return 0; failed_insert: kfree(chip->lpg_config.lut_config.duty_pct_list); failed_config: dev_set_drvdata(&spmi->dev, NULL); kfree(chip); return rc; } static int __devexit qpnp_pwm_remove(struct spmi_device *spmi) { struct qpnp_lpg_chip *chip; struct qpnp_lpg_config *lpg_config; chip = dev_get_drvdata(&spmi->dev); dev_set_drvdata(&spmi->dev, NULL); if (chip) { lpg_config = &chip->lpg_config; kfree(lpg_config->lut_config.duty_pct_list); kfree(chip); } return 0; } static struct of_device_id spmi_match_table[] = { { .compatible = QPNP_LPG_DRIVER_NAME, }, {} }; static const struct spmi_device_id qpnp_lpg_id[] = { { QPNP_LPG_DRIVER_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(spmi, qpnp_lpg_id); static struct spmi_driver qpnp_lpg_driver = { .driver = { .name = QPNP_LPG_DRIVER_NAME, .of_match_table = spmi_match_table, .owner = THIS_MODULE, }, .probe = qpnp_pwm_probe, .remove = __devexit_p(qpnp_pwm_remove), .id_table = qpnp_lpg_id, }; /** * qpnp_lpg_init() - register spmi driver for qpnp-lpg */ int __init qpnp_lpg_init(void) { return spmi_driver_register(&qpnp_lpg_driver); } static void __exit qpnp_lpg_exit(void) { spmi_driver_unregister(&qpnp_lpg_driver); } MODULE_DESCRIPTION("QPNP PMIC LPG driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" QPNP_LPG_DRIVER_NAME); subsys_initcall(qpnp_lpg_init); module_exit(qpnp_lpg_exit);
gpl-2.0
javelinanddart/android_kernel_caf_ville
arch/x86/um/sys_call_table_32.c
782
1420
/* * System call table for UML/i386, copied from arch/x86/kernel/syscall_*.c * with some changes for UML. */ #include <linux/linkage.h> #include <linux/sys.h> #include <linux/cache.h> #include <generated/user_constants.h> #define __NO_STUBS /* * Below you can see, in terms of #define's, the differences between the x86-64 * and the UML syscall table. */ /* Not going to be implemented by UML, since we have no hardware. */ #define sys_iopl sys_ni_syscall #define sys_ioperm sys_ni_syscall #define sys_vm86old sys_ni_syscall #define sys_vm86 sys_ni_syscall #define old_mmap sys_old_mmap #define ptregs_fork sys_fork #define ptregs_execve sys_execve #define ptregs_iopl sys_iopl #define ptregs_vm86old sys_vm86old #define ptregs_clone sys_clone #define ptregs_vm86 sys_vm86 #define ptregs_sigaltstack sys_sigaltstack #define ptregs_vfork sys_vfork #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ; #include <asm/syscalls_32.h> #undef __SYSCALL_I386 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, typedef asmlinkage void (*sys_call_ptr_t)(void); extern asmlinkage void sys_ni_syscall(void); const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. */ [0 ... __NR_syscall_max] = &sys_ni_syscall, #include <asm/syscalls_32.h> }; int syscall_table_size = sizeof(sys_call_table);
gpl-2.0
snq-/bravo-kernel
fs/ntfs/namei.c
782
14422
/* * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS * project. * * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/dcache.h> #include <linux/exportfs.h> #include <linux/security.h> #include "attrib.h" #include "debug.h" #include "dir.h" #include "mft.h" #include "ntfs.h" /** * ntfs_lookup - find the inode represented by a dentry in a directory inode * @dir_ino: directory inode in which to look for the inode * @dent: dentry representing the inode to look for * @nd: lookup nameidata * * In short, ntfs_lookup() looks for the inode represented by the dentry @dent * in the directory inode @dir_ino and if found attaches the inode to the * dentry @dent. * * In more detail, the dentry @dent specifies which inode to look for by * supplying the name of the inode in @dent->d_name.name. ntfs_lookup() * converts the name to Unicode and walks the contents of the directory inode * @dir_ino looking for the converted Unicode name. If the name is found in the * directory, the corresponding inode is loaded by calling ntfs_iget() on its * inode number and the inode is associated with the dentry @dent via a call to * d_splice_alias(). 
* * If the name is not found in the directory, a NULL inode is inserted into the * dentry @dent via a call to d_add(). The dentry is then termed a negative * dentry. * * Only if an actual error occurs, do we return an error via ERR_PTR(). * * In order to handle the case insensitivity issues of NTFS with regards to the * dcache and the dcache requiring only one dentry per directory, we deal with * dentry aliases that only differ in case in ->ntfs_lookup() while maintaining * a case sensitive dcache. This means that we get the full benefit of dcache * speed when the file/directory is looked up with the same case as returned by * ->ntfs_readdir() but that a lookup for any other case (or for the short file * name) will not find anything in dcache and will enter ->ntfs_lookup() * instead, where we search the directory for a fully matching file name * (including case) and if that is not found, we search for a file name that * matches with different case and if that has non-POSIX semantics we return * that. We actually do only one search (case sensitive) and keep tabs on * whether we have found a case insensitive match in the process. * * To simplify matters for us, we do not treat the short vs long filenames as * two hard links but instead if the lookup matches a short filename, we * return the dentry for the corresponding long filename instead. * * There are three cases we need to distinguish here: * * 1) @dent perfectly matches (i.e. including case) a directory entry with a * file name in the WIN32 or POSIX namespaces. In this case * ntfs_lookup_inode_by_name() will return with name set to NULL and we * just d_splice_alias() @dent. * 2) @dent matches (not including case) a directory entry with a file name in * the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return * with name set to point to a kmalloc()ed ntfs_name structure containing * the properly cased little endian Unicode name. 
We convert the name to the * current NLS code page, search if a dentry with this name already exists * and if so return that instead of @dent. At this point things are * complicated by the possibility of 'disconnected' dentries due to NFS * which we deal with appropriately (see the code comments). The VFS will * then destroy the old @dent and use the one we returned. If a dentry is * not found, we allocate a new one, d_splice_alias() it, and return it as * above. * 3) @dent matches either perfectly or not (i.e. we don't care about case) a * directory entry with a file name in the DOS namespace. In this case * ntfs_lookup_inode_by_name() will return with name set to point to a * kmalloc()ed ntfs_name structure containing the mft reference (cpu endian) * of the inode. We use the mft reference to read the inode and to find the * file name in the WIN32 namespace corresponding to the matched short file * name. We then convert the name to the current NLS code page, and proceed * searching for a dentry with this name, etc, as in case 2), above. * * Locking: Caller must hold i_mutex on the directory. */ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, struct nameidata *nd) { ntfs_volume *vol = NTFS_SB(dir_ino->i_sb); struct inode *dent_inode; ntfschar *uname; ntfs_name *name = NULL; MFT_REF mref; unsigned long dent_ino; int uname_len; ntfs_debug("Looking up %s in directory inode 0x%lx.", dent->d_name.name, dir_ino->i_ino); /* Convert the name of the dentry to Unicode. */ uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, &uname); if (uname_len < 0) { if (uname_len != -ENAMETOOLONG) ntfs_error(vol->sb, "Failed to convert name to " "Unicode."); return ERR_PTR(uname_len); } mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, &name); kmem_cache_free(ntfs_name_cache, uname); if (!IS_ERR_MREF(mref)) { dent_ino = MREF(mref); ntfs_debug("Found inode 0x%lx. 
Calling ntfs_iget.", dent_ino); dent_inode = ntfs_iget(vol->sb, dent_ino); if (likely(!IS_ERR(dent_inode))) { /* Consistency check. */ if (is_bad_inode(dent_inode) || MSEQNO(mref) == NTFS_I(dent_inode)->seq_no || dent_ino == FILE_MFT) { /* Perfect WIN32/POSIX match. -- Case 1. */ if (!name) { ntfs_debug("Done. (Case 1.)"); return d_splice_alias(dent_inode, dent); } /* * We are too indented. Handle imperfect * matches and short file names further below. */ goto handle_name; } ntfs_error(vol->sb, "Found stale reference to inode " "0x%lx (reference sequence number = " "0x%x, inode sequence number = 0x%x), " "returning -EIO. Run chkdsk.", dent_ino, MSEQNO(mref), NTFS_I(dent_inode)->seq_no); iput(dent_inode); dent_inode = ERR_PTR(-EIO); } else ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with " "error code %li.", dent_ino, PTR_ERR(dent_inode)); kfree(name); /* Return the error code. */ return (struct dentry *)dent_inode; } /* It is guaranteed that @name is no longer allocated at this point. */ if (MREF_ERR(mref) == -ENOENT) { ntfs_debug("Entry was not found, adding negative dentry."); /* The dcache will handle negative entries. */ d_add(dent, NULL); ntfs_debug("Done."); return NULL; } ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error " "code %i.", -MREF_ERR(mref)); return ERR_PTR(MREF_ERR(mref)); // TODO: Consider moving this lot to a separate function! (AIA) handle_name: { MFT_RECORD *m; ntfs_attr_search_ctx *ctx; ntfs_inode *ni = NTFS_I(dent_inode); int err; struct qstr nls_name; nls_name.name = NULL; if (name->type != FILE_NAME_DOS) { /* Case 2. */ ntfs_debug("Case 2."); nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&name->name, name->len, (unsigned char**)&nls_name.name, 0); kfree(name); } else /* if (name->type == FILE_NAME_DOS) */ { /* Case 3. */ FILE_NAME_ATTR *fn; ntfs_debug("Case 3."); kfree(name); /* Find the WIN32 name corresponding to the matched DOS name. 
*/ ni = NTFS_I(dent_inode); m = map_mft_record(ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } do { ATTR_RECORD *a; u32 val_len; err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_error(vol->sb, "Inode corrupt: No WIN32 " "namespace counterpart to DOS " "file name. Run chkdsk."); if (err == -ENOENT) err = -EIO; goto err_out; } /* Consistency checks. */ a = ctx->attr; if (a->non_resident || a->flags) goto eio_err_out; val_len = le32_to_cpu(a->data.resident.value_length); if (le16_to_cpu(a->data.resident.value_offset) + val_len > le32_to_cpu(a->length)) goto eio_err_out; fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu( ctx->attr->data.resident.value_offset)); if ((u32)(fn->file_name_length * sizeof(ntfschar) + sizeof(FILE_NAME_ATTR)) > val_len) goto eio_err_out; } while (fn->file_name_type != FILE_NAME_WIN32); /* Convert the found WIN32 name to current NLS code page. */ nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&fn->file_name, fn->file_name_length, (unsigned char**)&nls_name.name, 0); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); } m = NULL; ctx = NULL; /* Check if a conversion error occurred. */ if ((signed)nls_name.len < 0) { err = (signed)nls_name.len; goto err_out; } nls_name.hash = full_name_hash(nls_name.name, nls_name.len); dent = d_add_ci(dent, dent_inode, &nls_name); kfree(nls_name.name); return dent; eio_err_out: ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk."); err = -EIO; err_out: if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(ni); iput(dent_inode); ntfs_error(vol->sb, "Failed, returning error code %i.", err); return ERR_PTR(err); } } /** * Inode operations for directories. */ const struct inode_operations ntfs_dir_inode_ops = { .lookup = ntfs_lookup, /* VFS: Lookup directory. 
*/ }; /** * ntfs_get_parent - find the dentry of the parent of a given directory dentry * @child_dent: dentry of the directory whose parent directory to find * * Find the dentry for the parent directory of the directory specified by the * dentry @child_dent. This function is called from * fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the * default ->decode_fh() which is export_decode_fh() in the same file. * * The code is based on the ext3 ->get_parent() implementation found in * fs/ext3/namei.c::ext3_get_parent(). * * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down. * * Return the dentry of the parent directory on success or the error code on * error (IS_ERR() is true). */ static struct dentry *ntfs_get_parent(struct dentry *child_dent) { struct inode *vi = child_dent->d_inode; ntfs_inode *ni = NTFS_I(vi); MFT_RECORD *mrec; ntfs_attr_search_ctx *ctx; ATTR_RECORD *attr; FILE_NAME_ATTR *fn; unsigned long parent_ino; int err; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); /* Get the mft record of the inode belonging to the child dentry. */ mrec = map_mft_record(ni); if (IS_ERR(mrec)) return (struct dentry *)mrec; /* Find the first file name attribute in the mft record. */ ctx = ntfs_attr_get_search_ctx(ni, mrec); if (unlikely(!ctx)) { unmap_mft_record(ni); return ERR_PTR(-ENOMEM); } try_next: err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); if (err == -ENOENT) ntfs_error(vi->i_sb, "Inode 0x%lx does not have a " "file name attribute. Run chkdsk.", vi->i_ino); return ERR_PTR(err); } attr = ctx->attr; if (unlikely(attr->non_resident)) goto try_next; fn = (FILE_NAME_ATTR *)((u8 *)attr + le16_to_cpu(attr->data.resident.value_offset)); if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) > (u8*)attr + le32_to_cpu(attr->length))) goto try_next; /* Get the inode number of the parent directory. 
*/ parent_ino = MREF_LE(fn->parent_directory); /* Release the search context and the mft record of the child. */ ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); return d_obtain_alias(ntfs_iget(vi->i_sb, parent_ino)); } static struct inode *ntfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; inode = ntfs_iget(sb, ino); if (!IS_ERR(inode)) { if (is_bad_inode(inode) || inode->i_generation != generation) { iput(inode); inode = ERR_PTR(-ESTALE); } } return inode; } static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } /** * Export operations allowing NFS exporting of mounted NTFS partitions. * * We use the default ->encode_fh() for now. Note that they * use 32 bits to store the inode number which is an unsigned long so on 64-bit * architectures is usually 64 bits so it would all fail horribly on huge * volumes. I guess we need to define our own encode and decode fh functions * that store 64-bit inode numbers at some point but for now we will ignore the * problem... * * We also use the default ->get_name() helper (used by ->decode_fh() via * fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs * independent. * * The default ->get_parent() just returns -EACCES so we have to provide our * own and the default ->get_dentry() is incompatible with NTFS due to not * allowing the inode number 0 which is used in NTFS for the system file $MFT * and due to using iget() whereas NTFS needs ntfs_iget(). */ const struct export_operations ntfs_export_ops = { .get_parent = ntfs_get_parent, /* Find the parent of a given directory. */ .fh_to_dentry = ntfs_fh_to_dentry, .fh_to_parent = ntfs_fh_to_parent, };
gpl-2.0
bio4554/android-4.1
drivers/net/can/sja1000/sja1000_isa.c
1294
8350
/* * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> #include "sja1000.h" #define DRV_NAME "sja1000_isa" #define MAXDEV 8 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the ISA bus"); MODULE_LICENSE("GPL v2"); #define CLK_DEFAULT 16000000 /* 16 MHz */ #define CDR_DEFAULT (CDR_CBP | CDR_CLK_OFF) #define OCR_DEFAULT OCR_TX0_PUSHPULL static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int irq[MAXDEV]; static int clk[MAXDEV]; static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */ module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); module_param_array(mem, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); module_param_array(indirect, int, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_array(irq, int, NULL, S_IRUGO); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, S_IRUGO); MODULE_PARM_DESC(clk, "External oscillator clock frequency " "(default=16000000 [16 MHz])"); module_param_array(cdr, byte, NULL, S_IRUGO); MODULE_PARM_DESC(cdr, "Clock divider register " "(default=0x48 [CDR_CBP | CDR_CLK_OFF])"); module_param_array(ocr, byte, NULL, S_IRUGO); MODULE_PARM_DESC(ocr, "Output control register " "(default=0x18 [OCR_TX0_PUSHPULL])"); #define SJA1000_IOSIZE 0x20 #define SJA1000_IOSIZE_INDIRECT 0x02 static struct platform_device *sja1000_isa_devs[MAXDEV]; static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg) { return readb(priv->reg_base + reg); } static void sja1000_isa_mem_write_reg(const struct sja1000_priv *priv, int reg, u8 val) { writeb(val, priv->reg_base + reg); } static u8 sja1000_isa_port_read_reg(const struct sja1000_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, int reg) { unsigned long flags, base = (unsigned long)priv->reg_base; u8 readval; spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); readval = inb(base + 1); spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); return readval; } static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, int reg, u8 val) { unsigned long flags, 
base = (unsigned long)priv->reg_base; spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); outb(val, base + 1); spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); } static int sja1000_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct sja1000_priv *priv; void __iomem *base = NULL; int iosize = SJA1000_IOSIZE; int idx = pdev->id; int err; dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", idx, port[idx], mem[idx], irq[idx]); if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, DRV_NAME)) { err = -EBUSY; goto exit; } base = ioremap_nocache(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; } } else { if (indirect[idx] > 0 || (indirect[idx] == -1 && indirect[0] > 0)) iosize = SJA1000_IOSIZE_INDIRECT; if (!request_region(port[idx], iosize, DRV_NAME)) { err = -EBUSY; goto exit; } } dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap; } priv = netdev_priv(dev); dev->irq = irq[idx]; priv->irq_flags = IRQF_SHARED; if (mem[idx]) { priv->reg_base = base; dev->base_addr = mem[idx]; priv->read_reg = sja1000_isa_mem_read_reg; priv->write_reg = sja1000_isa_mem_write_reg; } else { priv->reg_base = (void __iomem *)port[idx]; dev->base_addr = port[idx]; if (iosize == SJA1000_IOSIZE_INDIRECT) { priv->read_reg = sja1000_isa_port_read_reg_indirect; priv->write_reg = sja1000_isa_port_write_reg_indirect; spin_lock_init(&indirect_lock[idx]); } else { priv->read_reg = sja1000_isa_port_read_reg; priv->write_reg = sja1000_isa_port_write_reg; } } if (clk[idx]) priv->can.clock.freq = clk[idx] / 2; else if (clk[0]) priv->can.clock.freq = clk[0] / 2; else priv->can.clock.freq = CLK_DEFAULT / 2; if (ocr[idx] != 0xff) priv->ocr = ocr[idx]; else if (ocr[0] != 0xff) priv->ocr = ocr[0]; else priv->ocr = OCR_DEFAULT; if (cdr[idx] != 0xff) priv->cdr = cdr[idx]; else if (cdr[0] != 0xff) priv->cdr = cdr[0]; else priv->cdr = CDR_DEFAULT; platform_set_drvdata(pdev, dev); 
SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = idx; err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_unmap; } dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; exit_unmap: if (mem[idx]) iounmap(base); exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); exit: return err; } static int sja1000_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sja1000_priv *priv = netdev_priv(dev); int idx = pdev->id; unregister_sja1000dev(dev); if (mem[idx]) { iounmap(priv->reg_base); release_mem_region(mem[idx], SJA1000_IOSIZE); } else { if (priv->read_reg == sja1000_isa_port_read_reg_indirect) release_region(port[idx], SJA1000_IOSIZE_INDIRECT); else release_region(port[idx], SJA1000_IOSIZE); } free_sja1000dev(dev); return 0; } static struct platform_driver sja1000_isa_driver = { .probe = sja1000_isa_probe, .remove = sja1000_isa_remove, .driver = { .name = DRV_NAME, }, }; static int __init sja1000_isa_init(void) { int idx, err; for (idx = 0; idx < MAXDEV; idx++) { if ((port[idx] || mem[idx]) && irq[idx]) { sja1000_isa_devs[idx] = platform_device_alloc(DRV_NAME, idx); if (!sja1000_isa_devs[idx]) { err = -ENOMEM; goto exit_free_devices; } err = platform_device_add(sja1000_isa_devs[idx]); if (err) { platform_device_put(sja1000_isa_devs[idx]); goto exit_free_devices; } pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, " "irq=%d\n", DRV_NAME, idx, port[idx], mem[idx], irq[idx]); } else if (idx == 0 || port[idx] || mem[idx]) { pr_err("%s: insufficient parameters supplied\n", DRV_NAME); err = -EINVAL; goto exit_free_devices; } } err = platform_driver_register(&sja1000_isa_driver); if (err) goto exit_free_devices; pr_info("Legacy %s driver for max. 
%d devices registered\n", DRV_NAME, MAXDEV); return 0; exit_free_devices: while (--idx >= 0) { if (sja1000_isa_devs[idx]) platform_device_unregister(sja1000_isa_devs[idx]); } return err; } static void __exit sja1000_isa_exit(void) { int idx; platform_driver_unregister(&sja1000_isa_driver); for (idx = 0; idx < MAXDEV; idx++) { if (sja1000_isa_devs[idx]) platform_device_unregister(sja1000_isa_devs[idx]); } } module_init(sja1000_isa_init); module_exit(sja1000_isa_exit);
gpl-2.0
ZHAW-INES/rioxo-linux-2.6
arch/arm/mach-l7200/core.c
1550
2726
/* * linux/arch/arm/mm/mm-lusl7200.c * * Copyright (C) 2000 Steve Hill (sjhill@cotw.com) * * Extra MM routines for L7200 architecture */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/device.h> #include <asm/types.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/page.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> /* * IRQ base register */ #define IRQ_BASE (IO_BASE_2 + 0x1000) /* * Normal IRQ registers */ #define IRQ_STATUS (*(volatile unsigned long *) (IRQ_BASE + 0x000)) #define IRQ_RAWSTATUS (*(volatile unsigned long *) (IRQ_BASE + 0x004)) #define IRQ_ENABLE (*(volatile unsigned long *) (IRQ_BASE + 0x008)) #define IRQ_ENABLECLEAR (*(volatile unsigned long *) (IRQ_BASE + 0x00c)) #define IRQ_SOFT (*(volatile unsigned long *) (IRQ_BASE + 0x010)) #define IRQ_SOURCESEL (*(volatile unsigned long *) (IRQ_BASE + 0x018)) /* * Fast IRQ registers */ #define FIQ_STATUS (*(volatile unsigned long *) (IRQ_BASE + 0x100)) #define FIQ_RAWSTATUS (*(volatile unsigned long *) (IRQ_BASE + 0x104)) #define FIQ_ENABLE (*(volatile unsigned long *) (IRQ_BASE + 0x108)) #define FIQ_ENABLECLEAR (*(volatile unsigned long *) (IRQ_BASE + 0x10c)) #define FIQ_SOFT (*(volatile unsigned long *) (IRQ_BASE + 0x110)) #define FIQ_SOURCESEL (*(volatile unsigned long *) (IRQ_BASE + 0x118)) static void l7200_mask_irq(unsigned int irq) { IRQ_ENABLECLEAR = 1 << irq; } static void l7200_unmask_irq(unsigned int irq) { IRQ_ENABLE = 1 << irq; } static struct irq_chip l7200_irq_chip = { .ack = l7200_mask_irq, .mask = l7200_mask_irq, .unmask = l7200_unmask_irq }; static void __init l7200_init_irq(void) { int irq; IRQ_ENABLECLEAR = 0xffffffff; /* clear all interrupt enables */ FIQ_ENABLECLEAR = 0xffffffff; /* clear all fast interrupt enables */ for (irq = 0; irq < NR_IRQS; irq++) { set_irq_chip(irq, &l7200_irq_chip); set_irq_flags(irq, IRQF_VALID); set_irq_handler(irq, handle_level_irq); } 
init_FIQ(); } static struct map_desc l7200_io_desc[] __initdata = { { IO_BASE, IO_START, IO_SIZE, MT_DEVICE }, { IO_BASE_2, IO_START_2, IO_SIZE_2, MT_DEVICE }, { AUX_BASE, AUX_START, AUX_SIZE, MT_DEVICE }, { FLASH1_BASE, FLASH1_START, FLASH1_SIZE, MT_DEVICE }, { FLASH2_BASE, FLASH2_START, FLASH2_SIZE, MT_DEVICE } }; static void __init l7200_map_io(void) { iotable_init(l7200_io_desc, ARRAY_SIZE(l7200_io_desc)); } MACHINE_START(L7200, "LinkUp Systems L7200") /* Maintainer: Steve Hill / Scott McConnell */ .phys_io = 0x80040000, .io_pg_offst = ((0xd0000000) >> 18) & 0xfffc, .map_io = l7200_map_io, .init_irq = l7200_init_irq, MACHINE_END
gpl-2.0
wuby986/Sixty-4Stroke-kernel
drivers/bluetooth/btmrvl_sdio.c
2062
27695
/** * Marvell BT-over-SDIO driver: SDIO interface related functions. * * Copyright (C) 2009, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. **/ #include <linux/firmware.h> #include <linux/slab.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <linux/module.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" #include "btmrvl_sdio.h" #define VERSION "1.0" /* The btmrvl_sdio_remove() callback function is called * when user removes this module from kernel space or ejects * the card from the slot. The driver handles these 2 cases * differently. * If the user is removing the module, a MODULE_SHUTDOWN_REQ * command is sent to firmware and interrupt will be disabled. * If the card is removed, there is no need to send command * or disable interrupt. * * The variable 'user_rmmod' is used to distinguish these two * scenarios. This flag is initialized as FALSE in case the card * is removed, and will be set to TRUE for module removal when * module_exit function is called. 
*/ static u8 user_rmmod; static u8 sdio_ireg; static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = { .cfg = 0x03, .host_int_mask = 0x04, .host_intstatus = 0x05, .card_status = 0x20, .sq_read_base_addr_a0 = 0x10, .sq_read_base_addr_a1 = 0x11, .card_fw_status0 = 0x40, .card_fw_status1 = 0x41, .card_rx_len = 0x42, .card_rx_unit = 0x43, .io_port_0 = 0x00, .io_port_1 = 0x01, .io_port_2 = 0x02, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { .cfg = 0x00, .host_int_mask = 0x02, .host_intstatus = 0x03, .card_status = 0x30, .sq_read_base_addr_a0 = 0x40, .sq_read_base_addr_a1 = 0x41, .card_revision = 0x5c, .card_fw_status0 = 0x60, .card_fw_status1 = 0x61, .card_rx_len = 0x62, .card_rx_unit = 0x63, .io_port_0 = 0x78, .io_port_1 = 0x79, .io_port_2 = 0x7a, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = { .cfg = 0x00, .host_int_mask = 0x02, .host_intstatus = 0x03, .card_status = 0x50, .sq_read_base_addr_a0 = 0x60, .sq_read_base_addr_a1 = 0x61, .card_revision = 0xbc, .card_fw_status0 = 0xc0, .card_fw_status1 = 0xc1, .card_rx_len = 0xc2, .card_rx_unit = 0xc3, .io_port_0 = 0xd8, .io_port_1 = 0xd9, .io_port_2 = 0xda, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { .helper = "mrvl/sd8688_helper.bin", .firmware = "mrvl/sd8688.bin", .reg = &btmrvl_reg_8688, .sd_blksz_fw_dl = 64, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { .helper = NULL, .firmware = "mrvl/sd8787_uapsta.bin", .reg = &btmrvl_reg_87xx, .sd_blksz_fw_dl = 256, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .helper = NULL, .firmware = "mrvl/sd8797_uapsta.bin", .reg = &btmrvl_reg_87xx, .sd_blksz_fw_dl = 256, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { .helper = NULL, .firmware = "mrvl/sd8897_uapsta.bin", .reg = &btmrvl_reg_88xx, .sd_blksz_fw_dl = 256, }; static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8688 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), .driver_data = 
(unsigned long) &btmrvl_sdio_sd8688 }, /* Marvell SD8787 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, /* Marvell SD8787 Bluetooth AMP device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B), .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, /* Marvell SD8897 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), .driver_data = (unsigned long) &btmrvl_sdio_sd8897 }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btmrvl_sdio_ids); static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card) { u8 reg; int ret; reg = sdio_readb(card->func, card->reg->card_rx_unit, &ret); if (!ret) card->rx_unit = reg; return ret; } static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat) { u8 fws0, fws1; int ret; *dat = 0; fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); if (ret) return -EIO; fws1 = sdio_readb(card->func, card->reg->card_fw_status1, &ret); if (ret) return -EIO; *dat = (((u16) fws1) << 8) | fws0; return 0; } static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat) { u8 reg; int ret; reg = sdio_readb(card->func, card->reg->card_rx_len, &ret); if (!ret) *dat = (u16) reg << card->rx_unit; return ret; } static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card, u8 mask) { int ret; sdio_writeb(card->func, mask, card->reg->host_int_mask, &ret); if (ret) { BT_ERR("Unable to enable the host interrupt!"); ret = -EIO; } return ret; } static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card, u8 mask) { u8 host_int_mask; int ret; host_int_mask = sdio_readb(card->func, card->reg->host_int_mask, &ret); if (ret) return -EIO; host_int_mask &= ~mask; sdio_writeb(card->func, host_int_mask, card->reg->host_int_mask, &ret); if (ret < 0) { BT_ERR("Unable to 
disable the host interrupt!"); return -EIO; } return 0; } static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits) { unsigned int tries; u8 status; int ret; for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) { status = sdio_readb(card->func, card->reg->card_status, &ret); if (ret) goto failed; if ((status & bits) == bits) return ret; udelay(1); } ret = -ETIMEDOUT; failed: BT_ERR("FAILED! ret=%d", ret); return ret; } static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, int pollnum) { u16 firmwarestat; int tries, ret; /* Wait for firmware to become ready */ for (tries = 0; tries < pollnum; tries++) { sdio_claim_host(card->func); ret = btmrvl_sdio_read_fw_status(card, &firmwarestat); sdio_release_host(card->func); if (ret < 0) continue; if (firmwarestat == FIRMWARE_READY) return 0; msleep(10); } return -ETIMEDOUT; } static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) { const struct firmware *fw_helper = NULL; const u8 *helper = NULL; int ret; void *tmphlprbuf = NULL; int tmphlprbufsz, hlprblknow, helperlen; u8 *helperbuf; u32 tx_len; ret = request_firmware(&fw_helper, card->helper, &card->func->dev); if ((ret < 0) || !fw_helper) { BT_ERR("request_firmware(helper) failed, error code = %d", ret); ret = -ENOENT; goto done; } helper = fw_helper->data; helperlen = fw_helper->size; BT_DBG("Downloading helper image (%d bytes), block size %d bytes", helperlen, SDIO_BLOCK_SIZE); tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); tmphlprbuf = kzalloc(tmphlprbufsz, GFP_KERNEL); if (!tmphlprbuf) { BT_ERR("Unable to allocate buffer for helper." 
" Terminating download"); ret = -ENOMEM; goto done; } helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN); /* Perform helper data transfer */ tx_len = (FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE) - SDIO_HEADER_LEN; hlprblknow = 0; do { ret = btmrvl_sdio_poll_card_status(card, CARD_IO_READY | DN_LD_CARD_RDY); if (ret < 0) { BT_ERR("Helper download poll status timeout @ %d", hlprblknow); goto done; } /* Check if there is more data? */ if (hlprblknow >= helperlen) break; if (helperlen - hlprblknow < tx_len) tx_len = helperlen - hlprblknow; /* Little-endian */ helperbuf[0] = ((tx_len & 0x000000ff) >> 0); helperbuf[1] = ((tx_len & 0x0000ff00) >> 8); helperbuf[2] = ((tx_len & 0x00ff0000) >> 16); helperbuf[3] = ((tx_len & 0xff000000) >> 24); memcpy(&helperbuf[SDIO_HEADER_LEN], &helper[hlprblknow], tx_len); /* Now send the data */ ret = sdio_writesb(card->func, card->ioport, helperbuf, FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE); if (ret < 0) { BT_ERR("IO error during helper download @ %d", hlprblknow); goto done; } hlprblknow += tx_len; } while (true); BT_DBG("Transferring helper image EOF block"); memset(helperbuf, 0x0, SDIO_BLOCK_SIZE); ret = sdio_writesb(card->func, card->ioport, helperbuf, SDIO_BLOCK_SIZE); if (ret < 0) { BT_ERR("IO error in writing helper image EOF block"); goto done; } ret = 0; done: kfree(tmphlprbuf); release_firmware(fw_helper); return ret; } static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) { const struct firmware *fw_firmware = NULL; const u8 *firmware = NULL; int firmwarelen, tmpfwbufsz, ret; unsigned int tries, offset; u8 base0, base1; void *tmpfwbuf = NULL; u8 *fwbuf; u16 len, blksz_dl = card->sd_blksz_fw_dl; int txlen = 0, tx_blocks = 0, count = 0; ret = request_firmware(&fw_firmware, card->firmware, &card->func->dev); if ((ret < 0) || !fw_firmware) { BT_ERR("request_firmware(firmware) failed, error code = %d", ret); ret = -ENOENT; goto done; } firmware = fw_firmware->data; firmwarelen = fw_firmware->size; 
BT_DBG("Downloading FW image (%d bytes)", firmwarelen); tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); tmpfwbuf = kzalloc(tmpfwbufsz, GFP_KERNEL); if (!tmpfwbuf) { BT_ERR("Unable to allocate buffer for firmware." " Terminating download"); ret = -ENOMEM; goto done; } /* Ensure aligned firmware buffer */ fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN); /* Perform firmware data transfer */ offset = 0; do { ret = btmrvl_sdio_poll_card_status(card, CARD_IO_READY | DN_LD_CARD_RDY); if (ret < 0) { BT_ERR("FW download with helper poll status" " timeout @ %d", offset); goto done; } /* Check if there is more data ? */ if (offset >= firmwarelen) break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { base0 = sdio_readb(card->func, card->reg->sq_read_base_addr_a0, &ret); if (ret) { BT_ERR("BASE0 register read failed:" " base0 = 0x%04X(%d)." " Terminating download", base0, base0); ret = -EIO; goto done; } base1 = sdio_readb(card->func, card->reg->sq_read_base_addr_a1, &ret); if (ret) { BT_ERR("BASE1 register read failed:" " base1 = 0x%04X(%d)." " Terminating download", base1, base1); ret = -EIO; goto done; } len = (((u16) base1) << 8) | base0; if (len) break; udelay(10); } if (!len) break; else if (len > BTM_UPLD_SIZE) { BT_ERR("FW download failure @%d, invalid length %d", offset, len); ret = -EINVAL; goto done; } txlen = len; if (len & BIT(0)) { count++; if (count > MAX_WRITE_IOMEM_RETRY) { BT_ERR("FW download failure @%d, " "over max retry count", offset); ret = -EIO; goto done; } BT_ERR("FW CRC error indicated by the helper: " "len = 0x%04X, txlen = %d", len, txlen); len &= ~BIT(0); /* Set txlen to 0 so as to resend from same offset */ txlen = 0; } else { count = 0; /* Last block ? 
*/ if (firmwarelen - offset < txlen) txlen = firmwarelen - offset; tx_blocks = (txlen + blksz_dl - 1) / blksz_dl; memcpy(fwbuf, &firmware[offset], txlen); } ret = sdio_writesb(card->func, card->ioport, fwbuf, tx_blocks * blksz_dl); if (ret < 0) { BT_ERR("FW download, writesb(%d) failed @%d", count, offset); sdio_writeb(card->func, HOST_CMD53_FIN, card->reg->cfg, &ret); if (ret) BT_ERR("writeb failed (CFG)"); } offset += txlen; } while (true); BT_DBG("FW download over, size %d bytes", offset); ret = 0; done: kfree(tmpfwbuf); release_firmware(fw_firmware); return ret; } static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) { u16 buf_len = 0; int ret, num_blocks, blksz; struct sk_buff *skb = NULL; u32 type; u8 *payload = NULL; struct hci_dev *hdev = priv->btmrvl_dev.hcidev; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; if (!card || !card->func) { BT_ERR("card or function is NULL!"); ret = -EINVAL; goto exit; } /* Read the length of data to be transferred */ ret = btmrvl_sdio_read_rx_len(card, &buf_len); if (ret < 0) { BT_ERR("read rx_len failed"); ret = -EIO; goto exit; } blksz = SDIO_BLOCK_SIZE; num_blocks = DIV_ROUND_UP(buf_len, blksz); if (buf_len <= SDIO_HEADER_LEN || (num_blocks * blksz) > ALLOC_BUF_SIZE) { BT_ERR("invalid packet length: %d", buf_len); ret = -EINVAL; goto exit; } /* Allocate buffer */ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); goto exit; } if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) { skb_put(skb, (unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)); skb_pull(skb, (unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)); } payload = skb->data; ret = sdio_readsb(card->func, payload, card->ioport, num_blocks * blksz); if (ret < 0) { BT_ERR("readsb failed: %d", ret); ret = -EIO; goto exit; } /* This is SDIO specific header length: byte[2][1][0], type: byte[3] * (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor) */ buf_len = payload[0]; buf_len 
|= payload[1] << 8; buf_len |= payload[2] << 16; if (buf_len > blksz * num_blocks) { BT_ERR("Skip incorrect packet: hdrlen %d buffer %d", buf_len, blksz * num_blocks); ret = -EIO; goto exit; } type = payload[3]; switch (type) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_EVENT_PKT: bt_cb(skb)->pkt_type = type; skb->dev = (void *)hdev; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); if (type == HCI_EVENT_PKT) { if (btmrvl_check_evtpkt(priv, skb)) hci_recv_frame(skb); } else { hci_recv_frame(skb); } hdev->stat.byte_rx += buf_len; break; case MRVL_VENDOR_PKT: bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; skb->dev = (void *)hdev; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); if (btmrvl_process_event(priv, skb)) hci_recv_frame(skb); hdev->stat.byte_rx += buf_len; break; default: BT_ERR("Unknown packet type:%d", type); BT_ERR("hex: %*ph", blksz * num_blocks, payload); kfree_skb(skb); skb = NULL; break; } exit: if (ret) { hdev->stat.err_rx++; kfree_skb(skb); } return ret; } static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv) { ulong flags; u8 ireg; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; spin_lock_irqsave(&priv->driver_lock, flags); ireg = sdio_ireg; sdio_ireg = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); sdio_claim_host(card->func); if (ireg & DN_LD_HOST_INT_STATUS) { if (priv->btmrvl_dev.tx_dnld_rdy) BT_DBG("tx_done already received: " " int_status=0x%x", ireg); else priv->btmrvl_dev.tx_dnld_rdy = true; } if (ireg & UP_LD_HOST_INT_STATUS) btmrvl_sdio_card_to_host(priv); sdio_release_host(card->func); return 0; } static void btmrvl_sdio_interrupt(struct sdio_func *func) { struct btmrvl_private *priv; struct btmrvl_sdio_card *card; ulong flags; u8 ireg = 0; int ret; card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("sbi_interrupt(%p) card or priv is " "NULL, card=%p\n", func, card); return; } priv = card->priv; ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret); if (ret) { 
BT_ERR("sdio_readb: read int status register failed"); return; } if (ireg != 0) { /* * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS * Clear the interrupt status register and re-enable the * interrupt. */ BT_DBG("ireg = 0x%x", ireg); sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS | UP_LD_HOST_INT_STATUS), card->reg->host_intstatus, &ret); if (ret) { BT_ERR("sdio_writeb: clear int status register failed"); return; } } spin_lock_irqsave(&priv->driver_lock, flags); sdio_ireg |= ireg; spin_unlock_irqrestore(&priv->driver_lock, flags); btmrvl_interrupt(priv); } static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card) { struct sdio_func *func; u8 reg; int ret = 0; if (!card || !card->func) { BT_ERR("Error: card or function is NULL!"); ret = -EINVAL; goto failed; } func = card->func; sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) { BT_ERR("sdio_enable_func() failed: ret=%d", ret); ret = -EIO; goto release_host; } ret = sdio_claim_irq(func, btmrvl_sdio_interrupt); if (ret) { BT_ERR("sdio_claim_irq failed: ret=%d", ret); ret = -EIO; goto disable_func; } ret = sdio_set_block_size(card->func, SDIO_BLOCK_SIZE); if (ret) { BT_ERR("cannot set SDIO block size"); ret = -EIO; goto release_irq; } reg = sdio_readb(func, card->reg->io_port_0, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport = reg; reg = sdio_readb(func, card->reg->io_port_1, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport |= (reg << 8); reg = sdio_readb(func, card->reg->io_port_2, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport |= (reg << 16); BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport); sdio_set_drvdata(func, card); sdio_release_host(func); return 0; release_irq: sdio_release_irq(func); disable_func: sdio_disable_func(func); release_host: sdio_release_host(func); failed: return ret; } static int btmrvl_sdio_unregister_dev(struct btmrvl_sdio_card *card) { if (card && card->func) { 
sdio_claim_host(card->func); sdio_release_irq(card->func); sdio_disable_func(card->func); sdio_release_host(card->func); sdio_set_drvdata(card->func, NULL); } return 0; } static int btmrvl_sdio_enable_host_int(struct btmrvl_sdio_card *card) { int ret; if (!card || !card->func) return -EINVAL; sdio_claim_host(card->func); ret = btmrvl_sdio_enable_host_int_mask(card, HIM_ENABLE); btmrvl_sdio_get_rx_unit(card); sdio_release_host(card->func); return ret; } static int btmrvl_sdio_disable_host_int(struct btmrvl_sdio_card *card) { int ret; if (!card || !card->func) return -EINVAL; sdio_claim_host(card->func); ret = btmrvl_sdio_disable_host_int_mask(card, HIM_DISABLE); sdio_release_host(card->func); return ret; } static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, u8 *payload, u16 nb) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; int buf_block_len; int blksz; int i = 0; u8 *buf = NULL; void *tmpbuf = NULL; int tmpbufsz; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } buf = payload; if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1)) { tmpbufsz = ALIGN_SZ(nb, BTSDIO_DMA_ALIGN); tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; buf = (u8 *) ALIGN_ADDR(tmpbuf, BTSDIO_DMA_ALIGN); memcpy(buf, payload, nb); } blksz = SDIO_BLOCK_SIZE; buf_block_len = (nb + blksz - 1) / blksz; sdio_claim_host(card->func); do { /* Transfer data to card */ ret = sdio_writesb(card->func, card->ioport, buf, buf_block_len * blksz); if (ret < 0) { i++; BT_ERR("i=%d writesb failed: %d", i, ret); BT_ERR("hex: %*ph", nb, payload); ret = -EIO; if (i > MAX_WRITE_IOMEM_RETRY) goto exit; } } while (ret); priv->btmrvl_dev.tx_dnld_rdy = false; exit: sdio_release_host(card->func); kfree(tmpbuf); return ret; } static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) { int ret; u8 fws0; int pollnum = MAX_POLL_TRIES; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } if 
(!btmrvl_sdio_verify_fw_download(card, 1)) { BT_DBG("Firmware already downloaded!"); return 0; } sdio_claim_host(card->func); /* Check if other function driver is downloading the firmware */ fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); if (ret) { BT_ERR("Failed to read FW downloading status!"); ret = -EIO; goto done; } if (fws0) { BT_DBG("BT not the winner (%#x). Skip FW downloading", fws0); /* Give other function more time to download the firmware */ pollnum *= 10; } else { if (card->helper) { ret = btmrvl_sdio_download_helper(card); if (ret) { BT_ERR("Failed to download helper!"); ret = -EIO; goto done; } } if (btmrvl_sdio_download_fw_w_helper(card)) { BT_ERR("Failed to download firmware!"); ret = -EIO; goto done; } } sdio_release_host(card->func); /* * winner or not, with this test the FW synchronizes when the * module can continue its initialization */ if (btmrvl_sdio_verify_fw_download(card, pollnum)) { BT_ERR("FW failed to be active in time!"); return -ETIMEDOUT; } return 0; done: sdio_release_host(card->func); return ret; } static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } sdio_claim_host(card->func); sdio_writeb(card->func, HOST_POWER_UP, card->reg->cfg, &ret); sdio_release_host(card->func); BT_DBG("wake up firmware"); return ret; } static int btmrvl_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret = 0; struct btmrvl_private *priv = NULL; struct btmrvl_sdio_card *card = NULL; BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", id->vendor, id->device, id->class, func->num); card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->func = func; if (id->driver_data) { struct btmrvl_sdio_device *data = (void *) id->driver_data; card->helper = data->helper; card->firmware = data->firmware; card->reg = data->reg; 
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl; } if (btmrvl_sdio_register_dev(card) < 0) { BT_ERR("Failed to register BT device!"); return -ENODEV; } /* Disable the interrupts on the card */ btmrvl_sdio_disable_host_int(card); if (btmrvl_sdio_download_fw(card)) { BT_ERR("Downloading firmware failed!"); ret = -ENODEV; goto unreg_dev; } btmrvl_sdio_enable_host_int(card); priv = btmrvl_add_card(card); if (!priv) { BT_ERR("Initializing card failed!"); ret = -ENODEV; goto disable_host_int; } card->priv = priv; /* Initialize the interface specific function pointers */ priv->hw_host_to_card = btmrvl_sdio_host_to_card; priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; priv->hw_process_int_status = btmrvl_sdio_process_int_status; if (btmrvl_register_hdev(priv)) { BT_ERR("Register hdev failed!"); ret = -ENODEV; goto disable_host_int; } priv->btmrvl_dev.psmode = 1; btmrvl_enable_ps(priv); priv->btmrvl_dev.gpio_gap = 0xffff; btmrvl_send_hscfg_cmd(priv); return 0; disable_host_int: btmrvl_sdio_disable_host_int(card); unreg_dev: btmrvl_sdio_unregister_dev(card); return ret; } static void btmrvl_sdio_remove(struct sdio_func *func) { struct btmrvl_sdio_card *card; if (func) { card = sdio_get_drvdata(func); if (card) { /* Send SHUTDOWN command & disable interrupt * if user removes the module. 
*/ if (user_rmmod) { btmrvl_send_module_cfg_cmd(card->priv, MODULE_SHUTDOWN_REQ); btmrvl_sdio_disable_host_int(card); } BT_DBG("unregester dev"); btmrvl_sdio_unregister_dev(card); btmrvl_remove_card(card->priv); } } } static int btmrvl_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmrvl_sdio_card *card; struct btmrvl_private *priv; mmc_pm_flag_t pm_flags; struct hci_dev *hcidev; if (func) { pm_flags = sdio_get_host_pm_caps(func); BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func), pm_flags); if (!(pm_flags & MMC_PM_KEEP_POWER)) { BT_ERR("%s: cannot remain alive while suspended", sdio_func_id(func)); return -ENOSYS; } card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("card or priv structure is not valid"); return 0; } } else { BT_ERR("sdio_func is not specified"); return 0; } priv = card->priv; if (priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { BT_ERR("HS not actived, suspend failed!"); return -EBUSY; } } hcidev = priv->btmrvl_dev.hcidev; BT_DBG("%s: SDIO suspend", hcidev->name); hci_suspend_dev(hcidev); skb_queue_purge(&priv->adapter->tx_queue); priv->adapter->is_suspended = true; /* We will keep the power when hs enabled successfully */ if (priv->adapter->hs_state == HS_ACTIVATED) { BT_DBG("suspend with MMC_PM_KEEP_POWER"); return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); } else { BT_DBG("suspend without MMC_PM_KEEP_POWER"); return 0; } } static int btmrvl_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmrvl_sdio_card *card; struct btmrvl_private *priv; mmc_pm_flag_t pm_flags; struct hci_dev *hcidev; if (func) { pm_flags = sdio_get_host_pm_caps(func); BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func), pm_flags); card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("card or priv structure is not valid"); return 0; } } else { BT_ERR("sdio_func is not specified"); return 0; } priv = card->priv; if 
(!priv->adapter->is_suspended) { BT_DBG("device already resumed"); return 0; } priv->adapter->is_suspended = false; hcidev = priv->btmrvl_dev.hcidev; BT_DBG("%s: SDIO resume", hcidev->name); hci_resume_dev(hcidev); priv->hw_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name); return 0; } static const struct dev_pm_ops btmrvl_sdio_pm_ops = { .suspend = btmrvl_sdio_suspend, .resume = btmrvl_sdio_resume, }; static struct sdio_driver bt_mrvl_sdio = { .name = "btmrvl_sdio", .id_table = btmrvl_sdio_ids, .probe = btmrvl_sdio_probe, .remove = btmrvl_sdio_remove, .drv = { .owner = THIS_MODULE, .pm = &btmrvl_sdio_pm_ops, } }; static int __init btmrvl_sdio_init_module(void) { if (sdio_register_driver(&bt_mrvl_sdio) != 0) { BT_ERR("SDIO Driver Registration Failed"); return -ENODEV; } /* Clear the flag in case user removes the card. */ user_rmmod = 0; return 0; } static void __exit btmrvl_sdio_exit_module(void) { /* Set the flag as user is removing this module. */ user_rmmod = 1; sdio_unregister_driver(&bt_mrvl_sdio); } module_init(btmrvl_sdio_init_module); module_exit(btmrvl_sdio_exit_module); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); MODULE_FIRMWARE("mrvl/sd8688.bin"); MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
gpl-2.0
beroid/android_kernel_cyanogen_msm8916
net/ipv4/sysfs_net_ipv4.c
2830
2488
/*
 * net/ipv4/sysfs_net_ipv4.c
 *
 * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <net/tcp.h>

/*
 * Expand to a sysfs show/store handler pair plus a kobj_attribute for a
 * single non-negative integer tunable backed by _var.  A write that is
 * not a decimal integer, or that is negative, is rejected with -EINVAL.
 */
#define CREATE_IPV4_FILE(_name, _var)					\
static ssize_t _name##_show(struct kobject *kobj,			\
			    struct kobj_attribute *attr, char *buf)	\
{									\
	return sprintf(buf, "%d\n", _var);				\
}									\
static ssize_t _name##_store(struct kobject *kobj,			\
			     struct kobj_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	int val, ret;							\
	ret = sscanf(buf, "%d", &val);					\
	if (ret != 1 || val < 0)					\
		return -EINVAL;						\
	_var = val;							\
	return count;							\
}									\
static struct kobj_attribute _name##_attr =				\
	__ATTR(_name, 0644, _name##_show, _name##_store)

CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);

CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);

CREATE_IPV4_FILE(tcp_delack_seg, sysctl_tcp_delack_seg);
CREATE_IPV4_FILE(tcp_use_userconfig, sysctl_tcp_use_userconfig);

/* Everything published under /sys/kernel/ipv4/ */
static struct attribute *ipv4_attrs[] = {
	&tcp_wmem_min_attr.attr,
	&tcp_wmem_def_attr.attr,
	&tcp_wmem_max_attr.attr,
	&tcp_rmem_min_attr.attr,
	&tcp_rmem_def_attr.attr,
	&tcp_rmem_max_attr.attr,
	&tcp_delack_seg_attr.attr,
	&tcp_use_userconfig_attr.attr,
	NULL
};

static struct attribute_group ipv4_attr_group = {
	.attrs = ipv4_attrs,
};

/*
 * Create /sys/kernel/ipv4 and populate it with the tunables above.
 * On success the kobject reference is intentionally kept forever:
 * the files live for the lifetime of the kernel.
 */
static __init int sysfs_ipv4_init(void)
{
	struct kobject *ipv4_kobject;
	int ret;

	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
	if (!ipv4_kobject)
		return -ENOMEM;

	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
	if (ret)
		kobject_put(ipv4_kobject);

	return ret;
}

subsys_initcall(sysfs_ipv4_init);
gpl-2.0
sgs3/SGH-T999V_Kernel
drivers/pci/hotplug/pciehp_acpi.c
3086
4210
/*
 * ACPI related functions for PCI Express Hot Plug driver.
 *
 * Copyright (C) 2008 Kenji Kaneshige
 * Copyright (C) 2008 Fujitsu Limited.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "pciehp.h"

/* Slot detection strategies selectable via the pciehp_detect_mode param */
#define PCIEHP_DETECT_PCIE	(0)
#define PCIEHP_DETECT_ACPI	(1)
#define PCIEHP_DETECT_AUTO	(2)
#define PCIEHP_DETECT_DEFAULT	PCIEHP_DETECT_AUTO

/* Bookkeeping entry used while scanning ports for duplicate slot numbers */
struct dummy_slot {
	u32 number;
	struct list_head list;
};

static int slot_detection_mode;
static char *pciehp_detect_mode;
module_param(pciehp_detect_mode, charp, 0444);
MODULE_PARM_DESC(pciehp_detect_mode,
	"Slot detection mode: pcie, acpi, auto\n"
	" pcie - Use PCIe based slot detection\n"
	" acpi - Use ACPI for slot detection\n"
	" auto(default) - Auto select mode. Use acpi option if duplicate\n"
	" slot ids are found. Otherwise, use pcie option\n");

/*
 * In ACPI detection mode, report whether @dev should be driven by pciehp:
 * 0 when it should (or when we are not in ACPI mode at all), -ENODEV when
 * ACPI does not consider the device ejectable.
 */
int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
	if (slot_detection_mode != PCIEHP_DETECT_ACPI ||
	    acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
		return 0;
	return -ENODEV;
}

/* Translate the module parameter string into a PCIEHP_DETECT_* value */
static int __init parse_detect_mode(void)
{
	if (!pciehp_detect_mode)
		return PCIEHP_DETECT_DEFAULT;
	if (strcmp(pciehp_detect_mode, "pcie") == 0)
		return PCIEHP_DETECT_PCIE;
	if (strcmp(pciehp_detect_mode, "acpi") == 0)
		return PCIEHP_DETECT_ACPI;
	if (strcmp(pciehp_detect_mode, "auto") == 0)
		return PCIEHP_DETECT_AUTO;
	warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
	     pciehp_detect_mode);
	return PCIEHP_DETECT_DEFAULT;
}

static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);

/* Dummy driver used only to detect duplicate slot names at boot */
static int __init dummy_probe(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	struct dummy_slot *slot, *cur;
	acpi_handle handle;
	u32 slot_cap;
	int pos;

	pos = pci_pcie_cap(pdev);
	if (!pos)
		return -ENODEV;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;
	/* Physical Slot Number lives in SLTCAP[31:19] */
	slot->number = slot_cap >> 19;

	/* Count ports advertising an already-seen physical slot number */
	list_for_each_entry(cur, &dummy_slots, list) {
		if (cur->number == slot->number)
			dup_slot_id++;
	}
	list_add_tail(&slot->list, &dummy_slots);

	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
	if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
		acpi_slot_detected = 1;

	return -ENODEV;		/* dummy driver always returns error */
}

static struct pcie_port_service_driver __initdata dummy_driver = {
	.name		= "pciehp_dummy",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_HP,
	.probe		= dummy_probe,
};

/*
 * Probe every PCIe port once with the dummy driver, free the scratch
 * list, then decide: ACPI mode when ejectable slots were seen AND slot
 * ids collide, otherwise native PCIe mode.
 */
static int __init select_detection_mode(void)
{
	struct dummy_slot *slot, *next;

	if (pcie_port_service_register(&dummy_driver))
		return PCIEHP_DETECT_ACPI;
	pcie_port_service_unregister(&dummy_driver);

	list_for_each_entry_safe(slot, next, &dummy_slots, list) {
		list_del(&slot->list);
		kfree(slot);
	}

	if (acpi_slot_detected && dup_slot_id)
		return PCIEHP_DETECT_ACPI;
	return PCIEHP_DETECT_PCIE;
}

void __init pciehp_acpi_slot_detection_init(void)
{
	slot_detection_mode = parse_detect_mode();
	if (slot_detection_mode == PCIEHP_DETECT_AUTO)
		slot_detection_mode = select_detection_mode();

	if (slot_detection_mode == PCIEHP_DETECT_ACPI)
		info("Using ACPI for slot detection.\n");
}
gpl-2.0
hvaibhav/am335x-linux
drivers/video/fsl-diu-fb.c
3342
45870
/* * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. * * Freescale DIU Frame Buffer device driver * * Authors: Hongjun Chen <hong-jun.chen@freescale.com> * Paul Widmer <paul.widmer@freescale.com> * Srikanth Srinivasan <srikanth.srinivasan@freescale.com> * York Sun <yorksun@freescale.com> * * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/spinlock.h> #include <sysdev/fsl_soc.h> #include <linux/fsl-diu-fb.h> #include "edid.h" #define NUM_AOIS 5 /* 1 for plane 0, 2 for planes 1 & 2 each */ /* HW cursor parameters */ #define MAX_CURS 32 /* INT_STATUS/INT_MASK field descriptions */ #define INT_VSYNC 0x01 /* Vsync interrupt */ #define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */ #define INT_UNDRUN 0x04 /* Under run exception interrupt */ #define INT_PARERR 0x08 /* Display parameters error interrupt */ #define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */ /* * List of supported video modes * * The first entry is the default video mode. The remain entries are in * order if increasing resolution and frequency. The 320x240-60 mode is * the initial AOI for the second and third planes. 
*/ static struct fb_videomode __devinitdata fsl_diu_mode_db[] = { { .refresh = 60, .xres = 1024, .yres = 768, .pixclock = 15385, .left_margin = 160, .right_margin = 24, .upper_margin = 29, .lower_margin = 3, .hsync_len = 136, .vsync_len = 6, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 320, .yres = 240, .pixclock = 79440, .left_margin = 16, .right_margin = 16, .upper_margin = 16, .lower_margin = 5, .hsync_len = 48, .vsync_len = 1, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 48, .right_margin = 16, .upper_margin = 33, .lower_margin = 10, .hsync_len = 96, .vsync_len = 2, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 72, .xres = 640, .yres = 480, .pixclock = 32052, .left_margin = 128, .right_margin = 24, .upper_margin = 28, .lower_margin = 9, .hsync_len = 40, .vsync_len = 3, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 75, .xres = 640, .yres = 480, .pixclock = 31747, .left_margin = 120, .right_margin = 16, .upper_margin = 16, .lower_margin = 1, .hsync_len = 64, .vsync_len = 3, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 90, .xres = 640, .yres = 480, .pixclock = 25057, .left_margin = 120, .right_margin = 32, .upper_margin = 14, .lower_margin = 25, .hsync_len = 40, .vsync_len = 14, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 100, .xres = 640, .yres = 480, .pixclock = 22272, .left_margin = 48, .right_margin = 32, .upper_margin = 17, .lower_margin = 22, .hsync_len = 128, .vsync_len = 12, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 800, .yres = 480, .pixclock = 33805, .left_margin = 
96, .right_margin = 24, .upper_margin = 10, .lower_margin = 3, .hsync_len = 72, .vsync_len = 7, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 800, .yres = 600, .pixclock = 25000, .left_margin = 88, .right_margin = 40, .upper_margin = 23, .lower_margin = 1, .hsync_len = 128, .vsync_len = 4, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 854, .yres = 480, .pixclock = 31518, .left_margin = 104, .right_margin = 16, .upper_margin = 13, .lower_margin = 1, .hsync_len = 88, .vsync_len = 3, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 70, .xres = 1024, .yres = 768, .pixclock = 16886, .left_margin = 3, .right_margin = 3, .upper_margin = 2, .lower_margin = 2, .hsync_len = 40, .vsync_len = 18, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 75, .xres = 1024, .yres = 768, .pixclock = 15009, .left_margin = 3, .right_margin = 3, .upper_margin = 2, .lower_margin = 2, .hsync_len = 80, .vsync_len = 32, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 1280, .yres = 480, .pixclock = 18939, .left_margin = 353, .right_margin = 47, .upper_margin = 39, .lower_margin = 4, .hsync_len = 8, .vsync_len = 2, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 1280, .yres = 720, .pixclock = 13426, .left_margin = 192, .right_margin = 64, .upper_margin = 22, .lower_margin = 1, .hsync_len = 136, .vsync_len = 3, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }, { .refresh = 60, .xres = 1280, .yres = 1024, .pixclock = 9375, .left_margin = 38, .right_margin = 128, .upper_margin = 2, .lower_margin = 7, .hsync_len = 216, .vsync_len = 37, .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, 
.vmode = FB_VMODE_NONINTERLACED },	/* closes the videomode entry begun above */
	{
		/* 1280x1024 @ 70 Hz */
		.refresh = 70,
		.xres = 1280,
		.yres = 1024,
		.pixclock = 9380,
		.left_margin = 6,
		.right_margin = 6,
		.upper_margin = 4,
		.lower_margin = 4,
		.hsync_len = 60,
		.vsync_len = 94,
		.sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode = FB_VMODE_NONINTERLACED
	},
	{
		/* 1280x1024 @ 75 Hz */
		.refresh = 75,
		.xres = 1280,
		.yres = 1024,
		.pixclock = 9380,
		.left_margin = 6,
		.right_margin = 6,
		.upper_margin = 4,
		.lower_margin = 4,
		.hsync_len = 60,
		.vsync_len = 15,
		.sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode = FB_VMODE_NONINTERLACED
	},
	{
		/* 1920x1080 @ 60 Hz */
		.refresh = 60,
		.xres = 1920,
		.yres = 1080,
		.pixclock = 5787,
		.left_margin = 328,
		.right_margin = 120,
		.upper_margin = 34,
		.lower_margin = 1,
		.hsync_len = 208,
		.vsync_len = 3,
		.sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
		.vmode = FB_VMODE_NONINTERLACED
	},
};

/* Module/boot parameters (see module_param_named() at the end of the file) */
static char *fb_mode;			/* video mode string, e.g. "1024x768-32@60" */
static unsigned long default_bpp = 32;	/* bits per pixel when not given in the mode */
static enum fsl_diu_monitor_port monitor_port;	/* monitor port to drive */
static char *monitor_string;		/* textual form backing monitor_port */

#if defined(CONFIG_NOT_COHERENT_CACHE)
/* Scratch buffer used by the vsync ISR to displacement-flush the d-cache */
static u8 *coherence_data;
static size_t coherence_data_size;
static unsigned int d_cache_line_size;
#endif

/* Serializes open/release accounting across all AOIs */
static DEFINE_SPINLOCK(diu_lock);

enum mfb_index {
	PLANE0 = 0,	/* Plane 0, only one AOI that fills the screen */
	PLANE1_AOI0,	/* Plane 1, first AOI */
	PLANE1_AOI1,	/* Plane 1, second AOI */
	PLANE2_AOI0,	/* Plane 2, first AOI */
	PLANE2_AOI1,	/* Plane 2, second AOI */
};

/* Per-AOI state, stored in the fb_info 'par' field */
struct mfb_info {
	enum mfb_index index;
	char *id;			/* human-readable name of this AOI */
	int registered;			/* non-zero once register_framebuffer() succeeded */
	unsigned long pseudo_palette[16];
	struct diu_ad *ad;		/* Area Descriptor the DIU uses for this AOI */
	int cursor_reset;
	unsigned char g_alpha;		/* global alpha applied to the whole AOI */
	unsigned int count;		/* open count; panel is enabled while > 0 */
	int x_aoi_d;		/* aoi display x offset to physical screen */
	int y_aoi_d;		/* aoi display y offset to physical screen */
	struct fsl_diu_data *parent;
	u8 *edid_data;			/* EDID copied from the device tree (PLANE0 only) */
};

/**
 * struct fsl_diu_data - per-DIU data structure
 * @dma_addr: DMA address of this structure
 * @fsl_diu_info: fb_info objects, one per AOI
 * @dev_attr: sysfs structure
 * @irq: IRQ
 * @monitor_port: the monitor port this DIU is connected to
 * @diu_reg: pointer to the DIU hardware registers
 * @reg_lock: spinlock for register access
 * @dummy_aoi: video buffer for the 4x4 32-bit dummy AOI
 * @dummy_ad: DIU Area Descriptor for the dummy AOI
 * @ad[]: Area Descriptors for each real AOI
 * @gamma: gamma color table
 * @cursor: hardware cursor data
 *
 * This data structure must be allocated with 32-byte alignment, so that the
 * internal fields can be aligned properly.
 */
struct fsl_diu_data {
	dma_addr_t dma_addr;
	struct fb_info fsl_diu_info[NUM_AOIS];
	struct mfb_info mfb[NUM_AOIS];
	struct device_attribute dev_attr;
	unsigned int irq;
	enum fsl_diu_monitor_port monitor_port;
	struct diu __iomem *diu_reg;
	spinlock_t reg_lock;
	u8 dummy_aoi[4 * 4 * 4];
	struct diu_ad dummy_ad __aligned(8);
	struct diu_ad ad[NUM_AOIS] __aligned(8);
	u8 gamma[256 * 3] __aligned(32);
	u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
} __aligned(32);

/* Determine the DMA address of a member of the fsl_diu_data structure */
#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))

/* Initial per-AOI settings, copied into fsl_diu_data at probe time */
static struct mfb_info mfb_template[] = {
	{
		.index = PLANE0,
		.id = "Panel0",
		.registered = 0,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 0,
	},
	{
		.index = PLANE1_AOI0,
		.id = "Panel1 AOI0",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 0,
	},
	{
		.index = PLANE1_AOI1,
		.id = "Panel1 AOI1",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 0,
		.y_aoi_d = 480,
	},
	{
		.index = PLANE2_AOI0,
		.id = "Panel2 AOI0",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 640,
		.y_aoi_d = 0,
	},
	{
		.index = PLANE2_AOI1,
		.id = "Panel2 AOI1",
		.registered = 0,
		.g_alpha = 0xff,
		.count = 0,
		.x_aoi_d = 640,
		.y_aoi_d = 480,
	},
};

/**
 * fsl_diu_name_to_port - convert a port name to a monitor port enum
 *
 * Takes the name of a monitor port ("dvi", "lvds", or "dlvds") and returns
 * the enum fsl_diu_monitor_port that corresponds to that string.
 *
 * For compatibility with older versions, a number ("0", "1", or "2") is also
 * supported.
 *
 * If the string is unknown, DVI is assumed.
*
 * If the particular port is not supported by the platform, another port
 * (platform-specific) is chosen instead.
 */
static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
{
	enum fsl_diu_monitor_port port = FSL_DIU_PORT_DVI;
	unsigned long val;

	if (s) {
		/* Numeric form ("0".."2") maps directly onto the enum */
		if (!strict_strtoul(s, 10, &val) && (val <= 2))
			port = (enum fsl_diu_monitor_port) val;
		else if (strncmp(s, "lvds", 4) == 0)
			port = FSL_DIU_PORT_LVDS;
		else if (strncmp(s, "dlvds", 5) == 0)
			port = FSL_DIU_PORT_DLVDS;
	}

	/* Let the platform substitute a port it actually supports */
	return diu_ops.valid_monitor_port(port);
}

/*
 * Workaround for failed writing desc register of planes.
 * Needed with MPC5121 DIU rev 2.0 silicon.
 */
void wr_reg_wa(u32 *reg, u32 val)
{
	/* Re-write until the register reads back the intended value */
	do {
		out_be32(reg, val);
	} while (in_be32(reg) != val);
}

/*
 * Enable a panel (AOI) by linking its Area Descriptor into the DIU
 * descriptor chain of the corresponding plane.
 */
static void fsl_diu_enable_panel(struct fb_info *info)
{
	struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	switch (mfbi->index) {
	case PLANE0:
		if (hw->desc[0] != ad->paddr)
			wr_reg_wa(&hw->desc[0], ad->paddr);
		break;
	case PLANE1_AOI0:
		cmfbi = &data->mfb[2];
		if (hw->desc[1] != ad->paddr) {	/* AOI0 closed */
			if (cmfbi->count > 0)	/* AOI1 open */
				ad->next_ad =
					cpu_to_le32(cmfbi->ad->paddr);
			else
				ad->next_ad = 0;
			wr_reg_wa(&hw->desc[1], ad->paddr);
		}
		break;
	case PLANE2_AOI0:
		cmfbi = &data->mfb[4];
		if (hw->desc[2] != ad->paddr) {	/* AOI0 closed */
			if (cmfbi->count > 0)	/* AOI1 open */
				ad->next_ad =
					cpu_to_le32(cmfbi->ad->paddr);
			else
				ad->next_ad = 0;
			wr_reg_wa(&hw->desc[2], ad->paddr);
		}
		break;
	case PLANE1_AOI1:
		pmfbi = &data->mfb[1];
		ad->next_ad = 0;
		if (hw->desc[1] == data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[1], ad->paddr);
		else	/* AOI0 open */
			pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
		break;
	case PLANE2_AOI1:
		pmfbi = &data->mfb[3];
		ad->next_ad = 0;
		if (hw->desc[2] == data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[2], ad->paddr);
		else	/* AOI0 was open */
			pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
		break;
	}
}

/*
 * Disable a panel (AOI) by unlinking its Area Descriptor from the plane's
 * descriptor chain, substituting the dummy AOI when the chain empties.
 */
static void
fsl_diu_disable_panel(struct fb_info *info)
{
	struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	switch (mfbi->index) {
	case PLANE0:
		if (hw->desc[0] != data->dummy_ad.paddr)
			wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
		break;
	case PLANE1_AOI0:
		cmfbi = &data->mfb[2];
		if (cmfbi->count > 0)	/* AOI1 is open */
			wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
					/* move AOI1 to the first */
		else			/* AOI1 was closed */
			wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
					/* close AOI 0 */
		break;
	case PLANE2_AOI0:
		cmfbi = &data->mfb[4];
		if (cmfbi->count > 0)	/* AOI1 is open */
			wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
					/* move AOI1 to the first */
		else			/* AOI1 was closed */
			wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
					/* close AOI 0 */
		break;
	case PLANE1_AOI1:
		pmfbi = &data->mfb[1];
		if (hw->desc[1] != ad->paddr) {
			/* AOI1 is not the first in the chain */
			if (pmfbi->count > 0)
				/* AOI0 is open, must be the first */
				pmfbi->ad->next_ad = 0;
		} else	/* AOI1 is the first in the chain */
			wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
			/* close AOI 1 */
		break;
	case PLANE2_AOI1:
		pmfbi = &data->mfb[3];
		if (hw->desc[2] != ad->paddr) {
			/* AOI1 is not the first in the chain */
			if (pmfbi->count > 0)
				/* AOI0 is open, must be the first */
				pmfbi->ad->next_ad = 0;
		} else	/* AOI1 is the first in the chain */
			wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
			/* close AOI 1 */
		break;
	}
}

/* Start the DIU scanning out */
static void enable_lcdc(struct fb_info *info)
{
	struct mfb_info *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	out_be32(&hw->diu_mode, MFB_MODE1);
}

/* Stop the DIU */
static void disable_lcdc(struct fb_info *info)
{
	struct mfb_info *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw = data->diu_reg;

	out_be32(&hw->diu_mode, 0);
}

/*
 * Clamp an AOI's size and display offset so that it stays inside the base
 * plane and does not overlap its sibling AOI on the same plane.
 */
static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
				     struct fb_info *info)
{
	struct mfb_info
*lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	int available_height, upper_aoi_bottom;
	enum mfb_index index = mfbi->index;
	int lower_aoi_is_open, upper_aoi_is_open;
	__u32 base_plane_width, base_plane_height, upper_aoi_height;

	/* The base plane (PLANE0) bounds every AOI */
	base_plane_width = data->fsl_diu_info[0].var.xres;
	base_plane_height = data->fsl_diu_info[0].var.yres;

	if (mfbi->x_aoi_d < 0)
		mfbi->x_aoi_d = 0;
	if (mfbi->y_aoi_d < 0)
		mfbi->y_aoi_d = 0;
	switch (index) {
	case PLANE0:
		/* Plane 0 always fills the screen; force zero offsets */
		if (mfbi->x_aoi_d != 0)
			mfbi->x_aoi_d = 0;
		if (mfbi->y_aoi_d != 0)
			mfbi->y_aoi_d = 0;
		break;
	case PLANE1_AOI0:
	case PLANE2_AOI0:
		/* AOI0: the sibling AOI1 is at index+1 */
		lower_aoi_mfbi = data->fsl_diu_info[index+1].par;
		lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
		if (var->xres > base_plane_width)
			var->xres = base_plane_width;
		if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
			mfbi->x_aoi_d = base_plane_width - var->xres;

		/* AOI0 may not extend into an open AOI1 below it */
		if (lower_aoi_is_open)
			available_height = lower_aoi_mfbi->y_aoi_d;
		else
			available_height = base_plane_height;
		if (var->yres > available_height)
			var->yres = available_height;
		if ((mfbi->y_aoi_d + var->yres) > available_height)
			mfbi->y_aoi_d = available_height - var->yres;
		break;
	case PLANE1_AOI1:
	case PLANE2_AOI1:
		/* AOI1: the sibling AOI0 is at index-1 */
		upper_aoi_mfbi = data->fsl_diu_info[index-1].par;
		upper_aoi_height = data->fsl_diu_info[index-1].var.yres;
		upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
		upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
		if (var->xres > base_plane_width)
			var->xres = base_plane_width;
		if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
			mfbi->x_aoi_d = base_plane_width - var->xres;
		if (mfbi->y_aoi_d < 0)
			mfbi->y_aoi_d = 0;
		/* AOI1 must sit below an open AOI0 */
		if (upper_aoi_is_open) {
			if (mfbi->y_aoi_d < upper_aoi_bottom)
				mfbi->y_aoi_d = upper_aoi_bottom;
			available_height = base_plane_height
						- upper_aoi_bottom;
		} else
			available_height = base_plane_height;
		if (var->yres > available_height)
			var->yres = available_height;
		if ((mfbi->y_aoi_d + var->yres) > base_plane_height)
			mfbi->y_aoi_d = base_plane_height - var->yres;
		break;
	}
}

/*
 * Checks to see if the hardware supports the state requested by var passed
 * in. This function does not alter the hardware state! If the var passed in
 * is slightly off by what the hardware can support then we alter the var
 * PASSED in to what we can do. If the hardware doesn't support mode change
 * a -EINVAL will be returned by the upper layers.
 */
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;

	/* NOTE(review): xoffset/yoffset are __u32, so the '< 0' tests can
	 * never be true; they are kept verbatim here. */
	if (var->xoffset < 0)
		var->xoffset = 0;

	if (var->yoffset < 0)
		var->yoffset = 0;

	if (var->xoffset + info->var.xres > info->var.xres_virtual)
		var->xoffset = info->var.xres_virtual - info->var.xres;

	if (var->yoffset + info->var.yres > info->var.yres_virtual)
		var->yoffset = info->var.yres_virtual - info->var.yres;

	/* Only 16, 24 and 32 bpp are supported; fall back to the default */
	if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
	    (var->bits_per_pixel != 16))
		var->bits_per_pixel = default_bpp;

	switch (var->bits_per_pixel) {
	case 16:
		/* RGB565 */
		var->red.length = 5;
		var->red.offset = 11;
		var->red.msb_right = 0;

		var->green.length = 6;
		var->green.offset = 5;
		var->green.msb_right = 0;

		var->blue.length = 5;
		var->blue.offset = 0;
		var->blue.msb_right = 0;

		var->transp.length = 0;
		var->transp.offset = 0;
		var->transp.msb_right = 0;
		break;
	case 24:
		var->red.length = 8;
		var->red.offset = 0;
		var->red.msb_right =
0;

		var->green.length = 8;
		var->green.offset = 8;
		var->green.msb_right = 0;

		var->blue.length = 8;
		var->blue.offset = 16;
		var->blue.msb_right = 0;

		var->transp.length = 0;
		var->transp.offset = 0;
		var->transp.msb_right = 0;
		break;
	case 32:
		/* ARGB8888 */
		var->red.length = 8;
		var->red.offset = 16;
		var->red.msb_right = 0;

		var->green.length = 8;
		var->green.offset = 8;
		var->green.msb_right = 0;

		var->blue.length = 8;
		var->blue.offset = 0;
		var->blue.msb_right = 0;

		var->transp.length = 8;
		var->transp.offset = 24;
		var->transp.msb_right = 0;
		break;
	}

	var->height = -1;
	var->width = -1;
	var->grayscale = 0;

	/* Copy nonstd field to/from sync for fbset usage */
	var->sync |= var->nonstd;
	var->nonstd |= var->sync;

	adjust_aoi_size_position(var, info);
	return 0;
}

/* Fill in the fixed screeninfo from the current variable screeninfo */
static void set_fix(struct fb_info *info)
{
	struct fb_fix_screeninfo *fix = &info->fix;
	struct fb_var_screeninfo *var = &info->var;
	struct mfb_info *mfbi = info->par;

	/* NOTE(review): strncpy() may leave fix->id unterminated when the
	 * id fills the buffer — the template ids here are all short. */
	strncpy(fix->id, mfbi->id, sizeof(fix->id));
	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
	fix->type = FB_TYPE_PACKED_PIXELS;
	fix->accel = FB_ACCEL_NONE;
	fix->visual = FB_VISUAL_TRUECOLOR;
	fix->xpanstep = 1;
	fix->ypanstep = 1;
}

/* Program the DIU timing and global registers from info->var */
static void update_lcdc(struct fb_info *info)
{
	struct fb_var_screeninfo *var = &info->var;
	struct mfb_info *mfbi = info->par;
	struct fsl_diu_data *data = mfbi->parent;
	struct diu __iomem *hw;
	int i, j;
	u8 *gamma_table_base;
	u32 temp;

	hw = data->diu_reg;

	diu_ops.set_monitor_port(data->monitor_port);
	gamma_table_base = data->gamma;

	/* Prep for DIU init - gamma table, cursor table */
	/* Identity gamma ramp: three 256-entry channels, value == index */
	for (i = 0; i <= 2; i++)
		for (j = 0; j <= 255; j++)
			*gamma_table_base++ = j;

	if (diu_ops.set_gamma_table)
		diu_ops.set_gamma_table(data->monitor_port, data->gamma);

	disable_lcdc(info);

	/* Program DIU registers */
	out_be32(&hw->gamma, DMA_ADDR(data, gamma));
	out_be32(&hw->cursor, DMA_ADDR(data, cursor));

	out_be32(&hw->bgnd, 0x007F7F7F);	/* BGND */
	out_be32(&hw->bgnd_wb, 0);		/* BGND_WB */
	out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
						/* DISP SIZE */
	out_be32(&hw->wb_size, 0);		/* WB SIZE */
	out_be32(&hw->wb_mem_addr, 0);		/* WB MEM ADDR */

	/* Horizontal and vertical configuration register */
	temp = var->left_margin << 22 |		/* BP_H */
	       var->hsync_len << 11 |		/* PW_H */
	       var->right_margin;		/* FP_H */
	out_be32(&hw->hsyn_para, temp);

	temp = var->upper_margin << 22 |	/* BP_V */
	       var->vsync_len << 11 |		/* PW_V */
	       var->lower_margin;		/* FP_V */
	out_be32(&hw->vsyn_para, temp);

	diu_ops.set_pixel_clock(var->pixclock);

	out_be32(&hw->syn_pol, 0);		/* SYNC SIGNALS POLARITY */
	out_be32(&hw->thresholds, 0x00037800);	/* The Thresholds */
	out_be32(&hw->int_status, 0);		/* INTERRUPT STATUS */
	out_be32(&hw->plut, 0x01F5F666);

	/* Enable the DIU */
	enable_lcdc(info);
}

/* Allocate zeroed, DMA-capable memory for one framebuffer */
static int map_video_memory(struct fb_info *info)
{
	u32 smem_len = info->fix.line_length * info->var.yres_virtual;
	void *p;

	p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
	if (!p) {
		dev_err(info->dev, "unable to allocate fb memory\n");
		return -ENOMEM;
	}
	mutex_lock(&info->mm_lock);
	info->screen_base = p;
	info->fix.smem_start = virt_to_phys(info->screen_base);
	info->fix.smem_len = smem_len;
	mutex_unlock(&info->mm_lock);
	info->screen_size = info->fix.smem_len;

	return 0;
}

/* Release the framebuffer memory allocated by map_video_memory() */
static void unmap_video_memory(struct fb_info *info)
{
	void *p = info->screen_base;
	size_t l = info->fix.smem_len;

	mutex_lock(&info->mm_lock);
	info->screen_base = NULL;
	info->fix.smem_start = 0;
	info->fix.smem_len = 0;
	mutex_unlock(&info->mm_lock);

	if (p)
		free_pages_exact(p, l);
}

/*
 * Using the fb_var_screeninfo in fb_info we set the aoi of this
 * particular framebuffer. It is a light version of fsl_diu_set_par.
*/ static int fsl_diu_set_aoi(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct mfb_info *mfbi = info->par; struct diu_ad *ad = mfbi->ad; /* AOI should not be greater than display size */ ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset); ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); return 0; } /** * fsl_diu_get_pixel_format: return the pixel format for a given color depth * * The pixel format is a 32-bit value that determine which bits in each * pixel are to be used for each color. This is the default function used * if the platform does not define its own version. */ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel) { #define PF_BYTE_F 0x10000000 #define PF_ALPHA_C_MASK 0x0E000000 #define PF_ALPHA_C_SHIFT 25 #define PF_BLUE_C_MASK 0x01800000 #define PF_BLUE_C_SHIFT 23 #define PF_GREEN_C_MASK 0x00600000 #define PF_GREEN_C_SHIFT 21 #define PF_RED_C_MASK 0x00180000 #define PF_RED_C_SHIFT 19 #define PF_PALETTE 0x00040000 #define PF_PIXEL_S_MASK 0x00030000 #define PF_PIXEL_S_SHIFT 16 #define PF_COMP_3_MASK 0x0000F000 #define PF_COMP_3_SHIFT 12 #define PF_COMP_2_MASK 0x00000F00 #define PF_COMP_2_SHIFT 8 #define PF_COMP_1_MASK 0x000000F0 #define PF_COMP_1_SHIFT 4 #define PF_COMP_0_MASK 0x0000000F #define PF_COMP_0_SHIFT 0 #define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \ (c2 << PF_COMP_2_SHIFT) | (c1 << PF_COMP_1_SHIFT) | \ (c0 << PF_COMP_0_SHIFT) | (size << PF_PIXEL_S_SHIFT)) switch (bits_per_pixel) { case 32: /* 0x88883316 */ return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8); case 24: /* 0x88082219 */ return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8); case 16: /* 0x65053118 */ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0); default: pr_err("fsl-diu: unsupported color depth %u\n", bits_per_pixel); return 0; } } /* * Using the 
fb_var_screeninfo in fb_info we set the resolution of this * particular framebuffer. This function alters the fb_fix_screeninfo stored * in fb_info. It does not alter var in fb_info since we are using that * data. This means we depend on the data in var inside fb_info to be * supported by the hardware. fsl_diu_check_var is always called before * fsl_diu_set_par to ensure this. */ static int fsl_diu_set_par(struct fb_info *info) { unsigned long len; struct fb_var_screeninfo *var = &info->var; struct mfb_info *mfbi = info->par; struct fsl_diu_data *data = mfbi->parent; struct diu_ad *ad = mfbi->ad; struct diu __iomem *hw; hw = data->diu_reg; set_fix(info); mfbi->cursor_reset = 1; len = info->var.yres_virtual * info->fix.line_length; /* Alloc & dealloc each time resolution/bpp change */ if (len != info->fix.smem_len) { if (info->fix.smem_start) unmap_video_memory(info); /* Memory allocation for framebuffer */ if (map_video_memory(info)) { dev_err(info->dev, "unable to allocate fb memory 1\n"); return -ENOMEM; } } if (diu_ops.get_pixel_format) ad->pix_fmt = diu_ops.get_pixel_format(data->monitor_port, var->bits_per_pixel); else ad->pix_fmt = fsl_diu_get_pixel_format(var->bits_per_pixel); ad->addr = cpu_to_le32(info->fix.smem_start); ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) | var->xres_virtual) | mfbi->g_alpha; /* AOI should not be greater than display size */ ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres); ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset); ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); /* Disable chroma keying function */ ad->ckmax_r = 0; ad->ckmax_g = 0; ad->ckmax_b = 0; ad->ckmin_r = 255; ad->ckmin_g = 255; ad->ckmin_b = 255; if (mfbi->index == PLANE0) update_lcdc(info); return 0; } static inline __u32 CNVT_TOHW(__u32 val, __u32 width) { return ((val << width) + 0x7FFF - val) >> 16; } /* * Set a single color register. 
The values supplied have a 16 bit magnitude * which needs to be scaled in this function for the hardware. Things to take * into consideration are how many color registers, if any, are supported with * the current color visual. With truecolor mode no color palettes are * supported. Here a pseudo palette is created which we store the value in * pseudo_palette in struct fb_info. For pseudocolor mode we have a limited * color palette. */ static int fsl_diu_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { int ret = 1; /* * If greyscale is true, then we convert the RGB value * to greyscale no matter what visual we are using. */ if (info->var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: /* * 16-bit True Colour. We encode the RGB value * according to the RGB bitfield information. */ if (regno < 16) { u32 *pal = info->pseudo_palette; u32 v; red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); transp = CNVT_TOHW(transp, info->var.transp.length); v = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); pal[regno] = v; ret = 0; } break; } return ret; } /* * Pan (or wrap, depending on the `vmode' field) the display using the * 'xoffset' and 'yoffset' fields of the 'var' structure. If the values * don't fit, return -EINVAL. 
*/
static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
			       struct fb_info *info)
{
	if ((info->var.xoffset == var->xoffset) &&
	    (info->var.yoffset == var->yoffset))
		return 0;	/* No change, do nothing */

	/* NOTE(review): xoffset/yoffset are __u32, so the '< 0' tests are
	 * always false; the virtual-resolution bounds below do the work. */
	if (var->xoffset < 0 || var->yoffset < 0
	    || var->xoffset + info->var.xres > info->var.xres_virtual
	    || var->yoffset + info->var.yres > info->var.yres_virtual)
		return -EINVAL;

	info->var.xoffset = var->xoffset;
	info->var.yoffset = var->yoffset;

	if (var->vmode & FB_VMODE_YWRAP)
		info->var.vmode |= FB_VMODE_YWRAP;
	else
		info->var.vmode &= ~FB_VMODE_YWRAP;

	fsl_diu_set_aoi(info);

	return 0;
}

/* Device-specific ioctls: pixel format, AOI offsets, global alpha, chroma key */
static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
		       unsigned long arg)
{
	struct mfb_info *mfbi = info->par;
	struct diu_ad *ad = mfbi->ad;
	struct mfb_chroma_key ck;
	unsigned char global_alpha;
	struct aoi_display_offset aoi_d;
	__u32 pix_fmt;
	void __user *buf = (void __user *)arg;

	if (!arg)
		return -EINVAL;
	switch (cmd) {
	case MFB_SET_PIXFMT_OLD:
		dev_warn(info->dev,
			 "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
			 MFB_SET_PIXFMT_OLD);
		/* intentional fallthrough to the new command value */
	case MFB_SET_PIXFMT:
		if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
			return -EFAULT;
		ad->pix_fmt = pix_fmt;
		break;
	case MFB_GET_PIXFMT_OLD:
		dev_warn(info->dev,
			 "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
			 MFB_GET_PIXFMT_OLD);
		/* intentional fallthrough to the new command value */
	case MFB_GET_PIXFMT:
		pix_fmt = ad->pix_fmt;
		if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
			return -EFAULT;
		break;
	case MFB_SET_AOID:
		if (copy_from_user(&aoi_d, buf, sizeof(aoi_d)))
			return -EFAULT;
		mfbi->x_aoi_d = aoi_d.x_aoi_d;
		mfbi->y_aoi_d = aoi_d.y_aoi_d;
		fsl_diu_check_var(&info->var, info);
		fsl_diu_set_aoi(info);
		break;
	case MFB_GET_AOID:
		aoi_d.x_aoi_d = mfbi->x_aoi_d;
		aoi_d.y_aoi_d = mfbi->y_aoi_d;
		if (copy_to_user(buf, &aoi_d, sizeof(aoi_d)))
			return -EFAULT;
		break;
	case MFB_GET_ALPHA:
		global_alpha = mfbi->g_alpha;
		if (copy_to_user(buf, &global_alpha, sizeof(global_alpha)))
			return -EFAULT;
		break;
	case MFB_SET_ALPHA:
		/* set panel information */
		if (copy_from_user(&global_alpha, buf,
				   sizeof(global_alpha)))
			return -EFAULT;
		ad->src_size_g_alpha = (ad->src_size_g_alpha & (~0xff)) |
				       (global_alpha & 0xff);
		mfbi->g_alpha = global_alpha;
		break;
	case MFB_SET_CHROMA_KEY:
		/* set panel winformation */
		if (copy_from_user(&ck, buf, sizeof(ck)))
			return -EFAULT;

		if (ck.enable && (ck.red_max < ck.red_min ||
				  ck.green_max < ck.green_min ||
				  ck.blue_max < ck.blue_min))
			return -EINVAL;

		if (!ck.enable) {
			/* min > max can never match: keying disabled */
			ad->ckmax_r = 0;
			ad->ckmax_g = 0;
			ad->ckmax_b = 0;
			ad->ckmin_r = 255;
			ad->ckmin_g = 255;
			ad->ckmin_b = 255;
		} else {
			ad->ckmax_r = ck.red_max;
			ad->ckmax_g = ck.green_max;
			ad->ckmax_b = ck.blue_max;
			ad->ckmin_r = ck.red_min;
			ad->ckmin_g = ck.green_min;
			ad->ckmin_b = ck.blue_min;
		}
		break;
	default:
		dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

/* turn on fb if count == 1 */
static int fsl_diu_open(struct fb_info *info, int user)
{
	struct mfb_info *mfbi = info->par;
	int res = 0;

	/* free boot splash memory on first /dev/fb0 open */
	if ((mfbi->index == PLANE0) && diu_ops.release_bootmem)
		diu_ops.release_bootmem();

	spin_lock(&diu_lock);
	mfbi->count++;
	if (mfbi->count == 1) {
		fsl_diu_check_var(&info->var, info);
		res = fsl_diu_set_par(info);
		if (res < 0)
			mfbi->count--;
		else
			fsl_diu_enable_panel(info);
	}

	spin_unlock(&diu_lock);
	return res;
}

/* turn off fb if count == 0 */
static int fsl_diu_release(struct fb_info *info, int user)
{
	struct mfb_info *mfbi = info->par;
	int res = 0;

	spin_lock(&diu_lock);
	mfbi->count--;
	if (mfbi->count == 0)
		fsl_diu_disable_panel(info);

	spin_unlock(&diu_lock);
	return res;
}

static struct fb_ops fsl_diu_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = fsl_diu_check_var,
	.fb_set_par = fsl_diu_set_par,
	.fb_setcolreg = fsl_diu_setcolreg,
	.fb_pan_display = fsl_diu_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_ioctl = fsl_diu_ioctl,
	.fb_open = fsl_diu_open,
	.fb_release = fsl_diu_release,
};

/* Pick a video mode and register one AOI as a framebuffer device */
static int __devinit install_fb(struct fb_info *info)
{
int rc; struct mfb_info *mfbi = info->par; const char *aoi_mode, *init_aoi_mode = "320x240"; struct fb_videomode *db = fsl_diu_mode_db; unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db); int has_default_mode = 1; info->var.activate = FB_ACTIVATE_NOW; info->fbops = &fsl_diu_ops; info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK | FBINFO_READS_FAST; info->pseudo_palette = mfbi->pseudo_palette; rc = fb_alloc_cmap(&info->cmap, 16, 0); if (rc) return rc; if (mfbi->index == PLANE0) { if (mfbi->edid_data) { /* Now build modedb from EDID */ fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs); fb_videomode_to_modelist(info->monspecs.modedb, info->monspecs.modedb_len, &info->modelist); db = info->monspecs.modedb; dbsize = info->monspecs.modedb_len; } aoi_mode = fb_mode; } else { aoi_mode = init_aoi_mode; } rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize, NULL, default_bpp); if (!rc) { /* * For plane 0 we continue and look into * driver's internal modedb. */ if ((mfbi->index == PLANE0) && mfbi->edid_data) has_default_mode = 0; else return -EINVAL; } if (!has_default_mode) { rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db, ARRAY_SIZE(fsl_diu_mode_db), NULL, default_bpp); if (rc) has_default_mode = 1; } /* Still not found, use preferred mode from database if any */ if (!has_default_mode && info->monspecs.modedb) { struct fb_monspecs *specs = &info->monspecs; struct fb_videomode *modedb = &specs->modedb[0]; /* * Get preferred timing. If not found, * first mode in database will be used. 
*/ if (specs->misc & FB_MISC_1ST_DETAIL) { int i; for (i = 0; i < specs->modedb_len; i++) { if (specs->modedb[i].flag & FB_MODE_IS_FIRST) { modedb = &specs->modedb[i]; break; } } } info->var.bits_per_pixel = default_bpp; fb_videomode_to_var(&info->var, modedb); } if (fsl_diu_check_var(&info->var, info)) { dev_err(info->dev, "fsl_diu_check_var failed\n"); unmap_video_memory(info); fb_dealloc_cmap(&info->cmap); return -EINVAL; } if (register_framebuffer(info) < 0) { dev_err(info->dev, "register_framebuffer failed\n"); unmap_video_memory(info); fb_dealloc_cmap(&info->cmap); return -EINVAL; } mfbi->registered = 1; dev_info(info->dev, "%s registered successfully\n", mfbi->id); return 0; } static void uninstall_fb(struct fb_info *info) { struct mfb_info *mfbi = info->par; if (!mfbi->registered) return; if (mfbi->index == PLANE0) kfree(mfbi->edid_data); unregister_framebuffer(info); unmap_video_memory(info); if (&info->cmap) fb_dealloc_cmap(&info->cmap); mfbi->registered = 0; } static irqreturn_t fsl_diu_isr(int irq, void *dev_id) { struct diu __iomem *hw = dev_id; unsigned int status = in_be32(&hw->int_status); if (status) { /* This is the workaround for underrun */ if (status & INT_UNDRUN) { out_be32(&hw->diu_mode, 0); udelay(1); out_be32(&hw->diu_mode, 1); } #if defined(CONFIG_NOT_COHERENT_CACHE) else if (status & INT_VSYNC) { unsigned int i; for (i = 0; i < coherence_data_size; i += d_cache_line_size) __asm__ __volatile__ ( "dcbz 0, %[input]" ::[input]"r"(&coherence_data[i])); } #endif return IRQ_HANDLED; } return IRQ_NONE; } static int request_irq_local(struct fsl_diu_data *data) { struct diu __iomem *hw = data->diu_reg; u32 ints; int ret; /* Read to clear the status */ in_be32(&hw->int_status); ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw); if (!ret) { ints = INT_PARERR | INT_LS_BF_VS; #if !defined(CONFIG_NOT_COHERENT_CACHE) ints |= INT_VSYNC; #endif /* Read to clear the status */ in_be32(&hw->int_status); out_be32(&hw->int_mask, ints); } return 
ret; } static void free_irq_local(struct fsl_diu_data *data) { struct diu __iomem *hw = data->diu_reg; /* Disable all LCDC interrupt */ out_be32(&hw->int_mask, 0x1f); free_irq(data->irq, NULL); } #ifdef CONFIG_PM /* * Power management hooks. Note that we won't be called from IRQ context, * unlike the blank functions above, so we may sleep. */ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) { struct fsl_diu_data *data; data = dev_get_drvdata(&ofdev->dev); disable_lcdc(data->fsl_diu_info); return 0; } static int fsl_diu_resume(struct platform_device *ofdev) { struct fsl_diu_data *data; data = dev_get_drvdata(&ofdev->dev); enable_lcdc(data->fsl_diu_info); return 0; } #else #define fsl_diu_suspend NULL #define fsl_diu_resume NULL #endif /* CONFIG_PM */ static ssize_t store_monitor(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { enum fsl_diu_monitor_port old_monitor_port; struct fsl_diu_data *data = container_of(attr, struct fsl_diu_data, dev_attr); old_monitor_port = data->monitor_port; data->monitor_port = fsl_diu_name_to_port(buf); if (old_monitor_port != data->monitor_port) { /* All AOIs need adjust pixel format * fsl_diu_set_par only change the pixsel format here * unlikely to fail. 
*/ unsigned int i; for (i=0; i < NUM_AOIS; i++) fsl_diu_set_par(&data->fsl_diu_info[i]); } return count; } static ssize_t show_monitor(struct device *device, struct device_attribute *attr, char *buf) { struct fsl_diu_data *data = container_of(attr, struct fsl_diu_data, dev_attr); switch (data->monitor_port) { case FSL_DIU_PORT_DVI: return sprintf(buf, "DVI\n"); case FSL_DIU_PORT_LVDS: return sprintf(buf, "Single-link LVDS\n"); case FSL_DIU_PORT_DLVDS: return sprintf(buf, "Dual-link LVDS\n"); } return 0; } static int __devinit fsl_diu_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mfb_info *mfbi; struct fsl_diu_data *data; int diu_mode; dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */ unsigned int i; int ret; data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data), &dma_addr, GFP_DMA | __GFP_ZERO); if (!data) return -ENOMEM; data->dma_addr = dma_addr; /* * dma_alloc_coherent() uses a page allocator, so the address is * always page-aligned. We need the memory to be 32-byte aligned, * so that's good. However, if one day the allocator changes, we * need to catch that. It's not worth the effort to handle unaligned * alloctions now because it's highly unlikely to ever be a problem. */ if ((unsigned long)data & 31) { dev_err(&pdev->dev, "misaligned allocation"); ret = -ENOMEM; goto error; } spin_lock_init(&data->reg_lock); for (i = 0; i < NUM_AOIS; i++) { struct fb_info *info = &data->fsl_diu_info[i]; info->device = &pdev->dev; info->par = &data->mfb[i]; /* * We store the physical address of the AD in the reserved * 'paddr' field of the AD itself. 
*/ data->ad[i].paddr = DMA_ADDR(data, ad[i]); info->fix.smem_start = 0; /* Initialize the AOI data structure */ mfbi = info->par; memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info)); mfbi->parent = data; mfbi->ad = &data->ad[i]; if (mfbi->index == PLANE0) { const u8 *prop; int len; /* Get EDID */ prop = of_get_property(np, "edid", &len); if (prop && len == EDID_LENGTH) mfbi->edid_data = kmemdup(prop, EDID_LENGTH, GFP_KERNEL); } } data->diu_reg = of_iomap(np, 0); if (!data->diu_reg) { dev_err(&pdev->dev, "cannot map DIU registers\n"); ret = -EFAULT; goto error; } diu_mode = in_be32(&data->diu_reg->diu_mode); if (diu_mode == MFB_MODE0) out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */ /* Get the IRQ of the DIU */ data->irq = irq_of_parse_and_map(np, 0); if (!data->irq) { dev_err(&pdev->dev, "could not get DIU IRQ\n"); ret = -EINVAL; goto error; } data->monitor_port = monitor_port; /* Initialize the dummy Area Descriptor */ data->dummy_ad.addr = cpu_to_le32(DMA_ADDR(data, dummy_aoi)); data->dummy_ad.pix_fmt = 0x88882317; data->dummy_ad.src_size_g_alpha = cpu_to_le32((4 << 12) | 4); data->dummy_ad.aoi_size = cpu_to_le32((4 << 16) | 2); data->dummy_ad.offset_xyi = 0; data->dummy_ad.offset_xyd = 0; data->dummy_ad.next_ad = 0; data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad); /* * Let DIU display splash screen if it was pre-initialized * by the bootloader, set dummy area descriptor otherwise. 
*/ if (diu_mode == MFB_MODE0) out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr); out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr); out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr); for (i = 0; i < NUM_AOIS; i++) { ret = install_fb(&data->fsl_diu_info[i]); if (ret) { dev_err(&pdev->dev, "could not register fb %d\n", i); goto error; } } if (request_irq_local(data)) { dev_err(&pdev->dev, "could not claim irq\n"); goto error; } sysfs_attr_init(&data->dev_attr.attr); data->dev_attr.attr.name = "monitor"; data->dev_attr.attr.mode = S_IRUGO|S_IWUSR; data->dev_attr.show = show_monitor; data->dev_attr.store = store_monitor; ret = device_create_file(&pdev->dev, &data->dev_attr); if (ret) { dev_err(&pdev->dev, "could not create sysfs file %s\n", data->dev_attr.attr.name); } dev_set_drvdata(&pdev->dev, data); return 0; error: for (i = 0; i < NUM_AOIS; i++) uninstall_fb(&data->fsl_diu_info[i]); iounmap(data->diu_reg); dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data, data->dma_addr); return ret; } static int fsl_diu_remove(struct platform_device *pdev) { struct fsl_diu_data *data; int i; data = dev_get_drvdata(&pdev->dev); disable_lcdc(&data->fsl_diu_info[0]); free_irq_local(data); for (i = 0; i < NUM_AOIS; i++) uninstall_fb(&data->fsl_diu_info[i]); iounmap(data->diu_reg); dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data, data->dma_addr); return 0; } #ifndef MODULE static int __init fsl_diu_setup(char *options) { char *opt; unsigned long val; if (!options || !*options) return 0; while ((opt = strsep(&options, ",")) != NULL) { if (!*opt) continue; if (!strncmp(opt, "monitor=", 8)) { monitor_port = fsl_diu_name_to_port(opt + 8); } else if (!strncmp(opt, "bpp=", 4)) { if (!strict_strtoul(opt + 4, 10, &val)) default_bpp = val; } else fb_mode = opt; } return 0; } #endif static struct of_device_id fsl_diu_match[] = { #ifdef CONFIG_PPC_MPC512x { .compatible = "fsl,mpc5121-diu", }, #endif { .compatible = "fsl,diu", }, {} }; 
MODULE_DEVICE_TABLE(of, fsl_diu_match); static struct platform_driver fsl_diu_driver = { .driver = { .name = "fsl-diu-fb", .owner = THIS_MODULE, .of_match_table = fsl_diu_match, }, .probe = fsl_diu_probe, .remove = fsl_diu_remove, .suspend = fsl_diu_suspend, .resume = fsl_diu_resume, }; static int __init fsl_diu_init(void) { #ifdef CONFIG_NOT_COHERENT_CACHE struct device_node *np; const u32 *prop; #endif int ret; #ifndef MODULE char *option; /* * For kernel boot options (in 'video=xxxfb:<options>' format) */ if (fb_get_options("fslfb", &option)) return -ENODEV; fsl_diu_setup(option); #else monitor_port = fsl_diu_name_to_port(monitor_string); #endif pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); #ifdef CONFIG_NOT_COHERENT_CACHE np = of_find_node_by_type(NULL, "cpu"); if (!np) { pr_err("fsl-diu-fb: can't find 'cpu' device node\n"); return -ENODEV; } prop = of_get_property(np, "d-cache-size", NULL); if (prop == NULL) { pr_err("fsl-diu-fb: missing 'd-cache-size' property' " "in 'cpu' node\n"); of_node_put(np); return -ENODEV; } /* * Freescale PLRU requires 13/8 times the cache size to do a proper * displacement flush */ coherence_data_size = be32_to_cpup(prop) * 13; coherence_data_size /= 8; prop = of_get_property(np, "d-cache-line-size", NULL); if (prop == NULL) { pr_err("fsl-diu-fb: missing 'd-cache-line-size' property' " "in 'cpu' node\n"); of_node_put(np); return -ENODEV; } d_cache_line_size = be32_to_cpup(prop); of_node_put(np); coherence_data = vmalloc(coherence_data_size); if (!coherence_data) return -ENOMEM; #endif ret = platform_driver_register(&fsl_diu_driver); if (ret) { pr_err("fsl-diu-fb: failed to register platform driver\n"); #if defined(CONFIG_NOT_COHERENT_CACHE) vfree(coherence_data); #endif } return ret; } static void __exit fsl_diu_exit(void) { platform_driver_unregister(&fsl_diu_driver); #if defined(CONFIG_NOT_COHERENT_CACHE) vfree(coherence_data); #endif } module_init(fsl_diu_init); module_exit(fsl_diu_exit); 
MODULE_AUTHOR("York Sun <yorksun@freescale.com>"); MODULE_DESCRIPTION("Freescale DIU framebuffer driver"); MODULE_LICENSE("GPL"); module_param_named(mode, fb_mode, charp, 0); MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); module_param_named(bpp, default_bpp, ulong, 0); MODULE_PARM_DESC(bpp, "Specify bit-per-pixel if not specified in 'mode'"); module_param_named(monitor, monitor_string, charp, 0); MODULE_PARM_DESC(monitor, "Specify the monitor port " "(\"dvi\", \"lvds\", or \"dlvds\") if supported by the platform");
gpl-2.0
TheNameIsNigel/android_kernel_carbon_msm8928
drivers/mtd/ubi/cdev.c
4622
25353
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * This file includes implementation of UBI character device operations. * * There are two kinds of character devices in UBI: UBI character devices and * UBI volume character devices. UBI character devices allow users to * manipulate whole volumes: create, remove, and re-size them. Volume character * devices provide volume I/O capabilities. * * Major and minor numbers are assigned dynamically to both UBI and volume * character devices. * * Well, there is the third kind of character devices - the UBI control * character device, which allows to manipulate by UBI devices - create and * delete them. In other words, it is used for attaching and detaching MTD * devices. */ #include <linux/module.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <linux/capability.h> #include <linux/uaccess.h> #include <linux/compat.h> #include <linux/math64.h> #include <mtd/ubi-user.h> #include "ubi.h" /** * get_exclusive - get exclusive access to an UBI volume. * @desc: volume descriptor * * This function changes UBI volume open mode to "exclusive". 
Returns previous * mode value (positive integer) in case of success and a negative error code * in case of failure. */ static int get_exclusive(struct ubi_volume_desc *desc) { int users, err; struct ubi_volume *vol = desc->vol; spin_lock(&vol->ubi->volumes_lock); users = vol->readers + vol->writers + vol->exclusive; ubi_assert(users > 0); if (users > 1) { dbg_err("%d users for volume %d", users, vol->vol_id); err = -EBUSY; } else { vol->readers = vol->writers = 0; vol->exclusive = 1; err = desc->mode; desc->mode = UBI_EXCLUSIVE; } spin_unlock(&vol->ubi->volumes_lock); return err; } /** * revoke_exclusive - revoke exclusive mode. * @desc: volume descriptor * @mode: new mode to switch to */ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) { struct ubi_volume *vol = desc->vol; spin_lock(&vol->ubi->volumes_lock); ubi_assert(vol->readers == 0 && vol->writers == 0); ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); vol->exclusive = 0; if (mode == UBI_READONLY) vol->readers = 1; else if (mode == UBI_READWRITE) vol->writers = 1; else vol->exclusive = 1; spin_unlock(&vol->ubi->volumes_lock); desc->mode = mode; } static int vol_cdev_open(struct inode *inode, struct file *file) { struct ubi_volume_desc *desc; int vol_id = iminor(inode) - 1, mode, ubi_num; ubi_num = ubi_major2num(imajor(inode)); if (ubi_num < 0) return ubi_num; if (file->f_mode & FMODE_WRITE) mode = UBI_READWRITE; else mode = UBI_READONLY; dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode); desc = ubi_open_volume(ubi_num, vol_id, mode); if (IS_ERR(desc)) return PTR_ERR(desc); file->private_data = desc; return 0; } static int vol_cdev_release(struct inode *inode, struct file *file) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; dbg_gen("release device %d, volume %d, mode %d", vol->ubi->ubi_num, vol->vol_id, desc->mode); if (vol->updating) { ubi_warn("update of volume %d not finished, volume is damaged", vol->vol_id); 
ubi_assert(!vol->changing_leb); vol->updating = 0; vfree(vol->upd_buf); } else if (vol->changing_leb) { dbg_gen("only %lld of %lld bytes received for atomic LEB change" " for volume %d:%d, cancel", vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); vol->changing_leb = 0; vfree(vol->upd_buf); } ubi_close_volume(desc); return 0; } static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; loff_t new_offset; if (vol->updating) { /* Update is in progress, seeking is prohibited */ dbg_err("updating"); return -EBUSY; } switch (origin) { case 0: /* SEEK_SET */ new_offset = offset; break; case 1: /* SEEK_CUR */ new_offset = file->f_pos + offset; break; case 2: /* SEEK_END */ new_offset = vol->used_bytes + offset; break; default: return -EINVAL; } if (new_offset < 0 || new_offset > vol->used_bytes) { dbg_err("bad seek %lld", new_offset); return -EINVAL; } dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld", vol->vol_id, offset, origin, new_offset); file->f_pos = new_offset; return new_offset; } static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct ubi_volume_desc *desc = file->private_data; struct ubi_device *ubi = desc->vol->ubi; struct inode *inode = file->f_path.dentry->d_inode; int err; mutex_lock(&inode->i_mutex); err = ubi_sync(ubi->ubi_num); mutex_unlock(&inode->i_mutex); return err; } static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, loff_t *offp) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int err, lnum, off, len, tbuf_size; size_t count_save = count; void *tbuf; dbg_gen("read %zd bytes from offset %lld of volume %d", count, *offp, vol->vol_id); if (vol->updating) { dbg_err("updating"); return -EBUSY; } if (vol->upd_marker) { dbg_err("damaged volume, update marker is set"); return 
-EBADF; } if (*offp == vol->used_bytes || count == 0) return 0; if (vol->corrupted) dbg_gen("read from corrupted volume %d", vol->vol_id); if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; tbuf_size = vol->usable_leb_size; if (count < tbuf_size) tbuf_size = ALIGN(count, ubi->min_io_size); tbuf = vmalloc(tbuf_size); if (!tbuf) return -ENOMEM; len = count > tbuf_size ? tbuf_size : count; lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); do { cond_resched(); if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0); if (err) break; off += len; if (off == vol->usable_leb_size) { lnum += 1; off -= vol->usable_leb_size; } count -= len; *offp += len; err = copy_to_user(buf, tbuf, len); if (err) { err = -EFAULT; break; } buf += len; len = count > tbuf_size ? tbuf_size : count; } while (count); vfree(tbuf); return err ? err : count_save - count; } /* * This function allows to directly write to dynamic UBI volumes, without * issuing the volume update operation. 
*/ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int lnum, off, len, tbuf_size, err = 0; size_t count_save = count; char *tbuf; if (!vol->direct_writes) return -EPERM; dbg_gen("requested: write %zd bytes to offset %lld of volume %u", count, *offp, vol->vol_id); if (vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); if (off & (ubi->min_io_size - 1)) { dbg_err("unaligned position"); return -EINVAL; } if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; /* We can write only in fractions of the minimum I/O unit */ if (count & (ubi->min_io_size - 1)) { dbg_err("unaligned write length"); return -EINVAL; } tbuf_size = vol->usable_leb_size; if (count < tbuf_size) tbuf_size = ALIGN(count, ubi->min_io_size); tbuf = vmalloc(tbuf_size); if (!tbuf) return -ENOMEM; len = count > tbuf_size ? tbuf_size : count; while (count) { cond_resched(); if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; err = copy_from_user(tbuf, buf, len); if (err) { err = -EFAULT; break; } err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len, UBI_UNKNOWN); if (err) break; off += len; if (off == vol->usable_leb_size) { lnum += 1; off -= vol->usable_leb_size; } count -= len; *offp += len; buf += len; len = count > tbuf_size ? tbuf_size : count; } vfree(tbuf); return err ? 
err : count_save - count; } static ssize_t vol_cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { int err = 0; struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; if (!vol->updating && !vol->changing_leb) return vol_cdev_direct_write(file, buf, count, offp); if (vol->updating) err = ubi_more_update_data(ubi, vol, buf, count); else err = ubi_more_leb_change_data(ubi, vol, buf, count); if (err < 0) { ubi_err("cannot accept more %zd bytes of data, error %d", count, err); return err; } if (err) { /* * The operation is finished, @err contains number of actually * written bytes. */ count = err; if (vol->changing_leb) { revoke_exclusive(desc, UBI_READWRITE); return count; } err = ubi_check_volume(ubi, vol->vol_id); if (err < 0) return err; if (err) { ubi_warn("volume %d on UBI device %d is corrupted", vol->vol_id, ubi->ubi_num); vol->corrupted = 1; } vol->checked = 1; ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); revoke_exclusive(desc, UBI_READWRITE); } return count; } static long vol_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; void __user *argp = (void __user *)arg; switch (cmd) { /* Volume update command */ case UBI_IOCVOLUP: { int64_t bytes, rsvd_bytes; if (!capable(CAP_SYS_RESOURCE)) { err = -EPERM; break; } err = copy_from_user(&bytes, argp, sizeof(int64_t)); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY) { err = -EROFS; break; } rsvd_bytes = (long long)vol->reserved_pebs * ubi->leb_size-vol->data_pad; if (bytes < 0 || bytes > rsvd_bytes) { err = -EINVAL; break; } err = get_exclusive(desc); if (err < 0) break; err = ubi_start_update(ubi, vol, bytes); if (bytes == 0) revoke_exclusive(desc, UBI_READWRITE); break; } /* Atomic logical eraseblock change command */ case UBI_IOCEBCH: { struct 
ubi_leb_change_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_leb_change_req)); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) { err = -EROFS; break; } /* Validate the request */ err = -EINVAL; if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || req.bytes < 0 || req.lnum >= vol->usable_leb_size) break; if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM && req.dtype != UBI_UNKNOWN) break; err = get_exclusive(desc); if (err < 0) break; err = ubi_start_leb_change(ubi, vol, &req); if (req.bytes == 0) revoke_exclusive(desc, UBI_READWRITE); break; } /* Logical eraseblock erasure command */ case UBI_IOCEBER: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) { err = -EROFS; break; } if (lnum < 0 || lnum >= vol->reserved_pebs) { err = -EINVAL; break; } dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); err = ubi_eba_unmap_leb(ubi, vol, lnum); if (err) break; err = ubi_wl_flush(ubi); break; } /* Logical eraseblock map command */ case UBI_IOCEBMAP: { struct ubi_map_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); if (err) { err = -EFAULT; break; } err = ubi_leb_map(desc, req.lnum, req.dtype); break; } /* Logical eraseblock un-map command */ case UBI_IOCEBUNMAP: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_leb_unmap(desc, lnum); break; } /* Check if logical eraseblock is mapped command */ case UBI_IOCEBISMAP: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_is_mapped(desc, lnum); break; } /* Set volume property command */ case UBI_IOCSETVOLPROP: { struct ubi_set_vol_prop_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_set_vol_prop_req)); if (err) { err = -EFAULT; break; } switch (req.property) { case UBI_VOL_PROP_DIRECT_WRITE: 
mutex_lock(&ubi->device_mutex); desc->vol->direct_writes = !!req.value; mutex_unlock(&ubi->device_mutex); break; default: err = -EINVAL; break; } break; } default: err = -ENOTTY; break; } return err; } /** * verify_mkvol_req - verify volume creation request. * @ubi: UBI device description object * @req: the request to check * * This function zero if the request is correct, and %-EINVAL if not. */ static int verify_mkvol_req(const struct ubi_device *ubi, const struct ubi_mkvol_req *req) { int n, err = -EINVAL; if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || req->name_len < 0) goto bad; if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && req->vol_id != UBI_VOL_NUM_AUTO) goto bad; if (req->alignment == 0) goto bad; if (req->bytes == 0) goto bad; if (req->vol_type != UBI_DYNAMIC_VOLUME && req->vol_type != UBI_STATIC_VOLUME) goto bad; if (req->alignment > ubi->leb_size) goto bad; n = req->alignment & (ubi->min_io_size - 1); if (req->alignment != 1 && n) goto bad; if (!req->name[0] || !req->name_len) goto bad; if (req->name_len > UBI_VOL_NAME_MAX) { err = -ENAMETOOLONG; goto bad; } n = strnlen(req->name, req->name_len + 1); if (n != req->name_len) goto bad; return 0; bad: dbg_err("bad volume creation request"); ubi_dbg_dump_mkvol_req(req); return err; } /** * verify_rsvol_req - verify volume re-size request. * @ubi: UBI device description object * @req: the request to check * * This function returns zero if the request is correct, and %-EINVAL if not. */ static int verify_rsvol_req(const struct ubi_device *ubi, const struct ubi_rsvol_req *req) { if (req->bytes <= 0) return -EINVAL; if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) return -EINVAL; return 0; } /** * rename_volumes - rename UBI volumes. * @ubi: UBI device description object * @req: volumes re-name request * * This is a helper function for the volume re-name IOCTL which validates the * the request, opens the volume and calls corresponding volumes management * function. 
Returns zero in case of success and a negative error code in case * of failure. */ static int rename_volumes(struct ubi_device *ubi, struct ubi_rnvol_req *req) { int i, n, err; struct list_head rename_list; struct ubi_rename_entry *re, *re1; if (req->count < 0 || req->count > UBI_MAX_RNVOL) return -EINVAL; if (req->count == 0) return 0; /* Validate volume IDs and names in the request */ for (i = 0; i < req->count; i++) { if (req->ents[i].vol_id < 0 || req->ents[i].vol_id >= ubi->vtbl_slots) return -EINVAL; if (req->ents[i].name_len < 0) return -EINVAL; if (req->ents[i].name_len > UBI_VOL_NAME_MAX) return -ENAMETOOLONG; req->ents[i].name[req->ents[i].name_len] = '\0'; n = strlen(req->ents[i].name); if (n != req->ents[i].name_len) err = -EINVAL; } /* Make sure volume IDs and names are unique */ for (i = 0; i < req->count - 1; i++) { for (n = i + 1; n < req->count; n++) { if (req->ents[i].vol_id == req->ents[n].vol_id) { dbg_err("duplicated volume id %d", req->ents[i].vol_id); return -EINVAL; } if (!strcmp(req->ents[i].name, req->ents[n].name)) { dbg_err("duplicated volume name \"%s\"", req->ents[i].name); return -EINVAL; } } } /* Create the re-name list */ INIT_LIST_HEAD(&rename_list); for (i = 0; i < req->count; i++) { int vol_id = req->ents[i].vol_id; int name_len = req->ents[i].name_len; const char *name = req->ents[i].name; re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); if (!re) { err = -ENOMEM; goto out_free; } re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); if (IS_ERR(re->desc)) { err = PTR_ERR(re->desc); dbg_err("cannot open volume %d, error %d", vol_id, err); kfree(re); goto out_free; } /* Skip this re-naming if the name does not really change */ if (re->desc->vol->name_len == name_len && !memcmp(re->desc->vol->name, name, name_len)) { ubi_close_volume(re->desc); kfree(re); continue; } re->new_name_len = name_len; memcpy(re->new_name, name, name_len); list_add_tail(&re->list, &rename_list); dbg_msg("will rename volume %d from 
\"%s\" to \"%s\"", vol_id, re->desc->vol->name, name); } if (list_empty(&rename_list)) return 0; /* Find out the volumes which have to be removed */ list_for_each_entry(re, &rename_list, list) { struct ubi_volume_desc *desc; int no_remove_needed = 0; /* * Volume @re->vol_id is going to be re-named to * @re->new_name, while its current name is @name. If a volume * with name @re->new_name currently exists, it has to be * removed, unless it is also re-named in the request (@req). */ list_for_each_entry(re1, &rename_list, list) { if (re->new_name_len == re1->desc->vol->name_len && !memcmp(re->new_name, re1->desc->vol->name, re1->desc->vol->name_len)) { no_remove_needed = 1; break; } } if (no_remove_needed) continue; /* * It seems we need to remove volume with name @re->new_name, * if it exists. */ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); if (err == -ENODEV) /* Re-naming into a non-existing volume name */ continue; /* The volume exists but busy, or an error occurred */ dbg_err("cannot open volume \"%s\", error %d", re->new_name, err); goto out_free; } re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); if (!re1) { err = -ENOMEM; ubi_close_volume(desc); goto out_free; } re1->remove = 1; re1->desc = desc; list_add(&re1->list, &rename_list); dbg_msg("will remove volume %d, name \"%s\"", re1->desc->vol->vol_id, re1->desc->vol->name); } mutex_lock(&ubi->device_mutex); err = ubi_rename_volumes(ubi, &rename_list); mutex_unlock(&ubi->device_mutex); out_free: list_for_each_entry_safe(re, re1, &rename_list, list) { ubi_close_volume(re->desc); list_del(&re->list); kfree(re); } return err; } static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; struct ubi_device *ubi; struct ubi_volume_desc *desc; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; ubi = ubi_get_by_major(imajor(file->f_mapping->host)); if (!ubi) return -ENODEV; 
switch (cmd) { /* Create volume command */ case UBI_IOCMKVOL: { struct ubi_mkvol_req req; dbg_gen("create volume"); err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); if (err) { err = -EFAULT; break; } err = verify_mkvol_req(ubi, &req); if (err) break; mutex_lock(&ubi->device_mutex); err = ubi_create_volume(ubi, &req); mutex_unlock(&ubi->device_mutex); if (err) break; err = put_user(req.vol_id, (__user int32_t *)argp); if (err) err = -EFAULT; break; } /* Remove volume command */ case UBI_IOCRMVOL: { int vol_id; dbg_gen("remove volume"); err = get_user(vol_id, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); break; } mutex_lock(&ubi->device_mutex); err = ubi_remove_volume(desc, 0); mutex_unlock(&ubi->device_mutex); /* * The volume is deleted (unless an error occurred), and the * 'struct ubi_volume' object will be freed when * 'ubi_close_volume()' will call 'put_device()'. 
*/ ubi_close_volume(desc); break; } /* Re-size volume command */ case UBI_IOCRSVOL: { int pebs; struct ubi_rsvol_req req; dbg_gen("re-size volume"); err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); if (err) { err = -EFAULT; break; } err = verify_rsvol_req(ubi, &req); if (err) break; desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); break; } pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, desc->vol->usable_leb_size); mutex_lock(&ubi->device_mutex); err = ubi_resize_volume(desc, pebs); mutex_unlock(&ubi->device_mutex); ubi_close_volume(desc); break; } /* Re-name volumes command */ case UBI_IOCRNVOL: { struct ubi_rnvol_req *req; dbg_msg("re-name volumes"); req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); if (!req) { err = -ENOMEM; break; }; err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); if (err) { err = -EFAULT; kfree(req); break; } err = rename_volumes(ubi, req); kfree(req); break; } default: err = -ENOTTY; break; } ubi_put_device(ubi); return err; } static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; switch (cmd) { /* Attach an MTD device command */ case UBI_IOCATT: { struct ubi_attach_req req; struct mtd_info *mtd; dbg_gen("attach MTD device"); err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); if (err) { err = -EFAULT; break; } if (req.mtd_num < 0 || (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { err = -EINVAL; break; } mtd = get_mtd_device(NULL, req.mtd_num); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); break; } /* * Note, further request verification is done by * 'ubi_attach_mtd_dev()'. 
*/ mutex_lock(&ubi_devices_mutex); err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset); mutex_unlock(&ubi_devices_mutex); if (err < 0) put_mtd_device(mtd); else /* @err contains UBI device number */ err = put_user(err, (__user int32_t *)argp); break; } /* Detach an MTD device command */ case UBI_IOCDET: { int ubi_num; dbg_gen("dettach MTD device"); err = get_user(ubi_num, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } mutex_lock(&ubi_devices_mutex); err = ubi_detach_mtd_dev(ubi_num, 0); mutex_unlock(&ubi_devices_mutex); break; } default: err = -ENOTTY; break; } return err; } #ifdef CONFIG_COMPAT static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long translated_arg = (unsigned long)compat_ptr(arg); return vol_cdev_ioctl(file, cmd, translated_arg); } static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long translated_arg = (unsigned long)compat_ptr(arg); return ubi_cdev_ioctl(file, cmd, translated_arg); } static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long translated_arg = (unsigned long)compat_ptr(arg); return ctrl_cdev_ioctl(file, cmd, translated_arg); } #else #define vol_cdev_compat_ioctl NULL #define ubi_cdev_compat_ioctl NULL #define ctrl_cdev_compat_ioctl NULL #endif /* UBI volume character device operations */ const struct file_operations ubi_vol_cdev_operations = { .owner = THIS_MODULE, .open = vol_cdev_open, .release = vol_cdev_release, .llseek = vol_cdev_llseek, .read = vol_cdev_read, .write = vol_cdev_write, .fsync = vol_cdev_fsync, .unlocked_ioctl = vol_cdev_ioctl, .compat_ioctl = vol_cdev_compat_ioctl, }; /* UBI character device operations */ const struct file_operations ubi_cdev_operations = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = ubi_cdev_ioctl, .compat_ioctl = ubi_cdev_compat_ioctl, }; /* UBI control character device operations */ const struct 
file_operations ubi_ctrl_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ctrl_cdev_ioctl, .compat_ioctl = ctrl_cdev_compat_ioctl, .llseek = no_llseek, };
gpl-2.0
StarKissed/starkissed-kernel-roth
drivers/staging/iio/dac/ad5360.c
4878
13856
/* * Analog devices AD5360, AD5361, AD5362, AD5363, AD5370, AD5371, AD5373 * multi-channel Digital to Analog Converters driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/device.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/regulator/consumer.h> #include "../iio.h" #include "../sysfs.h" #include "dac.h" #define AD5360_CMD(x) ((x) << 22) #define AD5360_ADDR(x) ((x) << 16) #define AD5360_READBACK_TYPE(x) ((x) << 13) #define AD5360_READBACK_ADDR(x) ((x) << 7) #define AD5360_CHAN_ADDR(chan) ((chan) + 0x8) #define AD5360_CMD_WRITE_DATA 0x3 #define AD5360_CMD_WRITE_OFFSET 0x2 #define AD5360_CMD_WRITE_GAIN 0x1 #define AD5360_CMD_SPECIAL_FUNCTION 0x0 /* Special function register addresses */ #define AD5360_REG_SF_NOP 0x0 #define AD5360_REG_SF_CTRL 0x1 #define AD5360_REG_SF_OFS(x) (0x2 + (x)) #define AD5360_REG_SF_READBACK 0x5 #define AD5360_SF_CTRL_PWR_DOWN BIT(0) #define AD5360_READBACK_X1A 0x0 #define AD5360_READBACK_X1B 0x1 #define AD5360_READBACK_OFFSET 0x2 #define AD5360_READBACK_GAIN 0x3 #define AD5360_READBACK_SF 0x4 /** * struct ad5360_chip_info - chip specific information * @channel_template: channel specification template * @num_channels: number of channels * @channels_per_group: number of channels per group * @num_vrefs: number of vref supplies for the chip */ struct ad5360_chip_info { struct iio_chan_spec channel_template; unsigned int num_channels; unsigned int channels_per_group; unsigned int num_vrefs; }; /** * struct ad5360_state - driver instance specific data * @spi: spi_device * @chip_info: chip model specific constants, available modes etc * @vref_reg: vref supply regulators * @ctrl: control register cache * @data: spi transfer buffers */ struct ad5360_state { struct spi_device *spi; const struct ad5360_chip_info *chip_info; struct regulator_bulk_data vref_reg[3]; unsigned int ctrl; /* * 
DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ union { __be32 d32; u8 d8[4]; } data[2] ____cacheline_aligned; }; enum ad5360_type { ID_AD5360, ID_AD5361, ID_AD5362, ID_AD5363, ID_AD5370, ID_AD5371, ID_AD5372, ID_AD5373, }; #define AD5360_CHANNEL(bits) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \ .scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \ } static const struct ad5360_chip_info ad5360_chip_info_tbl[] = { [ID_AD5360] = { .channel_template = AD5360_CHANNEL(16), .num_channels = 16, .channels_per_group = 8, .num_vrefs = 2, }, [ID_AD5361] = { .channel_template = AD5360_CHANNEL(14), .num_channels = 16, .channels_per_group = 8, .num_vrefs = 2, }, [ID_AD5362] = { .channel_template = AD5360_CHANNEL(16), .num_channels = 8, .channels_per_group = 4, .num_vrefs = 2, }, [ID_AD5363] = { .channel_template = AD5360_CHANNEL(14), .num_channels = 8, .channels_per_group = 4, .num_vrefs = 2, }, [ID_AD5370] = { .channel_template = AD5360_CHANNEL(16), .num_channels = 40, .channels_per_group = 8, .num_vrefs = 2, }, [ID_AD5371] = { .channel_template = AD5360_CHANNEL(14), .num_channels = 40, .channels_per_group = 8, .num_vrefs = 3, }, [ID_AD5372] = { .channel_template = AD5360_CHANNEL(16), .num_channels = 32, .channels_per_group = 8, .num_vrefs = 2, }, [ID_AD5373] = { .channel_template = AD5360_CHANNEL(14), .num_channels = 32, .channels_per_group = 8, .num_vrefs = 2, }, }; static unsigned int ad5360_get_channel_vref_index(struct ad5360_state *st, unsigned int channel) { unsigned int i; /* The first groups have their own vref, while the remaining groups * share the last vref */ i = channel / st->chip_info->channels_per_group; if (i >= st->chip_info->num_vrefs) i = st->chip_info->num_vrefs - 1; return i; } static int ad5360_get_channel_vref(struct 
ad5360_state *st, unsigned int channel) { unsigned int i = ad5360_get_channel_vref_index(st, channel); return regulator_get_voltage(st->vref_reg[i].consumer); } static int ad5360_write_unlocked(struct iio_dev *indio_dev, unsigned int cmd, unsigned int addr, unsigned int val, unsigned int shift) { struct ad5360_state *st = iio_priv(indio_dev); val <<= shift; val |= AD5360_CMD(cmd) | AD5360_ADDR(addr); st->data[0].d32 = cpu_to_be32(val); return spi_write(st->spi, &st->data[0].d8[1], 3); } static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd, unsigned int addr, unsigned int val, unsigned int shift) { int ret; mutex_lock(&indio_dev->mlock); ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift); mutex_unlock(&indio_dev->mlock); return ret; } static int ad5360_read(struct iio_dev *indio_dev, unsigned int type, unsigned int addr) { struct ad5360_state *st = iio_priv(indio_dev); struct spi_message m; int ret; struct spi_transfer t[] = { { .tx_buf = &st->data[0].d8[1], .len = 3, .cs_change = 1, }, { .rx_buf = &st->data[1].d8[1], .len = 3, }, }; spi_message_init(&m); spi_message_add_tail(&t[0], &m); spi_message_add_tail(&t[1], &m); mutex_lock(&indio_dev->mlock); st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) | AD5360_ADDR(AD5360_REG_SF_READBACK) | AD5360_READBACK_TYPE(type) | AD5360_READBACK_ADDR(addr)); ret = spi_sync(st->spi, &m); if (ret >= 0) ret = be32_to_cpu(st->data[1].d32) & 0xffff; mutex_unlock(&indio_dev->mlock); return ret; } static ssize_t ad5360_read_dac_powerdown(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5360_state *st = iio_priv(indio_dev); return sprintf(buf, "%d\n", (bool)(st->ctrl & AD5360_SF_CTRL_PWR_DOWN)); } static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5360_state *st = iio_priv(indio_dev); unsigned int ret; mutex_lock(&indio_dev->mlock); st->ctrl |= set; st->ctrl &= ~clr; 
ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION, AD5360_REG_SF_CTRL, st->ctrl, 0); mutex_unlock(&indio_dev->mlock); return ret; } static ssize_t ad5360_write_dac_powerdown(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); bool pwr_down; int ret; ret = strtobool(buf, &pwr_down); if (ret) return ret; if (pwr_down) ret = ad5360_update_ctrl(indio_dev, AD5360_SF_CTRL_PWR_DOWN, 0); else ret = ad5360_update_ctrl(indio_dev, 0, AD5360_SF_CTRL_PWR_DOWN); return ret ? ret : len; } static IIO_DEVICE_ATTR(out_voltage_powerdown, S_IRUGO | S_IWUSR, ad5360_read_dac_powerdown, ad5360_write_dac_powerdown, 0); static struct attribute *ad5360_attributes[] = { &iio_dev_attr_out_voltage_powerdown.dev_attr.attr, NULL, }; static const struct attribute_group ad5360_attribute_group = { .attrs = ad5360_attributes, }; static int ad5360_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct ad5360_state *st = iio_priv(indio_dev); int max_val = (1 << chan->scan_type.realbits); unsigned int ofs_index; switch (mask) { case 0: if (val >= max_val || val < 0) return -EINVAL; return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA, chan->address, val, chan->scan_type.shift); case IIO_CHAN_INFO_CALIBBIAS: if (val >= max_val || val < 0) return -EINVAL; return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET, chan->address, val, chan->scan_type.shift); case IIO_CHAN_INFO_CALIBSCALE: if (val >= max_val || val < 0) return -EINVAL; return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN, chan->address, val, chan->scan_type.shift); case IIO_CHAN_INFO_OFFSET: if (val <= -max_val || val > 0) return -EINVAL; val = -val; /* offset is supposed to have the same scale as raw, but it * is always 14bits wide, so on a chip where the raw value has * more bits, we need to shift offset. 
*/ val >>= (chan->scan_type.realbits - 14); /* There is one DAC offset register per vref. Changing one * channels offset will also change the offset for all other * channels which share the same vref supply. */ ofs_index = ad5360_get_channel_vref_index(st, chan->channel); return ad5360_write(indio_dev, AD5360_CMD_SPECIAL_FUNCTION, AD5360_REG_SF_OFS(ofs_index), val, 0); default: break; } return -EINVAL; } static int ad5360_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad5360_state *st = iio_priv(indio_dev); unsigned int ofs_index; int scale_uv; int ret; switch (m) { case 0: ret = ad5360_read(indio_dev, AD5360_READBACK_X1A, chan->address); if (ret < 0) return ret; *val = ret >> chan->scan_type.shift; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* vout = 4 * vref * dac_code */ scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100; if (scale_uv < 0) return scale_uv; scale_uv >>= (chan->scan_type.realbits); *val = scale_uv / 100000; *val2 = (scale_uv % 100000) * 10; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_CALIBBIAS: ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET, chan->address); if (ret < 0) return ret; *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_CALIBSCALE: ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN, chan->address); if (ret < 0) return ret; *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: ofs_index = ad5360_get_channel_vref_index(st, chan->channel); ret = ad5360_read(indio_dev, AD5360_READBACK_SF, AD5360_REG_SF_OFS(ofs_index)); if (ret < 0) return ret; ret <<= (chan->scan_type.realbits - 14); *val = -ret; return IIO_VAL_INT; } return -EINVAL; } static const struct iio_info ad5360_info = { .read_raw = ad5360_read_raw, .write_raw = ad5360_write_raw, .attrs = &ad5360_attribute_group, .driver_module = THIS_MODULE, }; static const char * const ad5360_vref_name[] = { "vref0", "vref1", "vref2" }; static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev) 
{ struct ad5360_state *st = iio_priv(indio_dev); struct iio_chan_spec *channels; unsigned int i; channels = kcalloc(st->chip_info->num_channels, sizeof(struct iio_chan_spec), GFP_KERNEL); if (!channels) return -ENOMEM; for (i = 0; i < st->chip_info->num_channels; ++i) { channels[i] = st->chip_info->channel_template; channels[i].channel = i; channels[i].address = AD5360_CHAN_ADDR(i); } indio_dev->channels = channels; return 0; } static int __devinit ad5360_probe(struct spi_device *spi) { enum ad5360_type type = spi_get_device_id(spi)->driver_data; struct iio_dev *indio_dev; struct ad5360_state *st; unsigned int i; int ret; indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { dev_err(&spi->dev, "Failed to allocate iio device\n"); return -ENOMEM; } st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); st->chip_info = &ad5360_chip_info_tbl[type]; st->spi = spi; indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->info = &ad5360_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->num_channels = st->chip_info->num_channels; ret = ad5360_alloc_channels(indio_dev); if (ret) { dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret); goto error_free; } for (i = 0; i < st->chip_info->num_vrefs; ++i) st->vref_reg[i].supply = ad5360_vref_name[i]; ret = regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs, st->vref_reg); if (ret) { dev_err(&spi->dev, "Failed to request vref regulators: %d\n", ret); goto error_free_channels; } ret = regulator_bulk_enable(st->chip_info->num_vrefs, st->vref_reg); if (ret) { dev_err(&spi->dev, "Failed to enable vref regulators: %d\n", ret); goto error_free_reg; } ret = iio_device_register(indio_dev); if (ret) { dev_err(&spi->dev, "Failed to register iio device: %d\n", ret); goto error_disable_reg; } return 0; error_disable_reg: regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg); error_free_reg: regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg); 
error_free_channels: kfree(indio_dev->channels); error_free: iio_free_device(indio_dev); return ret; } static int __devexit ad5360_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5360_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); kfree(indio_dev->channels); regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg); regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg); iio_free_device(indio_dev); return 0; } static const struct spi_device_id ad5360_ids[] = { { "ad5360", ID_AD5360 }, { "ad5361", ID_AD5361 }, { "ad5362", ID_AD5362 }, { "ad5363", ID_AD5363 }, { "ad5370", ID_AD5370 }, { "ad5371", ID_AD5371 }, { "ad5372", ID_AD5372 }, { "ad5373", ID_AD5373 }, {} }; MODULE_DEVICE_TABLE(spi, ad5360_ids); static struct spi_driver ad5360_driver = { .driver = { .name = "ad5360", .owner = THIS_MODULE, }, .probe = ad5360_probe, .remove = __devexit_p(ad5360_remove), .id_table = ad5360_ids, }; module_spi_driver(ad5360_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC"); MODULE_LICENSE("GPL v2");
gpl-2.0
devil1210/EvilKernel
drivers/scsi/aic7xxx/aic7xxx_osm.c
5134
73156
/* * Adaptec AIC7xxx device driver for Linux. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $ * * Copyright (c) 1994 John Aycock * The University of Calgary Department of Computer Science. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide, * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux, * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual, * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the * ANSI SCSI-2 specification (draft 10c), ... * * -------------------------------------------------------------------------- * * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org): * * Substantially modified to include support for wide and twin bus * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes, * SCB paging, and other rework of the code. * * -------------------------------------------------------------------------- * Copyright (c) 1994-2000 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* *--------------------------------------------------------------------------- * * Thanks also go to (in alphabetical order) the following: * * Rory Bolt - Sequencer bug fixes * Jay Estabrook - Initial DEC Alpha support * Doug Ledford - Much needed abort/reset bug fixes * Kai Makisara - DMAing of SCBs * * A Boot time option was also added for not resetting the scsi bus. * * Form: aic7xxx=extended * aic7xxx=no_reset * aic7xxx=verbose * * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97 * * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp */ /* * Further driver modifications made by Doug Ledford <dledford@redhat.com> * * Copyright (c) 1997-1999 Doug Ledford * * These changes are released under the same licensing terms as the FreeBSD * driver written by Justin Gibbs. Please see his Copyright notice above * for the exact terms and conditions covering my changes as well as the * warranty statement. * * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include * but are not limited to: * * 1: Import of the latest FreeBSD sequencer code for this driver * 2: Modification of kernel code to accommodate different sequencer semantics * 3: Extensive changes throughout kernel portion of driver to improve * abort/reset processing and error hanndling * 4: Other work contributed by various people on the Internet * 5: Changes to printk information and verbosity selection code * 6: General reliability related changes, especially in IRQ management * 7: Modifications to the default probe/attach order for supported cards * 8: SMP friendliness has been improved * */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include <scsi/scsicam.h> static struct scsi_transport_template *ahc_linux_transport_template = NULL; #include <linux/init.h> /* __setup */ #include <linux/mm.h> /* For fetching system memory size */ #include <linux/blkdev.h> /* For block_size() */ #include <linux/delay.h> /* For ssleep/msleep */ #include <linux/slab.h> /* * Set this to the delay 
in seconds after SCSI bus reset. * Note, we honor this only for the initial bus reset. * The scsi error recovery code performs its own bus settle * delay handling for error recovery actions. */ #ifdef CONFIG_AIC7XXX_RESET_DELAY_MS #define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS #else #define AIC7XXX_RESET_DELAY 5000 #endif /* * Control collection of SCSI transfer statistics for the /proc filesystem. * * NOTE: Do NOT enable this when running on kernels version 1.2.x and below. * NOTE: This does affect performance since it has to maintain statistics. */ #ifdef CONFIG_AIC7XXX_PROC_STATS #define AIC7XXX_PROC_STATS #endif /* * To change the default number of tagged transactions allowed per-device, * add a line to the lilo.conf file like: * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" * which will result in the first four devices on the first two * controllers being set to a tagged queue depth of 32. * * The tag_commands is an array of 16 to allow for wide and twin adapters. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15 * for channel 1. */ typedef struct { uint8_t tag_commands[16]; /* Allow for wide/twin adapters. */ } adapter_tag_info_t; /* * Modify this as you see fit for your system. * * 0 tagged queuing disabled * 1 <= n <= 253 n == max tags ever dispatched. * * The driver will throttle the number of commands dispatched to a * device if it returns queue full. For devices with a fixed maximum * queue depth, the driver will eventually determine this depth and * lock it in (a console message is printed to indicate that a lock * has occurred). On some devices, queue full is returned for a temporary * resource shortage. These devices will return queue full at varying * depths. The driver will throttle back when the queue fulls occur and * attempt to slowly increase the depth over time as the device recovers * from the resource shortage. 
* * In this example, the first line will disable tagged queueing for all * the devices on the first probed aic7xxx adapter. * * The second line enables tagged queueing with 4 commands/LUN for IDs * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the * driver to attempt to use up to 64 tags for ID 1. * * The third line is the same as the first line. * * The fourth line disables tagged queueing for devices 0 and 3. It * enables tagged queueing for the other IDs, with 16 commands/LUN * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for * IDs 2, 5-7, and 9-15. */ /* * NOTE: The below structure is for reference only, the actual structure * to modify in order to change things is just below this comment block. adapter_tag_info_t aic7xxx_tag_info[] = { {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}}, {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} }; */ #ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE #define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE #else #define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE #endif #define AIC7XXX_CONFIGED_TAG_COMMANDS { \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \ } /* * By default, use the number of commands specified by * the users kernel configuration. 
*/ static adapter_tag_info_t aic7xxx_tag_info[] = { {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS} }; /* * There should be a specific return value for this in scsi.h, but * it seems that most drivers ignore it. */ #define DID_UNDERFLOW DID_ERROR void ahc_print_path(struct ahc_softc *ahc, struct scb *scb) { printk("(scsi%d:%c:%d:%d): ", ahc->platform_data->host->host_no, scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X', scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1, scb != NULL ? SCB_GET_LUN(scb) : -1); } /* * XXX - these options apply unilaterally to _all_ 274x/284x/294x * cards in the system. This should be fixed. Exceptions to this * rule are noted in the comments. */ /* * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This * has no effect on any later resets that might occur due to things like * SCSI bus timeouts. */ static uint32_t aic7xxx_no_reset; /* * Should we force EXTENDED translation on a controller. * 0 == Use whatever is in the SEEPROM or default to off * 1 == Use whatever is in the SEEPROM or default to on */ static uint32_t aic7xxx_extended; /* * PCI bus parity checking of the Adaptec controllers. This is somewhat * dubious at best. To my knowledge, this option has never actually * solved a PCI parity problem, but on certain machines with broken PCI * chipset configurations where stray PCI transactions with bad parity are * the norm rather than the exception, the error messages can be overwhelming. * It's included in the driver for completeness. 
* 0 = Shut off PCI parity check * non-0 = reverse polarity pci parity checking */ static uint32_t aic7xxx_pci_parity = ~0; /* * There are lots of broken chipsets in the world. Some of them will * violate the PCI spec when we issue byte sized memory writes to our * controller. I/O mapped register access, if allowed by the given * platform, will work in almost all cases. */ uint32_t aic7xxx_allow_memio = ~0; /* * So that we can set how long each device is given as a selection timeout. * The table of values goes like this: * 0 - 256ms * 1 - 128ms * 2 - 64ms * 3 - 32ms * We default to 256ms because some older devices need a longer time * to respond to initial selection. */ static uint32_t aic7xxx_seltime; /* * Certain devices do not perform any aging on commands. Should the * device be saturated by commands in one portion of the disk, it is * possible for transactions on far away sectors to never be serviced. * To handle these devices, we can periodically send an ordered tag to * force all outstanding transactions to be serviced prior to a new * transaction. */ static uint32_t aic7xxx_periodic_otag; /* * Module information and settable options. 
*/ static char *aic7xxx = NULL; MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>"); MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(AIC7XXX_DRIVER_VERSION); module_param(aic7xxx, charp, 0444); MODULE_PARM_DESC(aic7xxx, "period-delimited options string:\n" " verbose Enable verbose/diagnostic logging\n" " allow_memio Allow device registers to be memory mapped\n" " debug Bitmask of debug values to enable\n" " no_probe Toggle EISA/VLB controller probing\n" " probe_eisa_vl Toggle EISA/VLB controller probing\n" " no_reset Suppress initial bus resets\n" " extended Enable extended geometry on all controllers\n" " periodic_otag Send an ordered tagged transaction\n" " periodically to prevent tag starvation.\n" " This may be required by some older disk\n" " drives or RAID arrays.\n" " tag_info:<tag_str> Set per-target tag depth\n" " global_tag_depth:<int> Global tag depth for every target\n" " on every bus\n" " seltime:<int> Selection Timeout\n" " (0/256ms,1/128ms,2/64ms,3/32ms)\n" "\n" " Sample modprobe configuration file:\n" " # Toggle EISA/VLB probing\n" " # Set tag depth on Controller 1/Target 1 to 10 tags\n" " # Shorten the selection timeout to 128ms\n" "\n" " options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n" ); static void ahc_linux_handle_scsi_status(struct ahc_softc *, struct scsi_device *, struct scb *); static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd); static void ahc_linux_freeze_simq(struct ahc_softc *ahc); static void ahc_linux_release_simq(struct ahc_softc *ahc); static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag); static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_linux_device_queue_depth(struct scsi_device *); static int ahc_linux_run_command(struct ahc_softc*, struct 
ahc_linux_device *, struct scsi_cmnd *); static void ahc_linux_setup_tag_info_global(char *p); static int aic7xxx_setup(char *s); static int ahc_linux_unit; /************************** OS Utility Wrappers *******************************/ void ahc_delay(long usec) { /* * udelay on Linux can have problems for * multi-millisecond waits. Wait at most * 1024us per call. */ while (usec > 0) { udelay(usec % 1024); usec -= 1024; } } /***************************** Low Level I/O **********************************/ uint8_t ahc_inb(struct ahc_softc * ahc, long port) { uint8_t x; if (ahc->tag == BUS_SPACE_MEMIO) { x = readb(ahc->bsh.maddr + port); } else { x = inb(ahc->bsh.ioport + port); } mb(); return (x); } void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val) { if (ahc->tag == BUS_SPACE_MEMIO) { writeb(val, ahc->bsh.maddr + port); } else { outb(val, ahc->bsh.ioport + port); } mb(); } void ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. */ for (i = 0; i < count; i++) ahc_outb(ahc, port, *array++); } void ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. 
*/ for (i = 0; i < count; i++) *array++ = ahc_inb(ahc, port); } /********************************* Inlines ************************************/ static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len); static void ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) { struct scsi_cmnd *cmd; cmd = scb->io_ctx; ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); scsi_dma_unmap(cmd); } static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) { int consumed; if ((scb->sg_count + 1) > AHC_NSEG) panic("Too few segs for dma mapping. " "Increase AHC_NSEG\n"); consumed = 1; sg->addr = ahc_htole32(addr & 0xFFFFFFFF); scb->platform_data->xfer_len += len; if (sizeof(dma_addr_t) > 4 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0) len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK; sg->len = ahc_htole32(len); return (consumed); } /* * Return a string describing the driver. */ static const char * ahc_linux_info(struct Scsi_Host *host) { static char buffer[512]; char ahc_info[256]; char *bp; struct ahc_softc *ahc; bp = &buffer[0]; ahc = *(struct ahc_softc **)host->hostdata; memset(bp, 0, sizeof(buffer)); strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n" " <"); strcat(bp, ahc->description); strcat(bp, ">\n" " "); ahc_controller_info(ahc, ahc_info); strcat(bp, ahc_info); strcat(bp, "\n"); return (bp); } /* * Queue an SCB to the controller. 
*/ static int ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) { struct ahc_softc *ahc; struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); int rtn = SCSI_MLQUEUE_HOST_BUSY; unsigned long flags; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; ahc_lock(ahc, &flags); if (ahc->platform_data->qfrozen == 0) { cmd->scsi_done = scsi_done; cmd->result = CAM_REQ_INPROG << 16; rtn = ahc_linux_run_command(ahc, dev, cmd); } ahc_unlock(ahc, &flags); return rtn; } static DEF_SCSI_QCMD(ahc_linux_queue) static inline struct scsi_target ** ahc_linux_target_in_softc(struct scsi_target *starget) { struct ahc_softc *ahc = *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); unsigned int target_offset; target_offset = starget->id; if (starget->channel != 0) target_offset += 8; return &ahc->platform_data->starget[target_offset]; } static int ahc_linux_target_alloc(struct scsi_target *starget) { struct ahc_softc *ahc = *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); struct seeprom_config *sc = ahc->seep_config; unsigned long flags; struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); unsigned short scsirate; struct ahc_devinfo devinfo; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; char channel = starget->channel + 'A'; unsigned int our_id = ahc->our_id; unsigned int target_offset; target_offset = starget->id; if (starget->channel != 0) target_offset += 8; if (starget->channel) our_id = ahc->our_id_b; ahc_lock(ahc, &flags); BUG_ON(*ahc_targp != NULL); *ahc_targp = starget; if (sc) { int maxsync = AHC_SYNCRATE_DT; int ultra = 0; int flags = sc->device_flags[target_offset]; if (ahc->flags & AHC_NEWEEPROM_FMT) { if (flags & CFSYNCHISULTRA) ultra = 1; } else if (flags & CFULTRAEN) ultra = 1; /* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04 * change it to ultra=0, CFXFER = 0 */ if(ultra && (flags & CFXFER) == 0x04) { ultra = 0; flags &= ~CFXFER; } if 
((ahc->features & AHC_ULTRA2) != 0) { scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0); } else { scsirate = (flags & CFXFER) << 4; maxsync = ultra ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST; } spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; if (!(flags & CFSYNCH)) spi_max_offset(starget) = 0; spi_min_period(starget) = ahc_find_period(ahc, scsirate, maxsync); tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, starget->id, &tstate); } ahc_compile_devinfo(&devinfo, our_id, starget->id, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0, AHC_TRANS_GOAL, /*paused*/FALSE); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_GOAL, /*paused*/FALSE); ahc_unlock(ahc, &flags); return 0; } static void ahc_linux_target_destroy(struct scsi_target *starget) { struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); *ahc_targp = NULL; } static int ahc_linux_slave_alloc(struct scsi_device *sdev) { struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); struct scsi_target *starget = sdev->sdev_target; struct ahc_linux_device *dev; if (bootverbose) printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id); dev = scsi_transport_device_data(sdev); memset(dev, 0, sizeof(*dev)); /* * We start out life using untagged * transactions of which we allow one. */ dev->openings = 1; /* * Set maxtags to 0. This will be changed if we * later determine that we are dealing with * a tagged queuing capable device. */ dev->maxtags = 0; spi_period(starget) = 0; return 0; } static int ahc_linux_slave_configure(struct scsi_device *sdev) { struct ahc_softc *ahc; ahc = *((struct ahc_softc **)sdev->host->hostdata); if (bootverbose) sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); ahc_linux_device_queue_depth(sdev); /* Initial Domain Validation */ if (!spi_initial_dv(sdev->sdev_target)) spi_dv_device(sdev); return 0; } #if defined(__i386__) /* * Return the disk geometry for the given SCSI device. 
*/ static int ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { uint8_t *bh; int heads; int sectors; int cylinders; int ret; int extended; struct ahc_softc *ahc; u_int channel; ahc = *((struct ahc_softc **)sdev->host->hostdata); channel = sdev_channel(sdev); bh = scsi_bios_ptable(bdev); if (bh) { ret = scsi_partsize(bh, capacity, &geom[2], &geom[0], &geom[1]); kfree(bh); if (ret != -1) return (ret); } heads = 64; sectors = 32; cylinders = aic_sector_div(capacity, heads, sectors); if (aic7xxx_extended != 0) extended = 1; else if (channel == 0) extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0; else extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0; if (extended && cylinders >= 1024) { heads = 255; sectors = 63; cylinders = aic_sector_div(capacity, heads, sectors); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return (0); } #endif /* * Abort the current SCSI command(s). */ static int ahc_linux_abort(struct scsi_cmnd *cmd) { int error; error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT); if (error != 0) printk("aic7xxx_abort returns 0x%x\n", error); return (error); } /* * Attempt to send a target reset message to the device that timed out. */ static int ahc_linux_dev_reset(struct scsi_cmnd *cmd) { int error; error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET); if (error != 0) printk("aic7xxx_dev_reset returns 0x%x\n", error); return (error); } /* * Reset the SCSI bus. */ static int ahc_linux_bus_reset(struct scsi_cmnd *cmd) { struct ahc_softc *ahc; int found; unsigned long flags; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; ahc_lock(ahc, &flags); found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A', /*initiate reset*/TRUE); ahc_unlock(ahc, &flags); if (bootverbose) printk("%s: SCSI bus reset delivered. 
" "%d SCBs aborted.\n", ahc_name(ahc), found); return SUCCESS; } struct scsi_host_template aic7xxx_driver_template = { .module = THIS_MODULE, .name = "aic7xxx", .proc_name = "aic7xxx", .proc_info = ahc_linux_proc_info, .info = ahc_linux_info, .queuecommand = ahc_linux_queue, .eh_abort_handler = ahc_linux_abort, .eh_device_reset_handler = ahc_linux_dev_reset, .eh_bus_reset_handler = ahc_linux_bus_reset, #if defined(__i386__) .bios_param = ahc_linux_biosparam, #endif .can_queue = AHC_MAX_QUEUE, .this_id = -1, .max_sectors = 8192, .cmd_per_lun = 2, .use_clustering = ENABLE_CLUSTERING, .slave_alloc = ahc_linux_slave_alloc, .slave_configure = ahc_linux_slave_configure, .target_alloc = ahc_linux_target_alloc, .target_destroy = ahc_linux_target_destroy, }; /**************************** Tasklet Handler *********************************/ /******************************** Macros **************************************/ #define BUILD_SCSIID(ahc, cmd) \ ((((cmd)->device->id << TID_SHIFT) & TID) \ | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \ | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB)) /******************************** Bus DMA *************************************/ int ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, dma_addr_t lowaddr, dma_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag) { bus_dma_tag_t dmat; dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC); if (dmat == NULL) return (ENOMEM); /* * Linux is very simplistic about DMA memory. For now don't * maintain all specification information. Once Linux supplies * better facilities for doing these operations, or the * needs of this particular driver change, we might need to do * more here. 
 */
	dmat->alignment = alignment;
	dmat->boundary = boundary;
	dmat->maxsize = maxsize;
	*ret_tag = dmat;
	return (0);
}

/* Release a DMA tag allocated by ahc_dma_tag_create(). */
void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	kfree(dmat);
}

/*
 * Allocate a coherent DMA buffer of dmat->maxsize bytes.
 * On success *vaddr holds the kernel virtual address and *mapp the
 * bus address (used as the "map" handle).  Returns ENOMEM on failure.
 */
int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
		 int flags, bus_dmamap_t *mapp)
{
	*vaddr = pci_alloc_consistent(ahc->dev_softc,
				      dmat->maxsize, mapp);
	if (*vaddr == NULL)
		return ENOMEM;
	return 0;
}

/* Free a coherent DMA buffer allocated by ahc_dmamem_alloc(). */
void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map);
}

/*
 * "Load" a DMA map: synchronously invokes the callback with a single
 * segment covering the whole buffer (the map handle is already the
 * bus address from ahc_dmamem_alloc()).
 */
int
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

	stack_sg.ds_addr = map;
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}

/* No per-map state is kept on Linux, so destroy is a no-op. */
void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
}

int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}

/*
 * Boot/module option handler: set the tag depth for every target on
 * every controller from the "global_tag_depth" option string.
 * p points at the character following the option name; the value is
 * clamped to 8 bits.
 */
static void
ahc_linux_setup_tag_info_global(char *p)
{
	int tags, i, j;

	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
	printk("Setting Global Tags= %d\n", tags);

	for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) {
		for (j = 0; j < AHC_NUM_TARGETS; j++) {
			aic7xxx_tag_info[i].tag_commands[j] = tags;
		}
	}
}

/*
 * Per-target tag depth callback for the brace-option parser:
 * stores value (clamped to 8 bits) for controller "instance",
 * target "targ".  Out-of-range indices are silently ignored.
 */
static void
ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
{

	if ((instance >= 0) && (targ >= 0)
	 && (instance < ARRAY_SIZE(aic7xxx_tag_info))
	 && (targ < AHC_NUM_TARGETS)) {
		aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
		if (bootverbose)
			printk("tag_info[%d:%d] = %d\n", instance, targ, value);
	}
}

static char *
ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
		       void (*callback)(u_long, int, int,
int32_t), u_long callback_arg) { char *tok_end; char *tok_end2; int i; int instance; int targ; int done; char tok_list[] = {'.', ',', '{', '}', '\0'}; /* All options use a ':' name/arg separator */ if (*opt_arg != ':') return (opt_arg); opt_arg++; instance = -1; targ = -1; done = FALSE; /* * Restore separator that may be in * the middle of our option argument. */ tok_end = strchr(opt_arg, '\0'); if (tok_end < end) *tok_end = ','; while (!done) { switch (*opt_arg) { case '{': if (instance == -1) { instance = 0; } else { if (depth > 1) { if (targ == -1) targ = 0; } else { printk("Malformed Option %s\n", opt_name); done = TRUE; } } opt_arg++; break; case '}': if (targ != -1) targ = -1; else if (instance != -1) instance = -1; opt_arg++; break; case ',': case '.': if (instance == -1) done = TRUE; else if (targ >= 0) targ++; else if (instance >= 0) instance++; opt_arg++; break; case '\0': done = TRUE; break; default: tok_end = end; for (i = 0; tok_list[i]; i++) { tok_end2 = strchr(opt_arg, tok_list[i]); if ((tok_end2) && (tok_end2 < tok_end)) tok_end = tok_end2; } callback(callback_arg, instance, targ, simple_strtol(opt_arg, NULL, 0)); opt_arg = tok_end; break; } } return (opt_arg); } /* * Handle Linux boot parameters. This routine allows for assigning a value * to a parameter with a ':' between the parameter and the value. * ie. 
aic7xxx=stpwlev:1,extended */ static int aic7xxx_setup(char *s) { int i, n; char *p; char *end; static const struct { const char *name; uint32_t *flag; } options[] = { { "extended", &aic7xxx_extended }, { "no_reset", &aic7xxx_no_reset }, { "verbose", &aic7xxx_verbose }, { "allow_memio", &aic7xxx_allow_memio}, #ifdef AHC_DEBUG { "debug", &ahc_debug }, #endif { "periodic_otag", &aic7xxx_periodic_otag }, { "pci_parity", &aic7xxx_pci_parity }, { "seltime", &aic7xxx_seltime }, { "tag_info", NULL }, { "global_tag_depth", NULL }, { "dv", NULL } }; end = strchr(s, '\0'); /* * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE * will never be 0 in this case. */ n = 0; while ((p = strsep(&s, ",.")) != NULL) { if (*p == '\0') continue; for (i = 0; i < ARRAY_SIZE(options); i++) { n = strlen(options[i].name); if (strncmp(options[i].name, p, n) == 0) break; } if (i == ARRAY_SIZE(options)) continue; if (strncmp(p, "global_tag_depth", n) == 0) { ahc_linux_setup_tag_info_global(p + n); } else if (strncmp(p, "tag_info", n) == 0) { s = ahc_parse_brace_option("tag_info", p + n, end, 2, ahc_linux_setup_tag_info, 0); } else if (p[n] == ':') { *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); } else if (strncmp(p, "verbose", n) == 0) { *(options[i].flag) = 1; } else { *(options[i].flag) ^= 0xFFFFFFFF; } } return 1; } __setup("aic7xxx=", aic7xxx_setup); uint32_t aic7xxx_verbose; int ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template) { char buf[80]; struct Scsi_Host *host; char *new_name; u_long s; int retval; template->name = ahc->description; host = scsi_host_alloc(template, sizeof(struct ahc_softc *)); if (host == NULL) return (ENOMEM); *((struct ahc_softc **)host->hostdata) = ahc; ahc->platform_data->host = host; host->can_queue = AHC_MAX_QUEUE; host->cmd_per_lun = 2; /* XXX No way to communicate the ID for multiple channels */ host->this_id = ahc->our_id; host->irq = ahc->platform_data->irq; host->max_id = (ahc->features & AHC_WIDE) ? 
16 : 8; host->max_lun = AHC_NUM_LUNS; host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0; host->sg_tablesize = AHC_NSEG; ahc_lock(ahc, &s); ahc_set_unit(ahc, ahc_linux_unit++); ahc_unlock(ahc, &s); sprintf(buf, "scsi%d", host->host_no); new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); if (new_name != NULL) { strcpy(new_name, buf); ahc_set_name(ahc, new_name); } host->unique_id = ahc->unit; ahc_linux_initialize_scsi_bus(ahc); ahc_intr_enable(ahc, TRUE); host->transportt = ahc_linux_transport_template; retval = scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); if (retval) { printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); scsi_host_put(host); return retval; } scsi_scan_host(host); return 0; } /* * Place the SCSI bus into a known state by either resetting it, * or forcing transfer negotiations on the next command to any * target. */ void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc) { int i; int numtarg; unsigned long s; i = 0; numtarg = 0; ahc_lock(ahc, &s); if (aic7xxx_no_reset != 0) ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B); if ((ahc->flags & AHC_RESET_BUS_A) != 0) ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE); else numtarg = (ahc->features & AHC_WIDE) ? 16 : 8; if ((ahc->features & AHC_TWIN) != 0) { if ((ahc->flags & AHC_RESET_BUS_B) != 0) { ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE); } else { if (numtarg == 0) i = 8; numtarg += 8; } } /* * Force negotiation to async for all targets that * will not see an initial bus reset. 
*/ for (; i < numtarg; i++) { struct ahc_devinfo devinfo; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); ahc_compile_devinfo(&devinfo, our_id, target_id, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS); } ahc_unlock(ahc, &s); /* Give the bus some time to recover */ if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) { ahc_linux_freeze_simq(ahc); msleep(AIC7XXX_RESET_DELAY); ahc_linux_release_simq(ahc); } } int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) { ahc->platform_data = kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC); if (ahc->platform_data == NULL) return (ENOMEM); memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data)); ahc->platform_data->irq = AHC_LINUX_NOIRQ; ahc_lockinit(ahc); ahc->seltime = (aic7xxx_seltime & 0x3) << 4; ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; if (aic7xxx_pci_parity == 0) ahc->flags |= AHC_DISABLE_PCI_PERR; return (0); } void ahc_platform_free(struct ahc_softc *ahc) { struct scsi_target *starget; int i; if (ahc->platform_data != NULL) { /* destroy all of the device and target objects */ for (i = 0; i < AHC_NUM_TARGETS; i++) { starget = ahc->platform_data->starget[i]; if (starget != NULL) { ahc->platform_data->starget[i] = NULL; } } if (ahc->platform_data->irq != AHC_LINUX_NOIRQ) free_irq(ahc->platform_data->irq, ahc); if (ahc->tag == BUS_SPACE_PIO && ahc->bsh.ioport != 0) release_region(ahc->bsh.ioport, 256); if (ahc->tag == BUS_SPACE_MEMIO && ahc->bsh.maddr != NULL) { iounmap(ahc->bsh.maddr); release_mem_region(ahc->platform_data->mem_busaddr, 0x1000); } if (ahc->platform_data->host) scsi_host_put(ahc->platform_data->host); 
kfree(ahc->platform_data); } } void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ); } void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { struct ahc_linux_device *dev; int was_queuing; int now_queuing; if (sdev == NULL) return; dev = scsi_transport_device_data(sdev); was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED); switch (alg) { default: case AHC_QUEUE_NONE: now_queuing = 0; break; case AHC_QUEUE_BASIC: now_queuing = AHC_DEV_Q_BASIC; break; case AHC_QUEUE_TAGGED: now_queuing = AHC_DEV_Q_TAGGED; break; } if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0 && (was_queuing != now_queuing) && (dev->active != 0)) { dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY; dev->qfrozen++; } dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG); if (now_queuing) { u_int usertags; usertags = ahc_linux_user_tagdepth(ahc, devinfo); if (!was_queuing) { /* * Start out aggressively and allow our * dynamic queue depth algorithm to take * care of the rest. */ dev->maxtags = usertags; dev->openings = dev->maxtags - dev->active; } if (dev->maxtags == 0) { /* * Queueing is disabled by the user. */ dev->openings = 1; } else if (alg == AHC_QUEUE_TAGGED) { dev->flags |= AHC_DEV_Q_TAGGED; if (aic7xxx_periodic_otag != 0) dev->flags |= AHC_DEV_PERIODIC_OTAG; } else dev->flags |= AHC_DEV_Q_BASIC; } else { /* We can only have one opening. 
*/ dev->maxtags = 0; dev->openings = 1 - dev->active; } switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { case AHC_DEV_Q_BASIC: scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); scsi_activate_tcq(sdev, dev->openings + dev->active); break; case AHC_DEV_Q_TAGGED: scsi_set_tag_type(sdev, MSG_ORDERED_TAG); scsi_activate_tcq(sdev, dev->openings + dev->active); break; default: /* * We allow the OS to queue 2 untagged transactions to * us at any time even though we can only execute them * serially on the controller/device. This should * remove some latency. */ scsi_deactivate_tcq(sdev, 2); break; } } int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { return 0; } static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { static int warned_user; u_int tags; tags = 0; if ((ahc->user_discenable & devinfo->target_mask) != 0) { if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) { if (warned_user == 0) { printk(KERN_WARNING "aic7xxx: WARNING: Insufficient tag_info instances\n" "aic7xxx: for installed controllers. Using defaults\n" "aic7xxx: Please update the aic7xxx_tag_info array in\n" "aic7xxx: the aic7xxx_osm..c source file.\n"); warned_user++; } tags = AHC_MAX_QUEUE; } else { adapter_tag_info_t *tag_info; tag_info = &aic7xxx_tag_info[ahc->unit]; tags = tag_info->tag_commands[devinfo->target_offset]; if (tags > AHC_MAX_QUEUE) tags = AHC_MAX_QUEUE; } } return (tags); } /* * Determines the queue depth for a given device. */ static void ahc_linux_device_queue_depth(struct scsi_device *sdev) { struct ahc_devinfo devinfo; u_int tags; struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); ahc_compile_devinfo(&devinfo, sdev->sdev_target->channel == 0 ? ahc->our_id : ahc->our_id_b, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 
'A' : 'B', ROLE_INITIATOR); tags = ahc_linux_user_tagdepth(ahc, &devinfo); if (tags != 0 && sdev->tagged_supported != 0) { ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED); ahc_send_async(ahc, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); ahc_print_devinfo(ahc, &devinfo); printk("Tagged Queuing enabled. Depth %d\n", tags); } else { ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE); ahc_send_async(ahc, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); } } static int ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, struct scsi_cmnd *cmd) { struct scb *scb; struct hardware_scb *hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; uint16_t mask; struct scb_tailq *untagged_q = NULL; int nseg; /* * Schedule us to run later. The only reason we are not * running is because the whole controller Q is frozen. */ if (ahc->platform_data->qfrozen != 0) return SCSI_MLQUEUE_HOST_BUSY; /* * We only allow one untagged transaction * per target in the initiator role unless * we are storing a full busy target *lun* * table in SCB space. */ if (!blk_rq_tagged(cmd->request) && (ahc->features & AHC_SCB_BTT) == 0) { int target_offset; target_offset = cmd->device->id + cmd->device->channel * 8; untagged_q = &(ahc->untagged_queues[target_offset]); if (!TAILQ_EMPTY(untagged_q)) /* if we're already executing an untagged command * we're busy to another */ return SCSI_MLQUEUE_DEVICE_BUSY; } nseg = scsi_dma_map(cmd); if (nseg < 0) return SCSI_MLQUEUE_HOST_BUSY; /* * Get an scb to use. */ scb = ahc_get_scb(ahc); if (!scb) { scsi_dma_unmap(cmd); return SCSI_MLQUEUE_HOST_BUSY; } scb->io_ctx = cmd; scb->platform_data->dev = dev; hscb = scb->hscb; cmd->host_scribble = (char *)scb; /* * Fill out basics of the HSCB. 
*/ hscb->control = 0; hscb->scsiid = BUILD_SCSIID(ahc, cmd); hscb->lun = cmd->device->lun; mask = SCB_GET_TARGET_MASK(ahc, scb); tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahc, scb), &tstate); hscb->scsirate = tinfo->scsirate; hscb->scsioffset = tinfo->curr.offset; if ((tstate->ultraenb & mask) != 0) hscb->control |= ULTRAENB; if ((ahc->user_discenable & mask) != 0) hscb->control |= DISCENB; if ((tstate->auto_negotiate & mask) != 0) { scb->flags |= SCB_AUTO_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { int msg_bytes; uint8_t tag_msgs[2]; msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { hscb->control |= tag_msgs[0]; if (tag_msgs[0] == MSG_ORDERED_TASK) dev->commands_since_idle_or_otag = 0; } else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { hscb->control |= MSG_ORDERED_TASK; dev->commands_since_idle_or_otag = 0; } else { hscb->control |= MSG_SIMPLE_TASK; } } hscb->cdb_len = cmd->cmd_len; if (hscb->cdb_len <= 12) { memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); } else { memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); scb->flags |= SCB_CDB32_PTR; } scb->platform_data->xfer_len = 0; ahc_set_residual(scb, 0); ahc_set_sense_residual(scb, 0); scb->sg_count = 0; if (nseg > 0) { struct ahc_dma_seg *sg; struct scatterlist *cur_seg; int i; /* Copy the segments into the SG list. */ sg = scb->sg_list; /* * The sg_count may be larger than nseg if * a transfer crosses a 32bit page. */ scsi_for_each_sg(cmd, cur_seg, nseg, i) { dma_addr_t addr; bus_size_t len; int consumed; addr = sg_dma_address(cur_seg); len = sg_dma_len(cur_seg); consumed = ahc_linux_map_seg(ahc, scb, sg, addr, len); sg += consumed; scb->sg_count += consumed; } sg--; sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); /* * Reset the sg list pointer. 
*/ scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); /* * Copy the first SG into the "current" * data pointer area. */ scb->hscb->dataptr = scb->sg_list->addr; scb->hscb->datacnt = scb->sg_list->len; } else { scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; scb->sg_count = 0; } LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); dev->openings--; dev->active++; dev->commands_issued++; if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) dev->commands_since_idle_or_otag++; scb->flags |= SCB_ACTIVE; if (untagged_q) { TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_queue_scb(ahc, scb); return 0; } /* * SCSI controller interrupt handler. */ irqreturn_t ahc_linux_isr(int irq, void *dev_id) { struct ahc_softc *ahc; u_long flags; int ours; ahc = (struct ahc_softc *) dev_id; ahc_lock(ahc, &flags); ours = ahc_intr(ahc); ahc_unlock(ahc, &flags); return IRQ_RETVAL(ours); } void ahc_platform_flushwork(struct ahc_softc *ahc) { } void ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun, ac_code code) { switch (code) { case AC_TRANSFER_NEG: { char buf[80]; struct scsi_target *starget; struct ahc_linux_target *targ; struct info_str info; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int target_offset; unsigned int target_ppr_options; BUG_ON(target == CAM_TARGET_WILDCARD); info.buffer = buf; info.length = sizeof(buf); info.offset = 0; info.pos = 0; tinfo = ahc_fetch_transinfo(ahc, channel, channel == 'A' ? ahc->our_id : ahc->our_id_b, target, &tstate); /* * Don't bother reporting results while * negotiations are still pending. */ if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options) if (bootverbose == 0) break; /* * Don't bother reporting results that * are identical to those last reported. 
*/ target_offset = target; if (channel == 'B') target_offset += 8; starget = ahc->platform_data->starget[target_offset]; if (starget == NULL) break; targ = scsi_transport_target_data(starget); target_ppr_options = (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0) + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0); if (tinfo->curr.period == spi_period(starget) && tinfo->curr.width == spi_width(starget) && tinfo->curr.offset == spi_offset(starget) && tinfo->curr.ppr_options == target_ppr_options) if (bootverbose == 0) break; spi_period(starget) = tinfo->curr.period; spi_width(starget) = tinfo->curr.width; spi_offset(starget) = tinfo->curr.offset; spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; spi_display_xfer_agreement(starget); break; } case AC_SENT_BDR: { WARN_ON(lun != CAM_LUN_WILDCARD); scsi_report_device_reset(ahc->platform_data->host, channel - 'A', target); break; } case AC_BUS_RESET: if (ahc->platform_data->host != NULL) { scsi_report_bus_reset(ahc->platform_data->host, channel - 'A'); } break; default: panic("ahc_send_async: Unexpected async event"); } } /* * Calls the higher level scsi done function and frees the scb. */ void ahc_done(struct ahc_softc *ahc, struct scb *scb) { struct scsi_cmnd *cmd; struct ahc_linux_device *dev; LIST_REMOVE(scb, pending_links); if ((scb->flags & SCB_UNTAGGEDQ) != 0) { struct scb_tailq *untagged_q; int target_offset; target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); untagged_q = &(ahc->untagged_queues[target_offset]); TAILQ_REMOVE(untagged_q, scb, links.tqe); BUG_ON(!TAILQ_EMPTY(untagged_q)); } else if ((scb->flags & SCB_ACTIVE) == 0) { /* * Transactions aborted from the untagged queue may * not have been dispatched to the controller, so * only check the SCB_ACTIVE flag for tagged transactions. 
*/ printk("SCB %d done'd twice\n", scb->hscb->tag); ahc_dump_card_state(ahc); panic("Stopping for safety"); } cmd = scb->io_ctx; dev = scb->platform_data->dev; dev->active--; dev->openings++; if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) { cmd->result &= ~(CAM_DEV_QFRZN << 16); dev->qfrozen--; } ahc_linux_unmap_scb(ahc, scb); /* * Guard against stale sense data. * The Linux mid-layer assumes that sense * was retrieved anytime the first byte of * the sense buffer looks "sane". */ cmd->sense_buffer[0] = 0; if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { uint32_t amount_xferred; amount_xferred = ahc_get_transfer_length(scb) - ahc_get_residual(scb); if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printk("Set CAM_UNCOR_PARITY\n"); } #endif ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); #ifdef AHC_REPORT_UNDERFLOWS /* * This code is disabled by default as some * clients of the SCSI system do not properly * initialize the underflow parameter. This * results in spurious termination of commands * that complete as expected (e.g. underflow is * allowed as command can return variable amounts * of data. */ } else if (amount_xferred < scb->io_ctx->underflow) { u_int i; ahc_print_path(ahc, scb); printk("CDB:"); for (i = 0; i < scb->io_ctx->cmd_len; i++) printk(" 0x%x", scb->io_ctx->cmnd[i]); printk("\n"); ahc_print_path(ahc, scb); printk("Saw underflow (%ld of %ld bytes). 
" "Treated as error\n", ahc_get_residual(scb), ahc_get_transfer_length(scb)); ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); #endif } else { ahc_set_transaction_status(scb, CAM_REQ_CMP); } } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { ahc_linux_handle_scsi_status(ahc, cmd->device, scb); } if (dev->openings == 1 && ahc_get_transaction_status(scb) == CAM_REQ_CMP && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL) dev->tag_success_count++; /* * Some devices deal with temporary internal resource * shortages by returning queue full. When the queue * full occurrs, we throttle back. Slowly try to get * back to our previous queue depth. */ if ((dev->openings + dev->active) < dev->maxtags && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) { dev->tag_success_count = 0; dev->openings++; } if (dev->active == 0) dev->commands_since_idle_or_otag = 0; if ((scb->flags & SCB_RECOVERY_SCB) != 0) { printk("Recovery SCB completes\n"); if (ahc_get_transaction_status(scb) == CAM_BDR_SENT || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); if (ahc->platform_data->eh_done) complete(ahc->platform_data->eh_done); } ahc_free_scb(ahc, scb); ahc_linux_queue_cmd_complete(ahc, cmd); } static void ahc_linux_handle_scsi_status(struct ahc_softc *ahc, struct scsi_device *sdev, struct scb *scb) { struct ahc_devinfo devinfo; struct ahc_linux_device *dev = scsi_transport_device_data(sdev); ahc_compile_devinfo(&devinfo, ahc->our_id, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 'A' : 'B', ROLE_INITIATOR); /* * We don't currently trust the mid-layer to * properly deal with queue full or busy. So, * when one occurs, we tell the mid-layer to * unconditionally requeue the command to us * so that we can retry it ourselves. We also * implement our own throttling mechanism so * we don't clobber the device with too many * commands. 
*/ switch (ahc_get_scsi_status(scb)) { default: break; case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: { struct scsi_cmnd *cmd; /* * Copy sense information to the OS's cmd * structure if it is available. */ cmd = scb->io_ctx; if (scb->flags & SCB_SENSE) { u_int sense_size; sense_size = min(sizeof(struct scsi_sense_data) - ahc_get_sense_residual(scb), (u_long)SCSI_SENSE_BUFFERSIZE); memcpy(cmd->sense_buffer, ahc_get_sense_buf(ahc, scb), sense_size); if (sense_size < SCSI_SENSE_BUFFERSIZE) memset(&cmd->sense_buffer[sense_size], 0, SCSI_SENSE_BUFFERSIZE - sense_size); cmd->result |= (DRIVER_SENSE << 24); #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { int i; printk("Copied %d bytes of sense data:", sense_size); for (i = 0; i < sense_size; i++) { if ((i & 0xF) == 0) printk("\n"); printk("0x%x ", cmd->sense_buffer[i]); } printk("\n"); } #endif } break; } case SCSI_STATUS_QUEUE_FULL: { /* * By the time the core driver has returned this * command, all other commands that were queued * to us but not the device have been returned. * This ensures that dev->active is equal to * the number of commands actually queued to * the device. */ dev->tag_success_count = 0; if (dev->active != 0) { /* * Drop our opening count to the number * of commands currently outstanding. */ dev->openings = 0; /* ahc_print_path(ahc, scb); printk("Dropping tag count to %d\n", dev->active); */ if (dev->active == dev->tags_on_last_queuefull) { dev->last_queuefull_same_count++; /* * If we repeatedly see a queue full * at the same queue depth, this * device has a fixed number of tag * slots. Lock in this tag depth * so we stop seeing queue fulls from * this device. 
*/ if (dev->last_queuefull_same_count == AHC_LOCK_TAGS_COUNT) { dev->maxtags = dev->active; ahc_print_path(ahc, scb); printk("Locking max tag count at %d\n", dev->active); } } else { dev->tags_on_last_queuefull = dev->active; dev->last_queuefull_same_count = 0; } ahc_set_transaction_status(scb, CAM_REQUEUE_REQ); ahc_set_scsi_status(scb, SCSI_STATUS_OK); ahc_platform_set_tags(ahc, sdev, &devinfo, (dev->flags & AHC_DEV_Q_BASIC) ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); break; } /* * Drop down to a single opening, and treat this * as if the target returned BUSY SCSI status. */ dev->openings = 1; ahc_set_scsi_status(scb, SCSI_STATUS_BUSY); ahc_platform_set_tags(ahc, sdev, &devinfo, (dev->flags & AHC_DEV_Q_BASIC) ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); break; } } } static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd) { /* * Map CAM error codes into Linux Error codes. We * avoid the conversion so that the DV code has the * full error information available when making * state change decisions. 
 */
	{
		/* Translate the CAM completion status stored in the
		 * command into the corresponding Linux DID_* host code. */
		u_int new_status;

		switch (ahc_cmd_get_transaction_status(cmd)) {
		case CAM_REQ_INPROG:
		case CAM_REQ_CMP:
		case CAM_SCSI_STATUS_ERROR:
			new_status = DID_OK;
			break;
		case CAM_REQ_ABORTED:
			new_status = DID_ABORT;
			break;
		case CAM_BUSY:
			new_status = DID_BUS_BUSY;
			break;
		case CAM_REQ_INVALID:
		case CAM_PATH_INVALID:
			new_status = DID_BAD_TARGET;
			break;
		case CAM_SEL_TIMEOUT:
			new_status = DID_NO_CONNECT;
			break;
		case CAM_SCSI_BUS_RESET:
		case CAM_BDR_SENT:
			new_status = DID_RESET;
			break;
		case CAM_UNCOR_PARITY:
			new_status = DID_PARITY;
			break;
		case CAM_CMD_TIMEOUT:
			new_status = DID_TIME_OUT;
			break;
		case CAM_UA_ABORT:
		case CAM_REQ_CMP_ERR:
		case CAM_AUTOSENSE_FAIL:
		case CAM_NO_HBA:
		case CAM_DATA_RUN_ERR:
		case CAM_UNEXP_BUSFREE:
		case CAM_SEQUENCE_FAIL:
		case CAM_CCB_LEN_ERR:
		case CAM_PROVIDE_FAIL:
		case CAM_REQ_TERMIO:
		case CAM_UNREC_HBA_ERROR:
		case CAM_REQ_TOO_BIG:
			new_status = DID_ERROR;
			break;
		case CAM_REQUEUE_REQ:
			new_status = DID_REQUEUE;
			break;
		default:
			/* We should never get here */
			new_status = DID_ERROR;
			break;
		}

		ahc_cmd_set_transaction_status(cmd, new_status);
	}

	/* Hand the completed command back to the SCSI mid-layer. */
	cmd->scsi_done(cmd);
}

/*
 * Freeze the controller queue: on the first freeze, block new requests
 * from the mid-layer and requeue everything we currently own.
 * Balanced by ahc_linux_release_simq().
 */
static void
ahc_linux_freeze_simq(struct ahc_softc *ahc)
{
	unsigned long s;

	ahc_lock(ahc, &s);
	ahc->platform_data->qfrozen++;
	if (ahc->platform_data->qfrozen == 1) {
		scsi_block_requests(ahc->platform_data->host);

		/* XXX What about Twin channels? */
		ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
					CAM_LUN_WILDCARD, SCB_LIST_NULL,
					ROLE_INITIATOR, CAM_REQUEUE_REQ);
	}
	ahc_unlock(ahc, &s);
}

/*
 * Drop one freeze reference; when the count reaches zero, unblock the
 * mid-layer queue (outside the adapter lock — see the race note below).
 */
static void
ahc_linux_release_simq(struct ahc_softc *ahc)
{
	u_long s;
	int    unblock_reqs;

	unblock_reqs = 0;
	ahc_lock(ahc, &s);
	if (ahc->platform_data->qfrozen > 0)
		ahc->platform_data->qfrozen--;
	if (ahc->platform_data->qfrozen == 0)
		unblock_reqs = 1;
	ahc_unlock(ahc, &s);
	/*
	 * There is still a race here.  The mid-layer
	 * should keep its own freeze count and use
	 * a bottom half handler to run the queues
	 * so we can unblock with our own lock held.
*/ if (unblock_reqs) scsi_unblock_requests(ahc->platform_data->host); } static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) { struct ahc_softc *ahc; struct ahc_linux_device *dev; struct scb *pending_scb; u_int saved_scbptr; u_int active_scb_index; u_int last_phase; u_int saved_scsiid; u_int cdb_byte; int retval; int was_paused; int paused; int wait; int disconnected; unsigned long flags; pending_scb = NULL; paused = FALSE; wait = FALSE; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n", flag == SCB_ABORT ? "n ABORT" : " TARGET RESET"); printk("CDB:"); for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) printk(" 0x%x", cmd->cmnd[cdb_byte]); printk("\n"); ahc_lock(ahc, &flags); /* * First determine if we currently own this command. * Start by searching the device queue. If not found * there, check the pending_scb list. If not found * at all, and the system wanted us to just abort the * command, return success. */ dev = scsi_transport_device_data(cmd->device); if (dev == NULL) { /* * No target device for this command exists, * so we must not still own the command. */ printk("%s:%d:%d:%d: Is not an active device\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, cmd->device->lun); retval = SUCCESS; goto no_cmd; } if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { printk("%s:%d:%d:%d: Command found on untagged queue\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, cmd->device->lun); retval = SUCCESS; goto done; } /* * See if we can find a matching cmd in the pending list. 
*/ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { if (pending_scb->io_ctx == cmd) break; } if (pending_scb == NULL && flag == SCB_DEVICE_RESET) { /* Any SCB for this device will do for a target reset */ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd), scmd_channel(cmd) + 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) break; } } if (pending_scb == NULL) { scmd_printk(KERN_INFO, cmd, "Command not found\n"); goto no_cmd; } if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) { /* * We can't queue two recovery actions using the same SCB */ retval = FAILED; goto done; } /* * Ensure that the card doesn't do anything * behind our back and that we didn't "just" miss * an interrupt that would affect this cmd. */ was_paused = ahc_is_paused(ahc); ahc_pause_and_flushwork(ahc); paused = TRUE; if ((pending_scb->flags & SCB_ACTIVE) == 0) { scmd_printk(KERN_INFO, cmd, "Command already completed\n"); goto no_cmd; } printk("%s: At time of recovery, card was %spaused\n", ahc_name(ahc), was_paused ? 
"" : "not "); ahc_dump_card_state(ahc); disconnected = TRUE; if (flag == SCB_ABORT) { if (ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, ROLE_INITIATOR, CAM_REQ_ABORTED, SEARCH_COMPLETE) > 0) { printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, cmd->device->lun); retval = SUCCESS; goto done; } } else if (ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_COUNT) > 0) { disconnected = FALSE; } if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) { struct scb *bus_scb; bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG)); if (bus_scb == pending_scb) disconnected = FALSE; else if (flag != SCB_ABORT && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb)) disconnected = FALSE; } /* * At this point, pending_scb is the scb associated with the * passed in command. That command is currently active on the * bus, is in the disconnected state, or we're hoping to find * a command for the same target active on the bus to abuse to * send a BDR. Queue the appropriate message based on which of * these states we are in. */ last_phase = ahc_inb(ahc, LASTPHASE); saved_scbptr = ahc_inb(ahc, SCBPTR); active_scb_index = ahc_inb(ahc, SCB_TAG); saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); if (last_phase != P_BUSFREE && (pending_scb->hscb->tag == active_scb_index || (flag == SCB_DEVICE_RESET && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) { /* * We're active on the bus, so assert ATN * and hope that the target responds. 
*/ pending_scb = ahc_lookup_scb(ahc, active_scb_index); pending_scb->flags |= SCB_RECOVERY_SCB|flag; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_outb(ahc, SCSISIGO, last_phase|ATNO); scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n"); wait = TRUE; } else if (disconnected) { /* * Actually re-queue this SCB in an attempt * to select the device before it reconnects. * In either case (selection or reselection), * we will now issue the approprate message * to the timed-out device. * * Set the MK_MESSAGE control bit indicating * that we desire to send a message. We * also set the disconnected flag since * in the paging case there is no guarantee * that our SCB control byte matches the * version on the card. We don't want the * sequencer to abort the command thinking * an unsolicited reselection occurred. */ pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED; pending_scb->flags |= SCB_RECOVERY_SCB|flag; /* * Remove any cached copy of this SCB in the * disconnected list in preparation for the * queuing of our abort SCB. We use the * same element in the SCB, SCB_NEXT, for * both the qinfifo and the disconnected list. */ ahc_search_disc_list(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, /*stop_on_first*/TRUE, /*remove*/TRUE, /*save_state*/FALSE); /* * In the non-paging case, the sequencer will * never re-reference the in-core SCB. * To make sure we are notified during * reselection, set the MK_MESSAGE flag in * the card's copy of the SCB. */ if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag); ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE); } /* * Clear out any entries in the QINFIFO first * so we are the next SCB for this target * to run. 
*/ ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_qinfifo_requeue_tail(ahc, pending_scb); ahc_outb(ahc, SCBPTR, saved_scbptr); ahc_print_path(ahc, pending_scb); printk("Device is disconnected, re-queuing SCB\n"); wait = TRUE; } else { scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); retval = FAILED; goto done; } no_cmd: /* * Our assumption is that if we don't have the command, no * recovery action was required, so we return success. Again, * the semantics of the mid-layer recovery engine are not * well defined, so this may change in time. */ retval = SUCCESS; done: if (paused) ahc_unpause(ahc); if (wait) { DECLARE_COMPLETION_ONSTACK(done); ahc->platform_data->eh_done = &done; ahc_unlock(ahc, &flags); printk("Recovery code sleeping\n"); if (!wait_for_completion_timeout(&done, 5 * HZ)) { ahc_lock(ahc, &flags); ahc->platform_data->eh_done = NULL; ahc_unlock(ahc, &flags); printk("Timer Expired\n"); retval = FAILED; } printk("Recovery code awake\n"); } else ahc_unlock(ahc, &flags); return (retval); } void ahc_platform_dump_card_state(struct ahc_softc *ahc) { } static void ahc_linux_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_devinfo devinfo; unsigned long flags; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahc_lock(ahc, &flags); ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_period(struct scsi_target *starget, int period) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, 
starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options; unsigned long flags; unsigned long offset = tinfo->goal.offset; const struct ahc_syncrate *syncrate; if (offset == 0) offset = MAX_OFFSET; if (period < 9) period = 9; /* 12.5ns is our minimum */ if (period == 9) { if (spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_DT_REQ; else /* need wide for DT and need DT for 12.5 ns */ period = 10; } ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); /* all PPR requests apart from QAS require wide transfers */ if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { if (spi_width(starget) == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; } syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_offset(struct scsi_target *starget, int offset) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = 0; unsigned int period = 0; unsigned long flags; const struct ahc_syncrate *syncrate = NULL; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); if (offset != 0) { syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); period = tinfo->goal.period; ppr_options = tinfo->goal.ppr_options; } ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_dt(struct scsi_target *starget, int dt) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct 
ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_DT_REQ; unsigned int period = tinfo->goal.period; unsigned int width = tinfo->goal.width; unsigned long flags; const struct ahc_syncrate *syncrate; if (dt && spi_max_width(starget)) { ppr_options |= MSG_EXT_PPR_DT_REQ; if (!width) ahc_linux_set_width(starget, 1); } else if (period == 9) period = 10; /* if resetting DT, period must be >= 25ns */ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } #if 0 /* FIXME: This code claims to support IU and QAS. However, the actual * sequencer code and aic7xxx_core have no support for these parameters and * will get into a bad state if they're negotiated. 
Do not enable this * unless you know what you're doing */ static void ahc_linux_set_qas(struct scsi_target *starget, int qas) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_QAS_REQ; unsigned int period = tinfo->goal.period; unsigned long flags; struct ahc_syncrate *syncrate; if (qas) ppr_options |= MSG_EXT_PPR_QAS_REQ; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_iu(struct scsi_target *starget, int iu) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_IU_REQ; unsigned int period = tinfo->goal.period; unsigned long flags; struct ahc_syncrate *syncrate; if (iu) ppr_options |= MSG_EXT_PPR_IU_REQ; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } #endif static void ahc_linux_get_signalling(struct Scsi_Host *shost) { struct 
ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; unsigned long flags; u8 mode; if (!(ahc->features & AHC_ULTRA2)) { /* non-LVD chipset, may not have SBLKCTL reg */ spi_signalling(shost) = ahc->features & AHC_HVD ? SPI_SIGNAL_HVD : SPI_SIGNAL_SE; return; } ahc_lock(ahc, &flags); ahc_pause(ahc); mode = ahc_inb(ahc, SBLKCTL); ahc_unpause(ahc); ahc_unlock(ahc, &flags); if (mode & ENAB40) spi_signalling(shost) = SPI_SIGNAL_LVD; else if (mode & ENAB20) spi_signalling(shost) = SPI_SIGNAL_SE; else spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; } static struct spi_function_template ahc_linux_transport_functions = { .set_offset = ahc_linux_set_offset, .show_offset = 1, .set_period = ahc_linux_set_period, .show_period = 1, .set_width = ahc_linux_set_width, .show_width = 1, .set_dt = ahc_linux_set_dt, .show_dt = 1, #if 0 .set_iu = ahc_linux_set_iu, .show_iu = 1, .set_qas = ahc_linux_set_qas, .show_qas = 1, #endif .get_signalling = ahc_linux_get_signalling, }; static int __init ahc_linux_init(void) { /* * If we've been passed any parameters, process them now. */ if (aic7xxx) aic7xxx_setup(aic7xxx); ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions); if (!ahc_linux_transport_template) return -ENODEV; scsi_transport_reserve_device(ahc_linux_transport_template, sizeof(struct ahc_linux_device)); ahc_linux_pci_init(); ahc_linux_eisa_init(); return 0; } static void ahc_linux_exit(void) { ahc_linux_pci_exit(); ahc_linux_eisa_exit(); spi_release_transport(ahc_linux_transport_template); } module_init(ahc_linux_init); module_exit(ahc_linux_exit);
gpl-2.0
pengdonglin137/linux-4.9
drivers/pcmcia/pxa2xx_mainstone.c
9742
4364
/* * linux/drivers/pcmcia/pxa2xx_mainstone.c * * Mainstone PCMCIA specific routines. * * Created: May 12, 2004 * Author: Nicolas Pitre * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <pcmcia/ss.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/pxa2xx-regs.h> #include <mach/mainstone.h> #include "soc_common.h" static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* * Setup default state of GPIO outputs * before we enable them as outputs. */ if (skt->nr == 0) { skt->socket.pci_irq = MAINSTONE_S0_IRQ; skt->stat[SOC_STAT_CD].irq = MAINSTONE_S0_CD_IRQ; skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD"; skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S0_STSCHG_IRQ; skt->stat[SOC_STAT_BVD1].name = "PCMCIA0 STSCHG"; } else { skt->socket.pci_irq = MAINSTONE_S1_IRQ; skt->stat[SOC_STAT_CD].irq = MAINSTONE_S1_CD_IRQ; skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD"; skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S1_STSCHG_IRQ; skt->stat[SOC_STAT_BVD1].name = "PCMCIA1 STSCHG"; } return 0; } static unsigned long mst_pcmcia_status[2]; static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned long status, flip; status = (skt->nr == 0) ? MST_PCMCIA0 : MST_PCMCIA1; flip = (status ^ mst_pcmcia_status[skt->nr]) & MST_PCMCIA_nSTSCHG_BVD1; /* * Workaround for STSCHG which can't be deasserted: * We therefore disable/enable corresponding IRQs * as needed to avoid IRQ locks. */ if (flip) { mst_pcmcia_status[skt->nr] = status; if (status & MST_PCMCIA_nSTSCHG_BVD1) enable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ : MAINSTONE_S1_STSCHG_IRQ ); else disable_irq( (skt->nr == 0) ? 
MAINSTONE_S0_STSCHG_IRQ : MAINSTONE_S1_STSCHG_IRQ ); } state->detect = (status & MST_PCMCIA_nCD) ? 0 : 1; state->ready = (status & MST_PCMCIA_nIRQ) ? 1 : 0; state->bvd1 = (status & MST_PCMCIA_nSTSCHG_BVD1) ? 1 : 0; state->bvd2 = (status & MST_PCMCIA_nSPKR_BVD2) ? 1 : 0; state->vs_3v = (status & MST_PCMCIA_nVS1) ? 0 : 1; state->vs_Xv = (status & MST_PCMCIA_nVS2) ? 0 : 1; } static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned long power = 0; int ret = 0; switch (state->Vcc) { case 0: power |= MST_PCMCIA_PWR_VCC_0; break; case 33: power |= MST_PCMCIA_PWR_VCC_33; break; case 50: power |= MST_PCMCIA_PWR_VCC_50; break; default: printk(KERN_ERR "%s(): bad Vcc %u\n", __func__, state->Vcc); ret = -1; } switch (state->Vpp) { case 0: power |= MST_PCMCIA_PWR_VPP_0; break; case 120: power |= MST_PCMCIA_PWR_VPP_120; break; default: if(state->Vpp == state->Vcc) { power |= MST_PCMCIA_PWR_VPP_VCC; } else { printk(KERN_ERR "%s(): bad Vpp %u\n", __func__, state->Vpp); ret = -1; } } if (state->flags & SS_RESET) power |= MST_PCMCIA_RESET; switch (skt->nr) { case 0: MST_PCMCIA0 = power; break; case 1: MST_PCMCIA1 = power; break; default: ret = -1; } return ret; } static struct pcmcia_low_level mst_pcmcia_ops __initdata = { .owner = THIS_MODULE, .hw_init = mst_pcmcia_hw_init, .socket_state = mst_pcmcia_socket_state, .configure_socket = mst_pcmcia_configure_socket, .nr = 2, }; static struct platform_device *mst_pcmcia_device; static int __init mst_pcmcia_init(void) { int ret; if (!machine_is_mainstone()) return -ENODEV; mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!mst_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops, sizeof(mst_pcmcia_ops)); if (ret == 0) ret = platform_device_add(mst_pcmcia_device); if (ret) platform_device_put(mst_pcmcia_device); return ret; } static void __exit mst_pcmcia_exit(void) { platform_device_unregister(mst_pcmcia_device); } 
fs_initcall(mst_pcmcia_init); module_exit(mst_pcmcia_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-pcmcia");
gpl-2.0
rbrune/android_kernel_google_steelhead_orig
drivers/media/dvb/b2c2/flexcop-fe-tuner.c
9998
16141
/*
 * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
 * flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling
 * see flexcop.c for copyright information
 */
#include <media/tuner.h>
#include "flexcop.h"
#include "mt312.h"
#include "stv0299.h"
#include "s5h1420.h"
#include "itd1000.h"
#include "cx24113.h"
#include "cx24123.h"
#include "isl6421.h"
#include "mt352.h"
#include "bcm3510.h"
#include "nxt200x.h"
#include "dvb-pll.h"
#include "lgdt330x.h"
#include "tuner-simple.h"
#include "stv0297.h"

/* Can we use the specified front-end?  Remember that if we are compiled
 * into the kernel we can't call code that's in modules. */
#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \
	(defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE)))

/* lnb control */
#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
/* Program the LNB supply/polarity bits in the FlexCop misc_204 register. */
static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct flexcop_device *fc = fe->dvb->priv;
	flexcop_ibi_value v;
	deb_tuner("polarity/voltage = %u\n", voltage);
	v = fc->read_ibi_reg(fc, misc_204);
	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		v.misc_204.ACPI1_sig = 1;
		break;
	case SEC_VOLTAGE_13:
		v.misc_204.ACPI1_sig = 0;
		v.misc_204.LNB_L_H_sig = 0;
		break;
	case SEC_VOLTAGE_18:
		v.misc_204.ACPI1_sig = 0;
		v.misc_204.LNB_L_H_sig = 1;
		break;
	default:
		err("unknown SEC_VOLTAGE value");
		return -EINVAL;
	}
	return fc->write_ibi_reg(fc, misc_204, v);
}
#endif

#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
/* Sleep wrapper: chain to the demod's original sleep hook (saved in
 * fc->fe_sleep by the attach routines) if one exists. */
static int flexcop_sleep(struct dvb_frontend* fe)
{
	struct flexcop_device *fc = fe->dvb->priv;
	if (fc->fe_sleep)
		return fc->fe_sleep(fe);
	return 0;
}
#endif

/* SkyStar2 DVB-S rev 2.3 */
#if FE_SUPPORTED(MT312) && FE_SUPPORTED(PLL)
/* Switch the 22kHz continuous tone on/off via the lnb_switch_freq_200
 * register; ax selects the high/low half-period counts. */
static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
	struct flexcop_device *fc = fe->dvb->priv;
	flexcop_ibi_value v;
	u16 ax;
	v.raw = 0;
	deb_tuner("tone = %u\n",tone);
	switch (tone) {
	case SEC_TONE_ON:
		ax = 0x01ff;
		break;
	case SEC_TONE_OFF:
		ax = 0;
		break;
	default:
		err("unknown SEC_TONE value");
		return -EINVAL;
	}
	v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */
	v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax;
	v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax;
	return fc->write_ibi_reg(fc,lnb_switch_freq_200,v);
}

/* Bit-bang one DiSEqC bit by gating the 22kHz tone with the
 * bit-dependent timing of the DiSEqC specification. */
static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data)
{
	flexcop_set_tone(fe, SEC_TONE_ON);
	udelay(data ? 500 : 1000);
	flexcop_set_tone(fe, SEC_TONE_OFF);
	udelay(data ? 1000 : 500);
}

/* Send one byte MSB first, followed by its odd-parity bit. */
static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data)
{
	int i, par = 1, d;
	for (i = 7; i >= 0; i--) {
		d = (data >> i) & 1;
		par ^= d;
		flexcop_diseqc_send_bit(fe, d);
	}
	flexcop_diseqc_send_bit(fe, par);
}

/* Send a DiSEqC message of len bytes, optionally followed by a
 * mini-command burst (burst != -1: 0xff byte or tone burst). */
static int flexcop_send_diseqc_msg(struct dvb_frontend *fe,
		int len, u8 *msg, unsigned long burst)
{
	int i;
	flexcop_set_tone(fe, SEC_TONE_OFF);
	mdelay(16);
	for (i = 0; i < len; i++)
		flexcop_diseqc_send_byte(fe,msg[i]);
	mdelay(16);
	if (burst != -1) {
		if (burst)
			flexcop_diseqc_send_byte(fe, 0xff);
		else {
			flexcop_set_tone(fe, SEC_TONE_ON);
			mdelay(12);
			udelay(500);
			flexcop_set_tone(fe, SEC_TONE_OFF);
		}
		msleep(20);
	}
	return 0;
}

/* dvb_frontend_ops.diseqc_send_master_cmd implementation. */
static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe,
		struct dvb_diseqc_master_cmd *cmd)
{
	return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0);
}

/* dvb_frontend_ops.diseqc_send_burst implementation. */
static int flexcop_diseqc_send_burst(struct dvb_frontend *fe,
		fe_sec_mini_cmd_t minicmd)
{
	return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd);
}

static struct mt312_config skystar23_samsung_tbdu18132_config = {
	.demod_address = 0x0e,
};

/* Attach MT312 demod + Samsung TBDU18132 PLL, then override the SEC
 * ops with the bit-banged FlexCop implementations above.
 * Returns 1 on success, 0 on failure (like all *_attach below). */
static int skystar2_rev23_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	struct dvb_frontend_ops *ops;

	fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c);
	if (!fc->fe)
		return 0;

	if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c,
			DVB_PLL_SAMSUNG_TBDU18132))
		return 0;

	ops = &fc->fe->ops;
	ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
	ops->diseqc_send_burst = flexcop_diseqc_send_burst;
	ops->set_tone = flexcop_set_tone;
	ops->set_voltage = flexcop_set_voltage;
	fc->fe_sleep = ops->sleep;
	ops->sleep = flexcop_sleep;
	return 1;
}
#else
#define skystar2_rev23_attach NULL
#endif

/* SkyStar2 DVB-S rev 2.6 */
#if FE_SUPPORTED(STV0299) && FE_SUPPORTED(PLL)
/* STV0299 callback: pick ACLK/BCLK dividers for the symbol rate band
 * and program the symbol-rate ratio registers. */
static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe,
		u32 srate, u32 ratio)
{
	u8 aclk = 0;
	u8 bclk = 0;

	if (srate < 1500000) {
		aclk = 0xb7; bclk = 0x47;
	} else if (srate < 3000000) {
		aclk = 0xb7; bclk = 0x4b;
	} else if (srate < 7000000) {
		aclk = 0xb7; bclk = 0x4f;
	} else if (srate < 14000000) {
		aclk = 0xb7; bclk = 0x53;
	} else if (srate < 30000000) {
		aclk = 0xb6; bclk = 0x53;
	} else if (srate < 45000000) {
		aclk = 0xb4; bclk = 0x51;
	}

	stv0299_writereg(fe, 0x13, aclk);
	stv0299_writereg(fe, 0x14, bclk);
	stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
	stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
	stv0299_writereg(fe, 0x21, ratio & 0xf0);
	return 0;
}

/* Register/value pairs for STV0299 init, 0xff/0xff terminated. */
static u8 samsung_tbmu24112_inittab[] = {
	0x01, 0x15,
	0x02, 0x30,
	0x03, 0x00,
	0x04, 0x7D,
	0x05, 0x35,
	0x06, 0x02,
	0x07, 0x00,
	0x08, 0xC3,
	0x0C, 0x00,
	0x0D, 0x81,
	0x0E, 0x23,
	0x0F, 0x12,
	0x10, 0x7E,
	0x11, 0x84,
	0x12, 0xB9,
	0x13, 0x88,
	0x14, 0x89,
	0x15, 0xC9,
	0x16, 0x00,
	0x17, 0x5C,
	0x18, 0x00,
	0x19, 0x00,
	0x1A, 0x00,
	0x1C, 0x00,
	0x1D, 0x00,
	0x1E, 0x00,
	0x1F, 0x3A,
	0x20, 0x2E,
	0x21, 0x80,
	0x22, 0xFF,
	0x23, 0xC1,
	0x28, 0x00,
	0x29, 0x1E,
	0x2A, 0x14,
	0x2B, 0x0F,
	0x2C, 0x09,
	0x2D, 0x05,
	0x31, 0x1F,
	0x32, 0x19,
	0x33, 0xFE,
	0x34, 0x93,
	0xff, 0xff,
};

static struct stv0299_config samsung_tbmu24112_config = {
	.demod_address = 0x68,
	.inittab = samsung_tbmu24112_inittab,
	.mclk = 88000000UL,
	.invert = 0,
	.skip_reinit = 0,
	.lock_output = STV0299_LOCKOUTPUT_LK,
	.volt13_op0_op1 = STV0299_VOLT13_OP1,
	.min_delay_ms = 100,
	.set_symbol_rate = samsung_tbmu24112_set_symbol_rate,
};

/* Attach STV0299 demod + Samsung TBMU24112 PLL; hook FlexCop LNB
 * voltage control and the sleep chain. */
static int skystar2_rev26_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
	if (!fc->fe)
		return 0;

	if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c,
			DVB_PLL_SAMSUNG_TBMU24112))
		return 0;

	fc->fe->ops.set_voltage = flexcop_set_voltage;
	fc->fe_sleep = fc->fe->ops.sleep;
	fc->fe->ops.sleep = flexcop_sleep;
	return 1;
}
#else
#define skystar2_rev26_attach NULL
#endif

/* SkyStar2 DVB-S rev 2.7 */
#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000)
static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
	.demod_address = 0x53,
	.invert = 1,
	.repeated_start_workaround = 1,
	.serial_mpeg = 1,
};

static struct itd1000_config skystar2_rev2_7_itd1000_config = {
	.i2c_address = 0x61,
};

/* Attach S5H1420 demod, ISL6421 LNB controller and ITD1000 tuner.
 * The no_base_addr workaround flags are unwound on every failure
 * path so subsequent probes see the adapters in their default state. */
static int skystar2_rev27_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	flexcop_ibi_value r108;
	struct i2c_adapter *i2c_tuner;

	/* enable no_base_addr - no repeated start when reading */
	fc->fc_i2c_adap[0].no_base_addr = 1;
	fc->fe = dvb_attach(s5h1420_attach,
			&skystar2_rev2_7_s5h1420_config, i2c);
	if (!fc->fe)
		goto fail;

	i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
	if (!i2c_tuner)
		goto fail;

	fc->fe_sleep = fc->fe->ops.sleep;
	fc->fe->ops.sleep = flexcop_sleep;

	/* enable no_base_addr - no repeated start when reading */
	fc->fc_i2c_adap[2].no_base_addr = 1;
	if (!dvb_attach(isl6421_attach, fc->fe,
			&fc->fc_i2c_adap[2].i2c_adap, 0x08, 1, 1)) {
		err("ISL6421 could NOT be attached");
		goto fail_isl;
	}
	info("ISL6421 successfully attached");

	/* the ITD1000 requires a lower i2c clock - is it a problem ? */
	r108.raw = 0x00000506;
	fc->write_ibi_reg(fc, tw_sm_c_108, r108);
	if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner,
			&skystar2_rev2_7_itd1000_config)) {
		err("ITD1000 could NOT be attached");
		/* Should i2c clock be restored? */
		goto fail_isl;
	}
	info("ITD1000 successfully attached");

	return 1;

fail_isl:
	fc->fc_i2c_adap[2].no_base_addr = 0;
fail:
	/* for the next devices we need it again */
	fc->fc_i2c_adap[0].no_base_addr = 0;
	return 0;
}
#else
#define skystar2_rev27_attach NULL
#endif

/* SkyStar2 rev 2.8 */
#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113)
static struct cx24123_config skystar2_rev2_8_cx24123_config = {
	.demod_address = 0x55,
	.dont_use_pll = 1,
	.agc_callback = cx24113_agc_callback,
};

static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
	.i2c_addr = 0x54,
	.xtal_khz = 10111,
};

/* Attach CX24123 demod, CX24113 tuner and ISL6421 LNB controller. */
static int skystar2_rev28_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	struct i2c_adapter *i2c_tuner;

	fc->fe = dvb_attach(cx24123_attach,
			&skystar2_rev2_8_cx24123_config, i2c);
	if (!fc->fe)
		return 0;

	i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
	if (!i2c_tuner)
		return 0;

	if (!dvb_attach(cx24113_attach, fc->fe,
			&skystar2_rev2_8_cx24113_config, i2c_tuner)) {
		err("CX24113 could NOT be attached");
		return 0;
	}
	info("CX24113 successfully attached");

	fc->fc_i2c_adap[2].no_base_addr = 1;
	if (!dvb_attach(isl6421_attach, fc->fe,
			&fc->fc_i2c_adap[2].i2c_adap, 0x08, 0, 0)) {
		err("ISL6421 could NOT be attached");
		fc->fc_i2c_adap[2].no_base_addr = 0;
		return 0;
	}
	info("ISL6421 successfully attached");

	/* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
	 * IR-receiver (PIC16F818) - but the card has no input for
	 * that ??? */
	return 1;
}
#else
#define skystar2_rev28_attach NULL
#endif

/* AirStar DVB-T */
#if FE_SUPPORTED(MT352) && FE_SUPPORTED(PLL)
/* MT352 init callback: clock, reset, ADC, AGC and capture range setup. */
static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe)
{
	static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d };
	static u8 mt352_reset[] = { 0x50, 0x80 };
	static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 };
	static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 };
	static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 };

	mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config));
	udelay(2000);
	mt352_write(fe, mt352_reset, sizeof(mt352_reset));
	mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg));
	mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg));
	mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg));
	return 0;
}

static struct mt352_config samsung_tdtc9251dh0_config = {
	.demod_address = 0x0f,
	.demod_init = samsung_tdtc9251dh0_demod_init,
};

/* Attach MT352 demod + Samsung TDTC9251DH0 PLL. */
static int airstar_dvbt_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
	if (!fc->fe)
		return 0;
	return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
			DVB_PLL_SAMSUNG_TDTC9251DH0);
}
#else
#define airstar_dvbt_attach NULL
#endif

/* AirStar ATSC 1st generation */
#if FE_SUPPORTED(BCM3510)
/* BCM3510 callback: load its firmware via the flexcop parent device. */
static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
		const struct firmware **fw, char* name)
{
	struct flexcop_device *fc = fe->dvb->priv;
	return request_firmware(fw, name, fc->dev);
}

static struct bcm3510_config air2pc_atsc_first_gen_config = {
	.demod_address = 0x0f,
	.request_firmware = flexcop_fe_request_firmware,
};

/* Attach BCM3510 demod (integrated tuner, nothing else needed). */
static int airstar_atsc1_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
	return fc->fe != NULL;
}
#else
#define airstar_atsc1_attach NULL
#endif

/* AirStar ATSC 2nd generation */
#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL)
static struct nxt200x_config samsung_tbmv_config = {
	.demod_address = 0x0a,
};

/* Attach NXT200X demod + Samsung TBMV PLL. */
static int airstar_atsc2_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
	if (!fc->fe)
		return 0;
	return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
			DVB_PLL_SAMSUNG_TBMV);
}
#else
#define airstar_atsc2_attach NULL
#endif

/* AirStar ATSC 3rd generation */
#if FE_SUPPORTED(LGDT330X)
static struct lgdt330x_config air2pc_atsc_hd5000_config = {
	.demod_address = 0x59,
	.demod_chip = LGDT3303,
	.serial_mpeg = 0x04,
	.clock_polarity_flip = 1,
};

/* Attach LGDT3303 demod + LG TDVS-H06xF simple tuner. */
static int airstar_atsc3_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
	if (!fc->fe)
		return 0;
	return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
			TUNER_LG_TDVS_H06XF);
}
#else
#define airstar_atsc3_attach NULL
#endif

/* CableStar2 DVB-C */
#if FE_SUPPORTED(STV0297) && FE_SUPPORTED(PLL)
/* Register/value pairs for STV0297 init, 0xff/0xff terminated. */
static u8 alps_tdee4_stv0297_inittab[] = {
	0x80, 0x01,
	0x80, 0x00,
	0x81, 0x01,
	0x81, 0x00,
	0x00, 0x48,
	0x01, 0x58,
	0x03, 0x00,
	0x04, 0x00,
	0x07, 0x00,
	0x08, 0x00,
	0x30, 0xff,
	0x31, 0x9d,
	0x32, 0xff,
	0x33, 0x00,
	0x34, 0x29,
	0x35, 0x55,
	0x36, 0x80,
	0x37, 0x6e,
	0x38, 0x9c,
	0x40, 0x1a,
	0x41, 0xfe,
	0x42, 0x33,
	0x43, 0x00,
	0x44, 0xff,
	0x45, 0x00,
	0x46, 0x00,
	0x49, 0x04,
	0x4a, 0x51,
	0x4b, 0xf8,
	0x52, 0x30,
	0x53, 0x06,
	0x59, 0x06,
	0x5a, 0x5e,
	0x5b, 0x04,
	0x61, 0x49,
	0x62, 0x0a,
	0x70, 0xff,
	0x71, 0x04,
	0x72, 0x00,
	0x73, 0x00,
	0x74, 0x0c,
	0x80, 0x20,
	0x81, 0x00,
	0x82, 0x30,
	0x83, 0x00,
	0x84, 0x04,
	0x85, 0x22,
	0x86, 0x08,
	0x87, 0x1b,
	0x88, 0x00,
	0x89, 0x00,
	0x90, 0x00,
	0x91, 0x04,
	0xa0, 0x86,
	0xa1, 0x00,
	0xa2, 0x00,
	0xb0, 0x91,
	0xb1, 0x0b,
	0xc0, 0x5b,
	0xc1, 0x10,
	0xc2, 0x12,
	0xd0, 0x02,
	0xd1, 0x00,
	0xd2, 0x00,
	0xd3, 0x00,
	0xd4, 0x02,
	0xd5, 0x00,
	0xde, 0x00,
	0xdf, 0x01,
	0xff, 0xff,
};

static struct stv0297_config alps_tdee4_stv0297_config = {
	.demod_address = 0x1c,
	.inittab = alps_tdee4_stv0297_inittab,
};

/* Attach STV0297 demod + TDEE4 PLL on the separate tuner i2c adapter,
 * disabling the demod's own I2C gate. */
static int cablestar2_attach(struct flexcop_device *fc,
		struct i2c_adapter *i2c)
{
	fc->fc_i2c_adap[0].no_base_addr = 1;
	fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c);
	if (!fc->fe)
		goto fail;

	/* This tuner doesn't use the stv0297's I2C gate, but
	 * instead the tuner is connected to a different flexcop
	 * I2C adapter. */
	if (fc->fe->ops.i2c_gate_ctrl)
		fc->fe->ops.i2c_gate_ctrl(fc->fe, 0);
	fc->fe->ops.i2c_gate_ctrl = NULL;

	if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61,
			&fc->fc_i2c_adap[2].i2c_adap, DVB_PLL_TDEE4))
		goto fail;

	return 1;

fail:
	/* Reset for next frontend to try */
	fc->fc_i2c_adap[0].no_base_addr = 0;
	return 0;
}
#else
#define cablestar2_attach NULL
#endif

/* Probe table: card type plus its attach routine (NULL when the
 * needed demod/tuner drivers are not configured in). */
static struct {
	flexcop_device_type_t type;
	int (*attach)(struct flexcop_device *, struct i2c_adapter *);
} flexcop_frontends[] = {
	{ FC_SKY_REV27, skystar2_rev27_attach },
	{ FC_SKY_REV28, skystar2_rev28_attach },
	{ FC_SKY_REV26, skystar2_rev26_attach },
	{ FC_AIR_DVBT, airstar_dvbt_attach },
	{ FC_AIR_ATSC2, airstar_atsc2_attach },
	{ FC_AIR_ATSC3, airstar_atsc3_attach },
	{ FC_AIR_ATSC1, airstar_atsc1_attach },
	{ FC_CABLE, cablestar2_attach },
	{ FC_SKY_REV23, skystar2_rev23_attach },
};

/* try to figure out the frontend: walk the probe table until one
 * attach routine succeeds, then register the frontend with dvb-core. */
int flexcop_frontend_init(struct flexcop_device *fc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) {
		if (!flexcop_frontends[i].attach)
			continue;
		/* type needs to be set before, because of some workarounds
		 * done based on the probed card type */
		fc->dev_type = flexcop_frontends[i].type;
		if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap))
			goto fe_found;
		/* Clean up partially attached frontend */
		if (fc->fe) {
			dvb_frontend_detach(fc->fe);
			fc->fe = NULL;
		}
	}
	fc->dev_type = FC_UNK;
	err("no frontend driver found for this B2C2/FlexCop adapter");
	return -ENODEV;

fe_found:
	info("found '%s' .", fc->fe->ops.info.name);
	if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) {
		err("frontend registration failed!");
		dvb_frontend_detach(fc->fe);
		fc->fe = NULL;
		return -EINVAL;
	}
	fc->init_state |= FC_STATE_FE_INIT;
	return 0;
}

/* Unregister and detach the frontend if one was registered. */
void flexcop_frontend_exit(struct flexcop_device *fc)
{
	if (fc->init_state & FC_STATE_FE_INIT) {
		dvb_unregister_frontend(fc->fe);
		dvb_frontend_detach(fc->fe);
	}
	fc->init_state &= ~FC_STATE_FE_INIT;
}
gpl-2.0
CyanogenMod/android_kernel_samsung_exynos5420
tools/power/cpupower/utils/helpers/msr.c
10254
2347
#if defined(__i386__) || defined(__x86_64__) #include <fcntl.h> #include <stdio.h> #include <unistd.h> #include <stdint.h> #include "helpers/helpers.h" /* Intel specific MSRs */ #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_MISC_ENABLES 0x1a0 #define MSR_IA32_ENERGY_PERF_BIAS 0x1b0 #define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1ad /* * read_msr * * Will return 0 on success and -1 on failure. * Possible errno values could be: * EFAULT -If the read/write did not fully complete * EIO -If the CPU does not support MSRs * ENXIO -If the CPU does not exist */ int read_msr(int cpu, unsigned int idx, unsigned long long *val) { int fd; char msr_file_name[64]; sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); fd = open(msr_file_name, O_RDONLY); if (fd < 0) return -1; if (lseek(fd, idx, SEEK_CUR) == -1) goto err; if (read(fd, val, sizeof *val) != sizeof *val) goto err; close(fd); return 0; err: close(fd); return -1; } /* * write_msr * * Will return 0 on success and -1 on failure. * Possible errno values could be: * EFAULT -If the read/write did not fully complete * EIO -If the CPU does not support MSRs * ENXIO -If the CPU does not exist */ int write_msr(int cpu, unsigned int idx, unsigned long long val) { int fd; char msr_file_name[64]; sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); fd = open(msr_file_name, O_WRONLY); if (fd < 0) return -1; if (lseek(fd, idx, SEEK_CUR) == -1) goto err; if (write(fd, &val, sizeof val) != sizeof val) goto err; close(fd); return 0; err: close(fd); return -1; } int msr_intel_get_perf_bias(unsigned int cpu) { unsigned long long val; int ret; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) return -1; ret = read_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val); if (ret) return ret; return val; } int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val) { int ret; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) return -1; ret = write_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, val); if (ret) return ret; return 0; } unsigned long long 
msr_intel_get_turbo_ratio(unsigned int cpu) { unsigned long long val; int ret; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO)) return -1; ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val); if (ret) return ret; return val; } #endif
gpl-2.0
billchen1977/kernel_sony_msm8x60
net/irda/irlan/irlan_filter.c
11278
6478
/********************************************************************* * * Filename: irlan_filter.c * Version: * Description: * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Fri Jan 29 11:16:38 1999 * Modified at: Sat Oct 30 12:58:45 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/random.h> #include <linux/seq_file.h> #include <net/irda/irlan_common.h> #include <net/irda/irlan_filter.h> /* * Function irlan_filter_request (self, skb) * * Handle filter request from client peer device * */ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_operation == DYNAMIC)) { IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n"); self->provider.mac_address[0] = 0x40; self->provider.mac_address[1] = 0x00; self->provider.mac_address[2] = 0x00; self->provider.mac_address[3] = 0x00; /* Use arbitration value to generate MAC address */ if (self->provider.access_type == ACCESS_PEER) { self->provider.mac_address[4] = self->provider.send_arb_val & 0xff; self->provider.mac_address[5] = (self->provider.send_arb_val >> 8) & 0xff; } else { /* Just generate something for now */ get_random_bytes(self->provider.mac_address+4, 1); get_random_bytes(self->provider.mac_address+5, 1); } skb->data[0] = 
0x00; /* Success */ skb->data[1] = 0x03; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001); irlan_insert_array_param(skb, "FILTER_ENTRY", self->provider.mac_address, 6); return; } if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Directed filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Directed filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Broadcast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Broadcast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == FILTER)) { IRDA_DEBUG(0, "Multicast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == NONE)) { IRDA_DEBUG(0, "Multicast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; } if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_operation == GET)) { IRDA_DEBUG(0, "Multicast filter get\n"); skb->data[0] = 0x00; /* Success? 
*/ skb->data[1] = 0x02; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); irlan_insert_short_param(skb, "MAX_ENTRY", 16); return; } skb->data[0] = 0x00; /* Command not supported */ skb->data[1] = 0x00; IRDA_DEBUG(0, "Not implemented!\n"); } /* * Function check_request_param (self, param, value) * * Check parameters in request from peer device * */ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) { IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); IRDA_DEBUG(4, "%s, %s\n", param, value); /* * This is experimental!! DB. */ if (strcmp(param, "MODE") == 0) { IRDA_DEBUG(0, "%s()\n", __func__ ); self->use_udata = TRUE; return; } /* * FILTER_TYPE */ if (strcmp(param, "FILTER_TYPE") == 0) { if (strcmp(value, "DIRECTED") == 0) { self->provider.filter_type = IRLAN_DIRECTED; return; } if (strcmp(value, "MULTICAST") == 0) { self->provider.filter_type = IRLAN_MULTICAST; return; } if (strcmp(value, "BROADCAST") == 0) { self->provider.filter_type = IRLAN_BROADCAST; return; } } /* * FILTER_MODE */ if (strcmp(param, "FILTER_MODE") == 0) { if (strcmp(value, "ALL") == 0) { self->provider.filter_mode = ALL; return; } if (strcmp(value, "FILTER") == 0) { self->provider.filter_mode = FILTER; return; } if (strcmp(value, "NONE") == 0) { self->provider.filter_mode = FILTER; return; } } /* * FILTER_OPERATION */ if (strcmp(param, "FILTER_OPERATION") == 0) { if (strcmp(value, "DYNAMIC") == 0) { self->provider.filter_operation = DYNAMIC; return; } if (strcmp(value, "GET") == 0) { self->provider.filter_operation = GET; return; } } } /* * Function irlan_print_filter (filter_type, buf) * * Print status of filter. 
Used by /proc file system * */ #ifdef CONFIG_PROC_FS #define MASK2STR(m,s) { .mask = m, .str = s } void irlan_print_filter(struct seq_file *seq, int filter_type) { static struct { int mask; const char *str; } filter_mask2str[] = { MASK2STR(IRLAN_DIRECTED, "DIRECTED"), MASK2STR(IRLAN_FUNCTIONAL, "FUNCTIONAL"), MASK2STR(IRLAN_GROUP, "GROUP"), MASK2STR(IRLAN_MAC_FRAME, "MAC_FRAME"), MASK2STR(IRLAN_MULTICAST, "MULTICAST"), MASK2STR(IRLAN_BROADCAST, "BROADCAST"), MASK2STR(IRLAN_IPX_SOCKET, "IPX_SOCKET"), MASK2STR(0, NULL) }, *p; for (p = filter_mask2str; p->str; p++) { if (filter_type & p->mask) seq_printf(seq, "%s ", p->str); } seq_putc(seq, '\n'); } #undef MASK2STR #endif
gpl-2.0
fhunleth/htc-kernel-incrediblec
drivers/misc/bma150_spi.c
15
10866
/* drivers/misc/bma150_spi.c - bma150 G-sensor driver * * Copyright (C) 2009 HTC Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/miscdevice.h> #include <asm/uaccess.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/bma150.h> #include <asm/gpio.h> #include <linux/earlysuspend.h> #include <linux/platform_device.h> #include <mach/atmega_microp.h> struct early_suspend bma_early_suspend; static struct bma150_platform_data *this_pdata; static struct mutex gsensor_RW_mutex; static struct mutex gsensor_set_mode_mutex; static int spi_microp_enable(uint8_t on) { int ret; ret = microp_spi_vote_enable(SPI_GSENSOR, on); if (ret < 0) printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); return ret; } static int spi_gsensor_read(uint8_t *data) { int ret; mutex_lock(&gsensor_RW_mutex); ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ, data, 1); if (ret < 0) { printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_REG_DATA, data, 2); if (ret < 0) { printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } mutex_unlock(&gsensor_RW_mutex); return ret; } static int spi_gsensor_write(uint8_t *data) { int ret; mutex_lock(&gsensor_RW_mutex); ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_REG, data, 2); if (ret < 0) { printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } 
mutex_unlock(&gsensor_RW_mutex); return ret; } static int spi_gsensor_init_hw(void) { char buffer[2]; memset(buffer, 0x0, sizeof(buffer)); buffer[0] = RANGE_BWIDTH_REG; if (spi_gsensor_read(buffer) < 0) return -EIO; /*printk("spi_gsensor_init_hw,read RANGE_BWIDTH_REG = %x " , buffer[1]);*/ buffer[1] = (buffer[1]&0xe0); buffer[0] = RANGE_BWIDTH_REG; if (spi_gsensor_write(buffer) < 0) return -EIO; buffer[0] = SMB150_CONF2_REG; if (spi_gsensor_read(buffer) < 0) return -EIO; buffer[1] = buffer[1]|1<<3; buffer[0] = SMB150_CONF2_REG; if (spi_gsensor_write(buffer) < 0) return -EIO; return 0; } /* static int spi_gsensor_read_version(void) { uint8_t buffer[2]; int ret = -EIO; buffer[0] = VERSION_REG; buffer[1] = 1; ret = spi_gsensor_read(buffer); if (ret < 0) { printk(KERN_ERR "%s: get al_version fail(%d)\n", __func__, ret); return ret; } printk(KERN_INFO "%s: al_version: 0x%2.2X\n", __func__, buffer[0]); buffer[0] = CHIP_ID_REG; buffer[1] = 1; ret = spi_gsensor_read(buffer); if (ret < 0) { printk(KERN_ERR "%s: get chip_id fail(%d)\n", __func__, ret); return ret; } printk(KERN_INFO "%s: chip_id: 0x%2.2X\n", __func__, buffer[0]); return 0; } */ static int spi_bma150_TransRBuff(short *rbuf) { int ret; unsigned char buffer[6]; memset(buffer, 0, 6); mutex_lock(&gsensor_RW_mutex); buffer[0] = 1; ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_DATA_REQ, buffer, 1); if (ret < 0) { printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } if (this_pdata && this_pdata->microp_new_cmd && this_pdata->microp_new_cmd == 1) { /*printk(KERN_DEBUG "%s: New MicroP command\n", __func__);*/ ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_DATA, buffer, 6); rbuf[0] = buffer[0]<<2|buffer[1]>>6; if (rbuf[0]&0x200) rbuf[0] -= 1<<10; rbuf[1] = buffer[2]<<2|buffer[3]>>6; if (rbuf[1]&0x200) rbuf[1] -= 1<<10; rbuf[2] = buffer[4]<<2|buffer[5]>>6; if (rbuf[2]&0x200) rbuf[2] -= 1<<10; } else { /* For Passion with V01 ~ V05 Microp */ /*printk(KERN_DEBUG "%s: Old 
MicroP command\n", __func__);*/ ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_X_DATA, buffer, 2); if (ret < 0) { printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } rbuf[0] = buffer[0]<<2|buffer[1]>>6; if (rbuf[0]&0x200) rbuf[0] -= 1<<10; ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_Y_DATA, buffer, 2); if (ret < 0) { printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } rbuf[1] = buffer[0]<<2|buffer[1]>>6; if (rbuf[1]&0x200) rbuf[1] -= 1<<10; ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_Z_DATA, buffer, 2); if (ret < 0) { printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); mutex_unlock(&gsensor_RW_mutex); return ret; } rbuf[2] = buffer[0]<<2|buffer[1]>>6; if (rbuf[2]&0x200) rbuf[2] -= 1<<10; } /* printk("X=%d, Y=%d, Z=%d\n",rbuf[0],rbuf[1],rbuf[2]);*/ /* printk(KERN_DEBUG "%s: 0x%2.2X 0x%2.2X 0x%2.2X \ 0x%2.2X 0x%2.2X 0x%2.2X\n", __func__, buffer[0], buffer[1], buffer[2], \ buffer[3], buffer[4], buffer[5]);*/ mutex_unlock(&gsensor_RW_mutex); return 1; } static int __spi_bma150_set_mode(char mode) { char buffer[2]; int ret; mutex_lock(&gsensor_set_mode_mutex); if (mode == BMA_MODE_NORMAL) { spi_microp_enable(1); printk(KERN_INFO "%s: BMA get into NORMAL mode!\n", __func__); } buffer[0] = SMB150_CTRL_REG; ret = spi_gsensor_read(buffer); if (ret < 0) { mutex_unlock(&gsensor_set_mode_mutex); return -1; } buffer[1] = (buffer[1]&0xfe)|mode; buffer[0] = SMB150_CTRL_REG; ret = spi_gsensor_write(buffer); if (mode == BMA_MODE_SLEEP) { spi_microp_enable(0); printk(KERN_INFO "%s: BMA get into SLEEP mode!\n", __func__); } mutex_unlock(&gsensor_set_mode_mutex); return ret; } static int spi_bma150_open(struct inode *inode, struct file *file) { return nonseekable_open(inode, file); } static int spi_bma150_release(struct inode *inode, struct file *file) { return 0; } static int spi_bma150_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { void 
__user *argp = (void __user *)arg; char rwbuf[8]; char *toRbuf; int ret = -1; short buf[8], temp; switch (cmd) { case BMA_IOCTL_READ: case BMA_IOCTL_WRITE: case BMA_IOCTL_SET_MODE: if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) return -EFAULT; break; case BMA_IOCTL_READ_ACCELERATION: if (copy_from_user(&buf, argp, sizeof(buf))) return -EFAULT; break; default: break; } switch (cmd) { case BMA_IOCTL_INIT: ret = spi_gsensor_init_hw(); if (ret < 0) return ret; break; case BMA_IOCTL_READ: if (rwbuf[0] < 1) return -EINVAL; ret = spi_gsensor_read(&rwbuf[1]); if (ret < 0) return ret; break; case BMA_IOCTL_WRITE: if (rwbuf[0] < 2) return -EINVAL; ret = spi_gsensor_write(&rwbuf[1]); if (ret < 0) return ret; break; case BMA_IOCTL_READ_ACCELERATION: ret = spi_bma150_TransRBuff(&buf[0]); if (ret < 0) return ret; break; case BMA_IOCTL_SET_MODE: /*printk(KERN_DEBUG "%s: BMA_IOCTL_SET_MODE by ioctl = %d\n", __func__,rwbuf[0]);*/ ret = __spi_bma150_set_mode(rwbuf[0]); if (ret < 0) return ret; break; case BMA_IOCTL_GET_INT: temp = 0; break; case BMA_IOCTL_GET_CHIP_LAYOUT: if (this_pdata) temp = this_pdata->chip_layout; break; default: return -ENOTTY; } switch (cmd) { case BMA_IOCTL_READ: toRbuf = &rwbuf[1]; if (copy_to_user(argp, toRbuf, sizeof(rwbuf)-1)) return -EFAULT; break; case BMA_IOCTL_READ_ACCELERATION: if (copy_to_user(argp, &buf, sizeof(buf))) return -EFAULT; break; case BMA_IOCTL_GET_INT: if (copy_to_user(argp, &temp, sizeof(temp))) return -EFAULT; break; case BMA_IOCTL_GET_CHIP_LAYOUT: if (copy_to_user(argp, &temp, sizeof(temp))) return -EFAULT; break; default: break; } return 0; } static struct file_operations spi_bma_fops = { .owner = THIS_MODULE, .open = spi_bma150_open, .release = spi_bma150_release, .ioctl = spi_bma150_ioctl, }; static struct miscdevice spi_bma_device = { .minor = MISC_DYNAMIC_MINOR, .name = "bma150", .fops = &spi_bma_fops, }; static void bma150_early_suspend(struct early_suspend *handler) { int ret = 0; ret = 
__spi_bma150_set_mode(BMA_MODE_SLEEP); /*printk(KERN_DEBUG "%s: spi_bma150_set_mode returned = %d!\n", __func__, ret);*/ } static void bma150_early_resume(struct early_suspend *handler) { /*printk(KERN_DEBUG "%s: spi_bma150_set_mode returned = %d!\n", __func__, ret);*/ } static int spi_gsensor_initial(void) { int ret; /* ret = spi_microp_enable(1); if (ret < 0) { printk(KERN_ERR "%s: spi_microp_enable fail\n", __func__); return ret; }*/ /* ret = spi_gsensor_read_version(); if (ret < 0) { printk(KERN_ERR "%s: get version fail\n", __func__); return ret; }*/ /* ret = microp_gsensor_init_hw(client); if (ret < 0) { printk(KERN_ERR "%s: init g-sensor fail\n", __func__); return ret; } */ ret = misc_register(&spi_bma_device); if (ret < 0) { printk(KERN_ERR "%s: init misc_register fail\n", __func__); return ret; } mutex_init(&gsensor_RW_mutex); mutex_init(&gsensor_set_mode_mutex); ret = spi_microp_enable(1); if (ret) { printk(KERN_ERR "%s: spi_microp_enable(1) fail!\n", __func__); goto err_spi_enable; } ret = __spi_bma150_set_mode(BMA_MODE_SLEEP); if (ret) { printk(KERN_ERR "%s: set BMA_MODE_SLEEP fail!\n", __func__); goto err_set_mode; } bma_early_suspend.suspend = bma150_early_suspend; bma_early_suspend.resume = bma150_early_resume; register_early_suspend(&bma_early_suspend); return 0; err_set_mode: spi_microp_enable(0); err_spi_enable: misc_deregister(&spi_bma_device); return ret; } static int spi_bma150_probe(struct platform_device *pdev) { printk(KERN_INFO "%s: G-sensor connect with microP: " "start initial\n", __func__); this_pdata = pdev->dev.platform_data; /* printk(KERN_DEBUG "%s: this_pdata->microp_new_cmd = %d\n", __func__, this_pdata->microp_new_cmd); */ spi_gsensor_initial(); return 0; } static int spi_bma150_remove(struct platform_device *pdev) { mutex_destroy(&gsensor_set_mode_mutex); return 0; } static struct platform_driver spi_bma150_driver = { .probe = spi_bma150_probe, .remove = spi_bma150_remove, .driver = { .name = BMA150_G_SENSOR_NAME, .owner = 
THIS_MODULE, }, }; static int __init spi_bma150_init(void) { return platform_driver_register(&spi_bma150_driver); } static void __exit spi_bma150_exit(void) { platform_driver_unregister(&spi_bma150_driver); } module_init(spi_bma150_init); module_exit(spi_bma150_exit); MODULE_DESCRIPTION("BMA150 G-sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
pastewka/lammps
tools/phonon/disp.cpp
15
77954
#include <vector> #include "string.h" #include "phonon.h" #include "green.h" #include "timer.h" #include "global.h" #ifdef UseSPG extern "C"{ #include "spglib.h" } #endif /*------------------------------------------------------------------------------ * Private method to evaluate the phonon dispersion curves *----------------------------------------------------------------------------*/ void Phonon::pdisp() { // ask the output file name and write the header. char str[MAXLINE]; for (int ii = 0; ii < 80; ++ii) printf("="); printf("\n"); #ifdef UseSPG // ask method to generate q-lines int method = 2; printf("Please select your method to generate the phonon dispersion:\n"); printf(" 1. Manual, should always work;\n"); printf(" 2. Automatic, works only for 3D crystals (CMS49-299).\nYour choice [2]: "); if (count_words(fgets(str,MAXLINE,stdin)) > 0) method = atoi(strtok(str," \t\n\r\f")); method = 2 - method%2; printf("Your selection: %d\n", method); #endif printf("\nPlease input the filename to output the dispersion data [pdisp.dat]:"); if (count_words(fgets(str,MAXLINE,stdin)) < 1) strcpy(str, "pdisp.dat"); char *ptr = strtok(str," \t\n\r\f"); char *fname = new char[strlen(ptr)+1]; strcpy(fname,ptr); // to store the nodes of the dispersion curve std::vector<double> nodes; nodes.clear(); std::vector<std::string> ndstr; ndstr.clear(); std::vector<double *> qs, qe; qs.clear(); qe.clear(); std::vector<int> nqbin; nqbin.clear(); // now the calculate the dispersion curve double qstr[3], qend[3]; int nq = MAX(MAX(dynmat->nx,dynmat->ny),dynmat->nz)/2+1; qend[0] = qend[1] = qend[2] = 0.; double *egvs = new double [ndim]; #ifdef UseSPG if (method == 1){ #endif while (1){ for (int i = 0; i < 3; ++i) qstr[i] = qend[i]; printf("\nPlease input the start q-point in unit of B1->B3, q to exit [%g %g %g]: ", qstr[0], qstr[1], qstr[2]); int n = count_words(fgets(str, MAXLINE, stdin)); ptr = strtok(str, " \t\n\r\f"); if ((n == 1) && (strcmp(ptr,"q") == 0)) break; else if (n >= 3){ qstr[0] 
= atof(ptr); qstr[1] = atof(strtok(NULL, " \t\n\r\f")); qstr[2] = atof(strtok(NULL, " \t\n\r\f")); } do printf("Please input the end q-point in unit of B1->B3: "); while (count_words(fgets(str, MAXLINE, stdin)) < 3); qend[0] = atof(strtok(str, " \t\n\r\f")); qend[1] = atof(strtok(NULL, " \t\n\r\f")); qend[2] = atof(strtok(NULL, " \t\n\r\f")); printf("Please input the # of points along the line [%d]: ", nq); if (count_words(fgets(str, MAXLINE, stdin)) > 0) nq = atoi(strtok(str," \t\n\r\f")); nq = MAX(nq,2); double *qtmp = new double [3]; for (int i = 0; i < 3; ++i) qtmp[i] = qstr[i]; qs.push_back(qtmp); qtmp = new double [3]; for (int i = 0; i < 3; ++i) qtmp[i] = qend[i]; qe.push_back(qtmp); nqbin.push_back(nq); ndstr.push_back(""); } ndstr.push_back(""); #ifdef UseSPG } else { memory->grow(atpos, dynmat->nucell, 3, "pdisp:atpos"); memory->grow(attyp, dynmat->nucell, "pdisp:attyp"); num_atom = dynmat->nucell; // set default, in case system dimension under study is not 3. for (int i = 0; i < dynmat->nucell; ++i) for (int idim = 0; idim < 3; ++idim) atpos[i][idim] = 0.; for (int i = 0; i < 3; ++i) for (int j = 0; j < 3; ++j) latvec[i][j] = 0.; for (int i = 0; i < 3; ++i) latvec[i][i] = 1.; // get atomic type info for (int i = 0; i < num_atom; ++i) attyp[i] = dynmat->attyp[i]; // get unit cell vector info int ndim = 0; for (int idim = 0; idim < 3; ++idim) for (int jdim = 0; jdim < 3; ++jdim) latvec[jdim][idim] = dynmat->basevec[ndim++]; // get atom position in unit cell; fractional for (int i = 0; i < num_atom; ++i) for (int idim = 0; idim < sysdim; ++idim) atpos[i][idim] = dynmat->basis[i][idim]; // display the unit cell info read printf("\n"); for (int ii = 0; ii < 80; ++ii) printf("-"); printf("\n"); printf("The basis vectors of the unit cell:\n"); for (int idim = 0; idim < 3; ++idim) printf(" A%d = %lg %lg %lg\n", idim+1, latvec[0][idim], latvec[1][idim], latvec[2][idim]); printf("Atom(s) in the unit cell:\n"); printf(" No. 
type sx sy sz\n"); for (int i = 0; i < MIN(num_atom, NUMATOM); ++i) printf(" %d %d %lg %lg %lg\n", i+1, attyp[i], atpos[i][0], atpos[i][1], atpos[i][2]); if (num_atom > NUMATOM) printf(" ... (%d atoms omitted.)\n", num_atom-NUMATOM); char symbol[11]; double symprec = 1.e-4, pos[num_atom][3]; for (int i = 0; i < num_atom; ++i) for (int j = 0; j < 3; ++j) pos[i][j] = atpos[i][j]; int spgnum = spg_get_international(symbol, latvec, pos, attyp, num_atom, symprec); printf("The space group number of your unit cell is: %d => %s\n", spgnum, symbol); for (int ii = 0; ii < 80; ++ii) printf("-"); printf("\n"); // angles const double la = sqrt(latvec[0][0] * latvec[0][0] + latvec[0][1] * latvec[0][1] + latvec[0][2] * latvec[0][2]); const double lb = sqrt(latvec[1][0] * latvec[1][0] + latvec[1][1] * latvec[1][1] + latvec[1][2] * latvec[1][2]); const double lc = sqrt(latvec[2][0] * latvec[2][0] + latvec[2][1] * latvec[2][1] + latvec[2][2] * latvec[2][2]); const double cosa = sqrt(latvec[1][0] * latvec[2][0] + latvec[1][1] * latvec[2][1] + latvec[1][2] * latvec[2][2])/(lb*lc); const double cosg = sqrt(latvec[0][0] * latvec[1][0] + latvec[0][1] * latvec[1][1] + latvec[0][2] * latvec[1][2])/(la*lb); double ivec[3][3]; ndim = 0; for (int idim = 0; idim < 3; ++idim) for (int jdim = 0; jdim < 3; ++jdim) ivec[jdim][idim] = dynmat->ibasevec[ndim++]; const double ka = sqrt(ivec[0][0] * ivec[0][0] + ivec[0][1] * ivec[0][1] + ivec[0][2] * ivec[0][2]); const double kb = sqrt(ivec[1][0] * ivec[1][0] + ivec[1][1] * ivec[1][1] + ivec[1][2] * ivec[1][2]); const double coskg = sqrt(ivec[0][0] * ivec[1][0] + ivec[0][1] * ivec[1][1] + ivec[0][2] * ivec[1][2])/(ka*kb); double *qtmp; if (spgnum >= 1 && spgnum <= 2){ // Triclinic system if (fabs(coskg) > ZERO){ // A.14, TRI1a and TRI2a ndstr.push_back("X"); // X-G qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); 
// G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("Y/L"); // L-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z/N"); // N-G qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-M qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("M/R"); // R-G qtmp = new double [3]; qtmp[0] = qtmp[2] = qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else { // A.14, TRI1b and TRI2b ndstr.push_back("X"); // X-G qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = -0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[1] = qtmp[2] = 0.; qtmp[0] = 0.5; qe.push_back(qtmp); ndstr.push_back("Y/L"); // L-G qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); 
ndstr.push_back("Z/N"); // N-G qtmp = new double [3]; qtmp[0] = qtmp[2] = -0.5; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-M qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("M/R"); // R-G qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = -0.5; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } } else if (spgnum >= 3 && spgnum <= 15){ // Monoclinic if (symbol[0] == 'P'){ // MCL-P const double eta = (1.-lb*cosa/lc)/(2.*(1.-cosa*cosa)); const double niu = 0.5 - eta * lc * cosa / lb; ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-H qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = eta; qtmp[2] = 1.-niu; qe.push_back(qtmp); ndstr.push_back("H"); // H-C qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = eta; qtmp[2] = 1.-niu; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("C"); // C-E qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("E"); // E-M1 qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 1.-eta; qtmp[2] = niu; qe.push_back(qtmp); ndstr.push_back("M_1"); // M1-A qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 1.-eta; qtmp[2] = niu; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; 
qe.push_back(qtmp); ndstr.push_back("A"); // A-X qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-H1 qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[2] = 1.-eta; qtmp[1] = niu; qe.push_back(qtmp); ndstr.push_back("H_1/M"); // M-D qtmp = new double [3]; qtmp[0] = 0.5; qtmp[2] = eta; qtmp[1] = 1.-niu; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("D"); // D-Z qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z/Y"); // Y-D qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("D"); } else { // MCL-C if (coskg < 0.){ // MCLC1 const double xi = (2. 
- lb * cosa / lc) / (4.*(1.-cosa*cosa)); const double eta = 0.5 + 2.*xi*lc/lb*cosa; const double psi = 0.75 - la * la /(4.*lb*lb*(1.-cosa*cosa)); const double phi = psi + (0.75 - psi) * lb / lc * cosa; ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-F qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-xi; qtmp[2] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("F"); // F-L qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-xi; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("L"); // L-I qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = phi; qtmp[1] = 1.-phi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("I/I_1"); // I1-Z qtmp = new double [3]; qtmp[0] = 1.-phi; qtmp[1] = -qtmp[0]; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-F1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("F_1/Y"); // Y-X1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = psi; qtmp[1] = 1.-psi; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X_1/X"); // X-G qtmp = new double [3]; qtmp[0] = 1.-psi; qtmp[1] = -qtmp[0]; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; 
qe.push_back(qtmp); ndstr.push_back("N/M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else if ( fabs(coskg) < ZERO) { // MCLC2 const double xi = (2. - lb * cosa / lc) / (4.*(1.-cosa*cosa)); const double eta = 0.5 + 2.*xi*lc/lb*cosa; const double psi = 0.75 - la * la /(4.*lb*lb*(1.-cosa*cosa)); const double phi = psi + (0.75 - psi) * lb / lc * cosa; ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-F qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-xi; qtmp[2] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("F"); // F-L qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-xi; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("L"); // L-I qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = phi; qtmp[1] = 1.-phi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("I/I_1"); // I1-Z qtmp = new double [3]; qtmp[0] = 1.-phi; qtmp[1] = -qtmp[0]; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-F1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("F_1/N"); // N-G qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-M qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; 
qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("M"); } else { double flag = lb / lc * cosa + lb * lb / (la * la *(1.-cosa*cosa)); if (fabs(flag) < ZERO){ // MCLC4 const double miu = 0.25*(1. + lb * lb / (la *la)); const double del = lb * lc * cosa / (2.*la*la); const double xi = miu - 0.25 + (1. - lb * cosa / lc)/(4.*(1.-cosa*cosa)); const double eta = 0.5 + 2.*xi*lc/lb*cosa; const double phi = 1. + xi - 2.*miu; const double psi = eta - 2.*del; ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-F qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-phi; qtmp[2] = 1.-psi; qe.push_back(qtmp); ndstr.push_back("F"); // F-H qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-phi; qtmp[2] = 1.-psi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("H"); // H-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-I qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("I/H_1"); // H1-Y1 qtmp = new double [3]; qtmp[0] = 1.-xi; qtmp[1] = -xi; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qe.push_back(qtmp); ndstr.push_back("Y_1"); // Y1-X qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X"); // X-G qtmp = new double [3]; 
qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("N/M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else if (flag < 1.){ // MCLC3 const double miu = 0.25*(1. + lb * lb / (la *la)); const double del = lb * lc * cosa / (2.*la*la); const double xi = miu - 0.25 + (1. - lb * cosa / lc)/(4.*(1.-cosa*cosa)); const double eta = 0.5 + 2.*xi*lc/lb*cosa; const double phi = 1. + xi - 2.*miu; const double psi = eta - 2.*del; ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-F qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-phi; qtmp[2] = 1.-psi; qe.push_back(qtmp); ndstr.push_back("F"); // F-H qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-phi; qtmp[2] = 1.-psi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("H"); // H-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-I qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("I/H_1"); // I-F1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new 
double [3]; qtmp[0] = phi; qtmp[2] = phi - 1.; qtmp[1] = psi; qe.push_back(qtmp); ndstr.push_back("F_1/H_1"); // H1-Y1 qtmp = new double [3]; qtmp[0] = 1.-xi; qtmp[1] = -xi; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qe.push_back(qtmp); ndstr.push_back("Y_1"); // Y1-X qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X"); // X-G qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("N/M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else { // MCLC5 const double xi = (lb*lb/(la*la) + (1.-lb*cosa/lc)/(1.-cosa*cosa))*0.25; const double eta = 0.5 + 2.*xi*lc*cosa/lb; const double miu = 0.5*eta + lb * lb /(4.*la*la) - lb*lc/(2.*la*la)*cosa; const double niu = 2.*miu - xi; const double omg = (4.*niu - 1. 
- lb*lb*(1.-cosa*cosa)/(la*la))*lc/(2.*lb*cosa); const double del = xi*lc*cosa/lb + omg*0.5 - 0.25; const double rho = 1.-xi*la*la/(lb*lb); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-F qtmp = new double [3]; qtmp[0] = qtmp[1] = miu; qtmp[2] = del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = niu; qtmp[2] = omg; qe.push_back(qtmp); ndstr.push_back("F"); // F-L qtmp = new double [3]; qtmp[0] = qtmp[1] = niu; qtmp[2] = omg; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Y"); // L-I qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = rho; qtmp[1] = 1.-rho; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("I/I_1"); // I1-Z qtmp = new double [3]; qtmp[0] = 1.-rho; qtmp[1] = -qtmp[0]; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-H qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("H"); // H-F1 qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-niu; qtmp[2] = 1.-omg; qe.push_back(qtmp); ndstr.push_back("F_1/H_1"); // H1-Y1 qtmp = new double [3]; qtmp[0] = 1.-xi; qtmp[1] = -xi; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qe.push_back(qtmp); ndstr.push_back("Y_1"); // Y1-X qtmp = new double [3]; qtmp[0] = 1.-miu; qtmp[1] = -miu; qtmp[2] = -del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Y_1"); // X-G qtmp = new 
double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("N/M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } } } } else if (spgnum >= 16 && spgnum <= 74){ // Orthorhombic if (symbol[0] == 'P'){ // ORC ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X"); // X-S qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("S"); // S-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-U qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("U"); // U-R qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); // R-T qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; 
qe.push_back(qtmp); ndstr.push_back("T"); // T-Z qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z/Y"); // Y-T qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("T/U"); // U-X qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X/S"); // S-R qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); } else if (symbol[0] == 'F'){ // ORCF double flag = 1./(la*la) - 1./(lb*lb) - 1./(lc*lc); if (fabs(flag) < ZERO){ // ORCF3 const double xi = 0.25 * (1. + la*la/(lb*lb) - la*la/(lc*lc)); const double eta = 0.25 * (1. 
+ la*la/(lb*lb) + la*la/(lc*lc)); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-T qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("T"); // T-Z qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("X"); // X-A1 qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5-xi; qtmp[2] = 1.-xi; qe.push_back(qtmp); ndstr.push_back("A_1"); // A1-Y qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5-xi; qtmp[2] = 1.-xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y/X"); // X-A qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5+xi; qtmp[2] = xi; qe.push_back(qtmp); ndstr.push_back("A"); // A-Z qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5+xi; qtmp[2] = xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z/L"); // L-G qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; 
qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else if (flag > 0.){ // ORCF1 const double xi = 0.25 * (1. + la*la/(lb*lb) - la*la/(lc*lc)); const double eta = 0.25 * (1. + la*la/(lb*lb) + la*la/(lc*lc)); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-T qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("T"); // T-Z qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("X"); // X-A1 qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5-xi; qtmp[2] = 1.-xi; qe.push_back(qtmp); ndstr.push_back("A_1"); // A1-Y qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5-xi; qtmp[2] = 1.-xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y/T"); // T-X1 qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.; qtmp[1] = qtmp[2] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("X_1/X"); // X-A qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5+xi; qtmp[2] = xi; qe.push_back(qtmp); 
ndstr.push_back("A"); // A-Z qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5+xi; qtmp[2] = xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z/L"); // L-G qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } else { // ORCF2 const double eta = 0.25 * (1. + la*la/(lb*lb) - la*la/(lc*lc)); const double phi = 0.25 * (1. + lc*lc/(lb*lb) - lc*lc/(la*la)); const double del = 0.25 * (1. + lb*lb/(la*la) - lb*lb/(lc*lc)); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-C qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[2] = 0.5-eta; qtmp[1] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("C"); // C-D qtmp = new double [3]; qtmp[0] = 0.5; qtmp[2] = 0.5-eta; qtmp[1] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5-del; qtmp[2] = 0.5; qtmp[1] = 1.-del; qe.push_back(qtmp); ndstr.push_back("D"); // D-X qtmp = new double [3]; qtmp[0] = 0.5-del; qtmp[2] = 0.5; qtmp[1] = 1.-del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-G qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-D1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5+del; 
qtmp[1] = 0.5; qtmp[2] = del; qe.push_back(qtmp); ndstr.push_back("D_1"); // D1-H qtmp = new double [3]; qtmp[0] = 0.5+del; qtmp[1] = 0.5; qtmp[2] = del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.-phi; qtmp[1] = 0.5-phi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("H"); // H-C qtmp = new double [3]; qtmp[0] = 1.-phi; qtmp[1] = 0.5-phi; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5-eta; qtmp[2] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("C/C_1"); // C1-Z qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.5+eta; qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Z/X"); // X-H1 qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = phi; qtmp[1] = 0.5+phi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("H_1/H"); // H-Y qtmp = new double [3]; qtmp[0] = 1.-phi; qtmp[1] = 0.5-phi; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("Y/L"); // L-G qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); } } else if (symbol[0] == 'C'){ // ORCC const double xi = 0.25 * (1. 
+ la*la/(lb*lb)); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X"); // X-S qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("S"); // S-R qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); // R-A qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A"); // A-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = xi; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-X1 qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = 1.-xi; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("X_1"); // X1-A1 qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = 1.-xi; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = 1.-xi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A_1"); // A1-T qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = 1.-xi; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; 
qe.push_back(qtmp); ndstr.push_back("T"); // T-Y qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("Y/Z"); // Z-T qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("T"); } else { // ORCI const double xi = 0.25 * (1. + la*la/(lc*lc)); const double eta = 0.25 * (1. + lb*lb/(lc*lc)); const double del = (lb*lb-la*la)/(4.*lc*lc); const double miu = (lb*lb+la*la)/(4.*lc*lc); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = qtmp[2] = xi; qe.push_back(qtmp); ndstr.push_back("X"); // X-L qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = qtmp[2] = xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -miu; qtmp[1] = miu; qtmp[2] = 0.5-del; qe.push_back(qtmp); ndstr.push_back("L"); // L-T qtmp = new double [3]; qtmp[0] = -miu; qtmp[1] = miu; qtmp[2] = 0.5-del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("T"); // T-W qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qe.push_back(qtmp); ndstr.push_back("W"); // W-R qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); // R-X1 qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = xi; qtmp[1] = 1.-xi; qtmp[2] = -xi; qe.push_back(qtmp); ndstr.push_back("X_1"); // X1-Z qtmp = new double [3]; qtmp[0] = xi; qtmp[1] = 1.-xi; qtmp[2] = -xi; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; 
qtmp[2] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = -0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = eta; qtmp[1] = -eta; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-S qtmp = new double [3]; qtmp[0] = qtmp[2] = eta; qtmp[1] = -eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("S"); // S-W qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qe.push_back(qtmp); ndstr.push_back("W/L_1"); // L1-Y qtmp = new double [3]; qtmp[0] = miu; qtmp[1] = -miu; qtmp[2] = 0.5+del; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = eta; qtmp[1] = -eta; qe.push_back(qtmp); ndstr.push_back("Y/Y_1"); // Y1-Z qtmp = new double [3]; qtmp[0] = 1.-eta; qtmp[1] = eta; qtmp[2] = -eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); } } else if (spgnum >= 75 && spgnum <= 142){ // Tetragonal if (symbol[0] == 'P'){ // TET ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-M qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = 
qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-R qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); // R-A qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A"); // A-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z/X"); // X-R qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R/M"); // M-A qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A"); } else { // BCT if (la > lc){ // BCT1 const double eta = 0.25 * (1. 
+ lc*lc/(la*la)); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-M qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("M"); // M-G qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = eta; qtmp[2] = -eta; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-P qtmp = new double [3]; qtmp[0] = qtmp[1] = eta; qtmp[2] = -eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qe.push_back(qtmp); ndstr.push_back("P"); // P-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("N"); // N-Z1 qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -eta; qtmp[1] = 1.-eta; qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("Z_1"); // Z1-M qtmp = new double [3]; qtmp[0] = -eta; qtmp[1] = 1.-eta; qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -0.5; qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("M"); // X-P qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qe.push_back(qtmp); ndstr.push_back("P"); } else { // BCT2 const double eta = 0.25 * (1. 
+ la*la/(lc*lc)); const double xi = la*la/(2.*lc*lc); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-Y qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = xi; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Y"); // Y-Sigma qtmp = new double [3]; qtmp[0] = -xi; qtmp[1] = xi; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = -eta; qtmp[1] = qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("{/Symbol S}"); // Sigma-G qtmp = new double [3]; qtmp[0] = -eta; qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-Sigma_1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = -0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = eta; qtmp[1] = 1.-eta; qtmp[2] = -eta; qe.push_back(qtmp); ndstr.push_back("{/Symbol S}_1"); // Sigma_1-N qtmp = new double [3]; qtmp[0] = eta; qtmp[1] = 1.-eta; qtmp[2] = -eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("N"); // N-P qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = qtmp[1] = 0.25; qe.push_back(qtmp); ndstr.push_back("P"); // P-Y1 qtmp = new double [3]; qtmp[0] = qtmp[2] = qtmp[1] = 0.25; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -xi; qe.push_back(qtmp); ndstr.push_back("Y_1"); // Y1-Z qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -xi; qs.push_back(qtmp); qtmp = new 
double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // X-P qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = qtmp[1] = 0.25; qe.push_back(qtmp); ndstr.push_back("P"); } } } else if (spgnum >= 143 && spgnum <= 167){ // Trigonal if (cosg > 0.){ // RHL1 const double eta = (1.+4.*cosa)/(2.+4.*cosa); const double niu = 0.75 - 0.5*eta; ndstr.push_back("{/Symbol G}"); // G-L qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("L"); // L-B1 qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 1.-eta; qtmp[2] = eta - 1.; qe.push_back(qtmp); ndstr.push_back("B_1/B"); // B-Z qtmp = new double [3]; qtmp[0] = eta; qtmp[1] = 0.5; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-G qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = niu; qtmp[1] = 0.; qtmp[2] = -niu; qe.push_back(qtmp); ndstr.push_back("X/Q"); // Q-F qtmp = new double [3]; qtmp[0] = 1.-niu; qtmp[1] = niu; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("F"); // F-P1 qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-niu; qtmp[2] = 1.-eta; qe.push_back(qtmp); ndstr.push_back("P_1"); // P1-Z qtmp = new double [3]; qtmp[0] = qtmp[1] = 1.-niu; qtmp[2] = 1.-eta; qs.push_back(qtmp); qtmp = new double [3]; 
qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("Z/L"); // L-P qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = eta; qtmp[1] = qtmp[2] = niu; qe.push_back(qtmp); ndstr.push_back("P"); } else { // RHL2 const double eta = 0.5*(1.+cosa)/(1.-cosa); const double niu = 0.75 - 0.5*eta; ndstr.push_back("{/Symbol G}"); // G-P qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 1.-niu; qtmp[1] = -niu; qe.push_back(qtmp); ndstr.push_back("P"); // P-Z qtmp = new double [3]; qtmp[0] = qtmp[2] = 1.-niu; qtmp[1] = -niu; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); // Z-Q qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = eta; qe.push_back(qtmp); ndstr.push_back("Q"); // Q-G qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-F qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("F"); // F-P1 qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = -0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = niu; qtmp[1] = qtmp[2] = niu - 1.; qe.push_back(qtmp); ndstr.push_back("P_1"); // P1-Q1 qtmp = new double [3]; qtmp[0] = niu; qtmp[1] = qtmp[2] = niu - 1.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 1.-eta; qtmp[1] = qtmp[2] = -eta; qe.push_back(qtmp); ndstr.push_back("Q_1"); // Q1-L qtmp = new double [3]; qtmp[0] = 1.-eta; qtmp[1] = qtmp[2] = -eta; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("L"); // L-Z qtmp 
= new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("Z"); } } else if (spgnum >= 168 && spgnum <= 194){ // Hexagonal ndstr.push_back("{/Symbol G}"); // G-M qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("M"); // M-K qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("K"); // K-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-A qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A"); // A-L qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("L"); // L-H qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("H"); // H-A qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("A/L"); // L-M qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("M/K"); // K-H qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 1./3.; qtmp[2] = 0.5; 
qe.push_back(qtmp); ndstr.push_back("H"); } else if (spgnum >= 195 && spgnum <= 230){ // Cubic if (symbol[0] == 'P'){ // CUB ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); // X-M qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("M"); // M-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-R qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); // R-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.; qtmp[1] = 0.5; qe.push_back(qtmp); ndstr.push_back("X/M"); // M-R qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.5; qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("R"); } else if (symbol[0] == 'F'){ // FCC ndstr.push_back("{/Symbol G}"); // G-X qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qe.push_back(qtmp); ndstr.push_back("X"); // X-W qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.25; qtmp[2] = 0.75; qe.push_back(qtmp); ndstr.push_back("W"); // W-K qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.25; qtmp[2] = 0.75; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.375; qtmp[2] = 0.75; qe.push_back(qtmp); ndstr.push_back("K"); // K-G qtmp = new double [3]; 
qtmp[0] = qtmp[1] = 0.375; qtmp[2] = 0.75; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); // G-L qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("L"); // L-U qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.625; qtmp[1] = 0.25; qtmp[2] = 0.625; qe.push_back(qtmp); ndstr.push_back("U"); // U-W qtmp = new double [3]; qtmp[0] = 0.625; qtmp[1] = 0.25; qtmp[2] = 0.625; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.25; qtmp[2] = 0.75; qe.push_back(qtmp); ndstr.push_back("W"); // W-L qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.25; qtmp[2] = 0.75; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("L"); // L-K qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.375; qtmp[2] = 0.75; qe.push_back(qtmp); ndstr.push_back("K/U"); // U-X qtmp = new double [3]; qtmp[0] = 0.625; qtmp[1] = 0.25; qtmp[2] = 0.625; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = 0.5; qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("X"); } else { // BCC ndstr.push_back("{/Symbol G}"); // G-H qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("H"); // H-N qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("N"); // N-G qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qe.push_back(qtmp); ndstr.push_back("{/Symbol G}"); 
// G-P qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qe.push_back(qtmp); ndstr.push_back("P"); // P-H qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[2] = 0.5; qtmp[1] = -0.5; qe.push_back(qtmp); ndstr.push_back("H/P"); // P-N qtmp = new double [3]; qtmp[0] = qtmp[1] = qtmp[2] = 0.25; qs.push_back(qtmp); qtmp = new double [3]; qtmp[0] = qtmp[1] = 0.; qtmp[2] = 0.5; qe.push_back(qtmp); ndstr.push_back("N"); } } else { printf("\nSorry, failed to identify the crystal system, please use the manual mode.\n"); } // to determine the number of points along each line, with a step size of 0.05 const double qs_inv = 1./QSTEP; int nbin = qs.size(); if (nbin > 0) printf("\nPhonon dispersion will be evaluated along lines:\n\t%s", ndstr[0].c_str()); for (int is = 0; is < nbin; ++is){ double *qstr = qs[is]; double *qend = qe[is]; double ql = 0.; for (int i = 0; i < 3; ++i) ql += (qend[i] - qstr[i])*(qend[i] - qstr[i]); int nqpt = MAX(int(sqrt(ql) * qs_inv + 0.5), 2); nqbin.push_back(nqpt); printf("-%s", ndstr[is+1].c_str()); } if (nbin > 0) printf("\n"); } #endif FILE *fp = fopen(fname, "w"); fprintf(fp,"# q qr freq\n"); fprintf(fp,"# 2pi/L 2pi/L %s\n", dynmat->funit); double qr = 0., dq, q[3], qinc[3]; int nbin = qs.size(); for (int is = 0; is < nbin; ++is){ double *qstr = qs[is]; double *qend = qe[is]; int nbin = nqbin[is]; for (int i = 0; i < 3; ++i) qinc[i] = (qend[i]-qstr[i])/double(nbin-1); dq = sqrt(qinc[0]*qinc[0]+qinc[1]*qinc[1]+qinc[2]*qinc[2]); nodes.push_back(qr); for (int i = 0; i < 3; ++i) q[i] = qstr[i]; for (int ii = 0; ii < nbin; ++ii){ double wii = 1.; dynmat->getDMq(q, &wii); if (wii > 0.){ dynmat->geteigen(egvs, 0); fprintf(fp,"%lg %lg %lg %lg ", q[0], q[1], q[2], qr); for (int i = 0; i < ndim; ++i) fprintf(fp," %lg", egvs[i]); } fprintf(fp,"\n"); for (int i = 0; i < 3; ++i) q[i] += qinc[i]; qr += dq; } 
qr -= dq; delete []qstr; delete []qend; } qs.clear(); qe.clear(); if (qr > 0.) nodes.push_back(qr); fclose(fp); delete []egvs; // write the gnuplot script which helps to visualize the result int nnd = nodes.size(); if (nnd > 1){ const char qmk = char(34); // " fp = fopen("pdisp.gnuplot", "w"); fprintf(fp,"set term post enha colo 20\nset out %cpdisp.eps%c\n\n", qmk, qmk); fprintf(fp,"set xlabel %cq%c\n", qmk, qmk); fprintf(fp,"set ylabel %cfrequency (THz)%c\n\n", qmk, qmk); fprintf(fp,"set xrange [0:%lg]\nset yrange [0:*]\n\n", nodes[nnd-1]); fprintf(fp,"set grid xtics\n"); fprintf(fp,"# {/Symbol G} will give you letter gamma in the label\nset xtics ("); for (int i = 0; i < nnd-1; ++i) fprintf(fp,"%c%s%c %lg, ", qmk, ndstr[i].c_str(), qmk, nodes[i]); fprintf(fp, "%c%s%c %lg)\n\n", qmk, ndstr[nnd-1].c_str(), qmk, nodes[nnd-1]); fprintf(fp, "unset key\n\n"); fprintf(fp, "plot %c%s%c u 4:5 w l lt 1", qmk, fname, qmk); for (int i = 1; i < ndim; ++i) fprintf(fp,",\\\n%c%c u 4:%d w l lt 1", qmk, qmk, i+5); fclose(fp); printf("\nPhonon dispersion data are written to: %s, you can visualize the results\n", fname); printf("by invoking: `gnuplot pdisp.gnuplot; gv pdisp.eps`\n"); } for (int ii = 0; ii < 80; ++ii) printf("="); printf("\n"); delete []fname; nodes.clear(); ndstr.clear(); return; }
gpl-2.0
ysei/linux-2.4.32-ipod
drivers/mtd/devices/doc2000.c
15
32506
/* * Linux driver for Disk-On-Chip 2000 and Millennium * (c) 1999 Machine Vision Holdings, Inc. * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org> * * $Id: doc2000.c,v 1.50 2002/12/10 15:05:42 gleixner Exp $ */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/errno.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/doc2000.h> #define DOC_SUPPORT_2000 #define DOC_SUPPORT_MILLENNIUM #ifdef DOC_SUPPORT_2000 #define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k) #else #define DoC_is_2000(doc) (0) #endif #ifdef DOC_SUPPORT_MILLENNIUM #define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil) #else #define DoC_is_Millennium(doc) (0) #endif /* #define ECC_DEBUG */ /* I have no idea why some DoC chips can not use memcpy_from|to_io(). * This may be due to the different revisions of the ASIC controller built-in or * simplily a QA/Bug issue. Who knows ?? 
If you have trouble, please uncomment
 * this:

 #undef USE_MEMCPY
 */

/* Forward declarations for the mtd_info operations implemented below. */
static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
		    size_t *retlen, u_char *buf);
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf, u_char *eccbuf,
			int oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf, u_char *eccbuf,
			 int oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
			size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
			 size_t *retlen, const u_char *buf);
static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
				size_t *retlen, const u_char *buf);
static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);

/* Head of the list of all probed DiskOnChip 2000/Millennium devices,
   chained through DiskOnChip->nextdoc (see DoC2k_init below). */
static struct mtd_info *doc2klist = NULL;

/* Perform the required delay cycles by reading from the appropriate register.
   Each loop iteration performs one dummy bus read (NOP register on the
   Millennium, DOCStatus on the DoC 2000); 'cycles' such reads satisfy the
   chip's inter-access timing requirements. */
static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
{
	volatile char dummy;	/* volatile: keep the dummy reads from being optimised away */
	int i;

	for (i = 0; i < cycles; i++) {
		if (DoC_is_Millennium(doc))
			dummy = ReadDOC(doc->virtadr, NOP);
		else
			dummy = ReadDOC(doc->virtadr, DOCStatus);
	}
}

/* _DoC_WaitReady: out-of-line slow path of DoC_WaitReady.  Polls the RDY
   line (CDSN_CTRL_FR_B in CDSNControl) until the flash chip asserts it.
   Returns 0 when ready, -EIO after a 10-second timeout. */
static int _DoC_WaitReady(struct DiskOnChip *doc)
{
	unsigned long docptr = doc->virtadr;
	unsigned long timeo = jiffies + (HZ * 10);

	DEBUG(MTD_DEBUG_LEVEL3, "_DoC_WaitReady called for out-of-line wait\n");

	/* Out-of-line routine to wait for chip response */
	while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
		if (time_after(jiffies, timeo)) {
			DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
			return -EIO;
		}
		udelay(1);
		cond_resched();
	}

	return 0;
}

/* DoC_WaitReady: wait for the flash chip's RDY line.  Inline so the common
   case (chip already ready) costs only the mandatory delay reads; falls back
   to _DoC_WaitReady for the polling loop.  Returns 0 or -EIO on timeout. */
static inline int DoC_WaitReady(struct DiskOnChip *doc)
{
	unsigned long docptr = doc->virtadr;
	/* This is inline, to optimise the common case, where it's ready instantly */
	int ret = 0;

	/* 4 reads from the NOP register should be issued prior to the read
	   from CDSNControl, see Software Requirement 11.4 item 2. */
	DoC_Delay(doc, 4);

	if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
		/* Call the out-of-line routine to wait */
		ret = _DoC_WaitReady(doc);

	/* Issue 2 reads from the NOP register after reading from the
	   CDSNControl register, see Software Requirement 11.4 item 2. */
	DoC_Delay(doc, 2);

	return ret;
}

/* DoC_Command: Send a flash command to the flash chip through the CDSN
   Slow IO register to bypass the internal pipeline.  Each of 4 delay cycles
   (read from the NOP register) is required after writing to the CDSN Control
   register, see Software Requirement 11.4 item 3.
   Returns the DoC_WaitReady() result (0 or -EIO). */
static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command,
			      unsigned char xtraflags)
{
	unsigned long docptr = doc->virtadr;

	if (DoC_is_2000(doc))
		xtraflags |= CDSN_CTRL_FLASH_IO;

	/* Assert the CLE (Command Latch Enable) line to the flash chip */
	WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	if (DoC_is_Millennium(doc))
		WriteDOC(command, docptr, CDSNSlowIO);

	/* Send the command */
	WriteDOC_(command, docptr, doc->ioreg);

	/* Lower the CLE line */
	WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	/* Wait for the chip to respond - Software requirement 11.4.1
	   (extended for any command) */
	return DoC_WaitReady(doc);
}

/* DoC_Address: Set the current address for the flash chip through the CDSN
   Slow IO register to bypass the internal pipeline.  Each of 4 delay cycles
   (read from the NOP register) is required after writing to the CDSN Control
   register, see Software Requirement 11.4 item 3.
   'numbytes' selects which address cycles to emit (ADDR_COLUMN, ADDR_PAGE or
   ADDR_COLUMN_PAGE); 'ofs' is the byte offset within the chip.
   Returns the DoC_WaitReady() result (0 or -EIO). */
static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
		       unsigned char xtraflags1, unsigned char xtraflags2)
{
	unsigned long docptr;
	int i;

	docptr = doc->virtadr;

	if (DoC_is_2000(doc))
		xtraflags1 |= CDSN_CTRL_FLASH_IO;

	/* Assert the ALE (Address Latch Enable) line to the flash chip */
	WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	/* Send the address */
	/* Devices with 256-byte page are addressed as:
	   Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
	   * there is no device on the market with page256 and more than 24 bits.
	   Devices with 512-byte page are addressed as:
	   Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
	   * 25-31 is sent only if the chip supports it.
	   * bit 8 changes the read command to be sent
	     (NAND_CMD_READ0 or NAND_CMD_READ1). */

	if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
		if (DoC_is_Millennium(doc))
			WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
		WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
	}

	/* Strip the column bits to leave the page number (shift width depends
	   on the page size, per the addressing scheme above). */
	if (doc->page256) {
		ofs = ofs >> 8;
	} else {
		ofs = ofs >> 9;
	}

	if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
		for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
			if (DoC_is_Millennium(doc))
				WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
			WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
		}
	}

	DoC_Delay(doc, 2);	/* Needed for some slow flash chips. mf. */

	/* FIXME: The SlowIO's for millennium could be replaced by
	   a single WritePipeTerm here. mf. */

	/* Lower the ALE line */
	WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	/* Wait for the chip to respond - Software requirement 11.4.1 */
	return DoC_WaitReady(doc);
}

/* Read 'len' bytes from the DoC into 'buf', taking care of Millennium
   oddities: the Millennium reads through an internal pipeline (ReadPipeInit
   to prime it, LastDataRead for the final byte) and wraps the IO register
   window at 0xff instead of 0xffff. */
static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
{
	volatile int dummy;
	int modulus = 0xffff;
	unsigned long docptr;
	int i;

	docptr = doc->virtadr;

	if (len <= 0)
		return;

	if (DoC_is_Millennium(doc)) {
		/* Read the data via the internal pipeline through the CDSN IO
		   register, see Pipelined Read Operations 11.3 */
		dummy = ReadDOC(docptr, ReadPipeInit);

		/* Millennium should use the LastDataRead register -
		   Pipeline Reads */
		len--;

		/* This is needed for correct ECC calculation */
		modulus = 0xff;
	}

	for (i = 0; i < len; i++)
		buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));

	if (DoC_is_Millennium(doc)) {
		/* Fetch the final (pipelined) byte */
		buf[i] = ReadDOC(docptr, LastDataRead);
	}
}

/* Write 'len' bytes from 'buf' to the DoC, taking care of Millennium
   oddities: the Millennium's write pipeline must be flushed with a write
   to WritePipeTerm afterwards. */
static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
{
	unsigned long docptr;
	int i;

	docptr = doc->virtadr;

	if (len <= 0)
		return;

	for (i = 0; i < len; i++)
		WriteDOC_(buf[i], docptr, doc->ioreg + i);

	if (DoC_is_Millennium(doc)) {
		WriteDOC(0x00, docptr, WritePipeTerm);
	}
}

/* DoC_SelectChip: Select a given flash chip within the current floor.
   Returns the DoC_WaitReady() result (0 or -EIO). */
static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
{
	unsigned long docptr = doc->virtadr;

	/* Software requirement 11.4.4 before writing DeviceSelect */
	/* Deassert the CE line to eliminate glitches on the FCE# outputs */
	WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	/* Select the individual flash chip requested */
	WriteDOC(chip, docptr, CDSNDeviceSelect);
	DoC_Delay(doc, 4);

	/* Reassert the CE line */
	WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
		 CDSNControl);
	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */

	/* Wait for it to be ready */
	return DoC_WaitReady(doc);
}

/* DoC_SelectFloor: Select a given floor (bank of flash chips).
   Returns the DoC_WaitReady() result (0 or -EIO). */
static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
{
	unsigned long docptr = doc->virtadr;

	/* Select the floor (bank) of chips required */
	WriteDOC(floor, docptr, FloorSelect);

	/* Wait for the chip to be ready */
	return DoC_WaitReady(doc);
}

/* DoC_IdentChip: Identify the NAND chip at {floor,chip}.
   Resets the chip, issues ReadID and matches the manufacturer/device codes
   against the nand_flash_ids/nand_manuf_ids tables.  Returns 1 if a chip
   matching the first-identified part is found, 0 otherwise (also stores the
   geometry of the very first chip found into *doc). */
static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
{
	int mfr, id, i, j;
	volatile char dummy;

	/* Page in the required floor/chip */
	DoC_SelectFloor(doc, floor);
	DoC_SelectChip(doc, chip);

	/* Reset the chip */
	if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
		DEBUG(MTD_DEBUG_LEVEL2,
		      "DoC_Command (reset) for %d,%d returned true\n",
		      floor, chip);
		return 0;
	}

	/* Read the NAND chip ID: 1. Send ReadID command */
	if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
		DEBUG(MTD_DEBUG_LEVEL2,
		      "DoC_Command (ReadID) for %d,%d returned true\n",
		      floor, chip);
		return 0;
	}

	/* Read the NAND chip ID: 2. Send address byte zero */
	DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);

	/* Read the manufacturer and device id codes from the device */

	/* CDSN Slow IO register, see Software Requirement 11.4 item 5. */
	dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
	DoC_Delay(doc, 2);
	mfr = ReadDOC_(doc->virtadr, doc->ioreg);

	/* CDSN Slow IO register, see Software Requirement 11.4 item 5. */
	dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
	DoC_Delay(doc, 2);
	id = ReadDOC_(doc->virtadr, doc->ioreg);

	/* No response - return failure */
	if (mfr == 0xff || mfr == 0)
		return 0;

	/* Check it's the same as the first chip we identified.
	 * M-Systems say that any given DiskOnChip device should only
	 * contain _one_ type of flash part, although that's not a
	 * hardware restriction.
*/
	if (doc->mfr) {
		if (doc->mfr == mfr && doc->id == id)
			return 1;	/* This is the same as the first chip */
		else
			printk(KERN_WARNING
			       "Flash chip at floor %d, chip %d is different:\n",
			       floor, chip);
	}

	/* Print and store the manufacturer and ID codes. */
	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
		if (id == nand_flash_ids[i].id) {
			/* Try to identify manufacturer */
			for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
				if (nand_manuf_ids[j].id == mfr)
					break;
			}
			printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
			       "Chip ID: %2.2X (%s:%s)\n", mfr, id,
			       nand_manuf_ids[j].name, nand_flash_ids[i].name);
			if (!doc->mfr) {
				/* First chip found: record its geometry as
				   the geometry of the whole device. */
				doc->mfr = mfr;
				doc->id = id;
				doc->chipshift = nand_flash_ids[i].chipshift;
				doc->page256 = nand_flash_ids[i].page256;
				/* chips > 32MiB (chipshift > 25) need a third
				   page-address cycle */
				doc->pageadrlen = nand_flash_ids[i].chipshift > 25 ? 3 : 2;
				doc->erasesize = nand_flash_ids[i].erasesize;
				return 1;
			}
			return 0;
		}
	}

	/* We haven't fully identified the chip. Print as much as we know. */
	printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
	       id, mfr);
	printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
	return 0;
}

/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, identify them,
   allocate this->chips[] with one {floor,chip} entry per detected chip, and
   compute this->totlen.  On failure (no chips, or allocation failure) it
   simply returns with this->totlen left at 0, which the caller checks. */
static void DoC_ScanChips(struct DiskOnChip *this)
{
	int floor, chip;
	int numchips[MAX_FLOORS];
	int maxchips = MAX_CHIPS;
	int ret = 1;

	this->numchips = 0;
	this->mfr = 0;
	this->id = 0;

	if (DoC_is_Millennium(this))
		maxchips = MAX_CHIPS_MIL;

	/* For each floor, find the number of valid chips it contains.
	   Probing a floor stops at the first chip that fails to identify. */
	for (floor = 0; floor < MAX_FLOORS; floor++) {
		ret = 1;
		numchips[floor] = 0;

		for (chip = 0; chip < maxchips && ret != 0; chip++) {

			ret = DoC_IdentChip(this, floor, chip);
			if (ret) {
				numchips[floor]++;
				this->numchips++;
			}
		}
	}

	/* If there are none at all that we recognise, bail */
	if (!this->numchips) {
		printk(KERN_NOTICE "No flash chips recognised.\n");
		return;
	}

	/* Allocate an array to hold the information for each chip */
	this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
	if (!this->chips) {
		printk(KERN_NOTICE "No memory for allocating chip info structures\n");
		return;
	}

	ret = 0;	/* reused below as the index into this->chips[] */

	/* Fill out the chip array with {floor, chipno} for each
	 * detected chip in the device. */
	for (floor = 0; floor < MAX_FLOORS; floor++) {
		for (chip = 0; chip < numchips[floor]; chip++) {
			this->chips[ret].floor = floor;
			this->chips[ret].chip = chip;
			this->chips[ret].curadr = 0;
			this->chips[ret].curmode = 0x50;
			ret++;
		}
	}

	/* Calculate and print the total size of the device */
	this->totlen = this->numchips * (1 << this->chipshift);

	printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
	       this->numchips, this->totlen >> 20);
}

/* DoC2k_is_alias: decide whether doc1 and doc2 are in fact the same physical
   device mapped at two addresses.  Returns 1 if they alias, 0 if distinct. */
static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
{
	int tmp1, tmp2, retval;

	if (doc1->physadr == doc2->physadr)
		return 1;

	/* Use the alias resolution register which was set aside for this
	 * purpose. If its value is the same on both chips, they might
	 * be the same chip, and we write to one and check for a change in
	 * the other. It's unclear if this register is usable in the
	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
	tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
	if (tmp1 != tmp2)
		return 0;

	WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
	if (tmp2 == (tmp1 + 1) % 0xff)
		retval = 1;
	else
		retval = 0;

	/* Restore register contents.  May not be necessary, but do it just to
	 * be safe. */
	WriteDOC(tmp1, doc1->virtadr, AliasResolution);

	return retval;
}

/* Name under which DoC2k_init is exported via inter_module_register. */
static const char im_name[] = "DoC2k_init";

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in mtd are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
*/
static void DoC2k_init(struct mtd_info *mtd)
{
	struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
	struct DiskOnChip *old = NULL;

	/* We must avoid being called twice for the same device.  Walk the
	   doc2klist and bail out if this physical device is already there. */
	if (doc2klist)
		old = (struct DiskOnChip *) doc2klist->priv;

	while (old) {
		if (DoC2k_is_alias(old, this)) {
			printk(KERN_NOTICE
			       "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
			       this->physadr);
			iounmap((void *) this->virtadr);
			kfree(mtd);
			return;
		}
		if (old->nextdoc)
			old = (struct DiskOnChip *) old->nextdoc->priv;
		else
			old = NULL;
	}

	/* Pick the name and the CDSN IO window for this ASIC variant. */
	switch (this->ChipID) {
	case DOC_ChipID_Doc2k:
		mtd->name = "DiskOnChip 2000";
		this->ioreg = DoC_2k_CDSN_IO;
		break;
	case DOC_ChipID_DocMil:
		mtd->name = "DiskOnChip Millennium";
		this->ioreg = DoC_Mil_CDSN_IO;
		break;
	}

	printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
	       this->physadr);

	/* Fill in the mtd_info operations; size/erasesize are set for real
	   after the chip scan below. */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->size = 0;
	mtd->erasesize = 0;
	mtd->oobblock = 512;
	mtd->oobsize = 16;
	mtd->module = THIS_MODULE;
	mtd->erase = doc_erase;
	mtd->point = NULL;
	mtd->unpoint = NULL;
	mtd->read = doc_read;
	mtd->write = doc_write;
	mtd->read_ecc = doc_read_ecc;
	mtd->write_ecc = doc_write_ecc;
	mtd->read_oob = doc_read_oob;
	mtd->write_oob = doc_write_oob;
	mtd->sync = NULL;

	this->totlen = 0;
	this->numchips = 0;
	this->curfloor = -1;
	this->curchip = -1;
	init_MUTEX(&this->lock);

	/* Ident all the chips present. */
	DoC_ScanChips(this);

	if (!this->totlen) {
		/* Scan found nothing usable - release everything. */
		kfree(mtd);
		iounmap((void *) this->virtadr);
	} else {
		/* Success: link onto doc2klist and register with MTD. */
		this->nextdoc = doc2klist;
		doc2klist = mtd;
		mtd->size = this->totlen;
		mtd->erasesize = this->erasesize;
		add_mtd_device(mtd);
		return;
	}
}

/* doc_read: mtd_info->read hook.  Just a special case of doc_read_ecc with
   the ECC engine disabled (NULL eccbuf). */
static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
		    size_t * retlen, u_char * buf)
{
	/* Just a special case of doc_read_ecc */
	return doc_read_ecc(mtd, from, len, retlen, buf, NULL, 0);
}

/* doc_read_ecc: mtd_info->read_ecc hook.  Reads up to 'len' bytes starting
   at 'from' into 'buf'; if 'eccbuf' is non-NULL the on-chip ECC engine is
   enabled and the 6 ECC bytes are read back (and errors corrected where
   possible).  A single call never crosses a 512-byte sector boundary - 'len'
   is clamped and the actual count is reported via *retlen.
   Returns 0, -EINVAL for reads past the device end, or -EIO on an
   uncorrectable ECC error (the data has still been transferred). */
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
			size_t * retlen, u_char * buf, u_char * eccbuf,
			int oobsel)
{
	struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
	unsigned long docptr;
	struct Nand *mychip;
	unsigned char syndrome[6];
	volatile char dummy;
	int i, len256 = 0, ret=0;

	docptr = this->virtadr;

	/* Don't allow read past end of device */
	if (from >= this->totlen)
		return -EINVAL;

	down(&this->lock);

	/* Don't allow a single read to cross a 512-byte block boundary */
	if (from + len > ((from | 0x1ff) + 1))
		len = ((from | 0x1ff) + 1) - from;

	/* The ECC will not be calculated correctly if less than 512 is read */
	if (len != 0x200 && eccbuf)
		printk(KERN_WARNING
		       "ECC needs a full sector read (adr: %lx size %lx)\n",
		       (long) from, (long) len);

	/* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */

	/* Find the chip which is to be used and select it */
	mychip = &this->chips[from >> (this->chipshift)];

	if (this->curfloor != mychip->floor) {
		DoC_SelectFloor(this, mychip->floor);
		DoC_SelectChip(this, mychip->chip);
	} else if (this->curchip != mychip->chip) {
		DoC_SelectChip(this, mychip->chip);
	}

	this->curfloor = mychip->floor;
	this->curchip = mychip->chip;

	/* 256-byte-page devices use NAND_CMD_READ1 for the upper half of a
	   512-byte sector (bit 8 of the address). */
	DoC_Command(this,
		    (!this->page256 && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
		    CDSN_CTRL_WP);
	DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
		    CDSN_CTRL_ECC_IO);

	if (eccbuf) {
		/* Prime the ECC engine */
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
	} else {
		/* disable the ECC engine */
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
	}

	/* treat crossing 256-byte sector for 2M x 8bits devices */
	if (this->page256 && from + len > (from | 0xff) + 1) {
		len256 = (from | 0xff) + 1 - from;
		DoC_ReadBuf(this, buf, len256);

		DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
		DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
			    CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
	}

	DoC_ReadBuf(this, &buf[len256], len - len256);

	/* Let the caller know we completed it */
	*retlen = len;

	if (eccbuf) {
		/* Read the ECC data through the DiskOnChip ECC logic */
		/* Note: this will work even with 2M x 8bit devices as */
		/* they have 8 bytes of OOB per 256 page. mf. */
		DoC_ReadBuf(this, eccbuf, 6);

		/* Flush the pipeline */
		if (DoC_is_Millennium(this)) {
			dummy = ReadDOC(docptr, ECCConf);
			dummy = ReadDOC(docptr, ECCConf);
			i = ReadDOC(docptr, ECCConf);
		} else {
			dummy = ReadDOC(docptr, 2k_ECCStatus);
			dummy = ReadDOC(docptr, 2k_ECCStatus);
			i = ReadDOC(docptr, 2k_ECCStatus);
		}

		/* Check the ECC Status */
		if (i & 0x80) {
			int nb_errors;
			/* There was an ECC error */
#ifdef ECC_DEBUG
			printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n",
			       (long)from);
#endif
			/* Read the ECC syndrome through the DiskOnChip ECC
			   logic.  These syndrome bytes will be all ZERO when
			   there is no error */
			for (i = 0; i < 6; i++) {
				syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
			}
			nb_errors = doc_decode_ecc(buf, syndrome);
#ifdef ECC_DEBUG
			printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
#endif
			if (nb_errors < 0) {
				/* We return error, but have actually done the
				   read.  Not that this can be told to
				   user-space, via sys_read(), but at least
				   MTD-aware stuff can know about it by
				   checking *retlen */
				ret = -EIO;
			}
		}

#ifdef PSYCHO_DEBUG
		printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
		       (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
		       eccbuf[4], eccbuf[5]);
#endif

		/* disable the ECC engine */
		WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
	}

	/* according to 11.4.1, we need to wait for the busy line
	 * drop if we read to the end of the page.  */
	if(0 == ((from + *retlen) & 0x1ff)) {
		DoC_WaitReady(this);
	}

	up(&this->lock);

	return ret;
}

/* doc_write: mtd_info->write hook.  Special case of doc_write_ecc.
   NOTE(review): eccbuf here is an uninitialized stack scratch buffer that
   doc_write_ecc fills in as output - confirm against doc_write_ecc's
   contract (its tail is outside this view). */
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t * retlen, const u_char * buf)
{
	char eccbuf[6];
	return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, 0);
}

/* doc_write_ecc: mtd_info->write_ecc hook.  Writes up to 'len' bytes from
   'buf' at offset 'to', clamped so a single call never crosses a 512-byte
   sector boundary.  Returns -EINVAL for writes past the device end.
   (Function continues beyond this chunk.) */
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t * retlen, const u_char * buf, u_char * eccbuf,
			 int oobsel)
{
	struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
	int di;	/* Yes, DI is a hangover from when I was disassembling the binary driver */
	unsigned long docptr;
	volatile char dummy;
	int len256 = 0;
	struct Nand *mychip;

	docptr = this->virtadr;

	/* Don't allow write past end of device */
	if (to >= this->totlen)
		return -EINVAL;

	down(&this->lock);

	/* Don't allow a single write to cross a 512-byte block boundary */
	if (to + len > ((to | 0x1ff) + 1))
		len = ((to | 0x1ff) + 1) - to;

	/* The ECC will not be calculated correctly if less than 512 is written */
	if (len != 0x200 && eccbuf)
		printk(KERN_WARNING
		       "ECC needs a full sector write (adr: %lx size %lx)\n",
		       (long) to, (long) len);

	/* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */

	/* Find the chip which is to be used and select it */
	mychip = &this->chips[to >> (this->chipshift)];

	if (this->curfloor != mychip->floor) {
		DoC_SelectFloor(this, mychip->floor);
		DoC_SelectChip(this, mychip->chip);
	} else if (this->curchip != mychip->chip) {
		DoC_SelectChip(this, mychip->chip);
	}

	this->curfloor = mychip->floor;
this->curchip = mychip->chip; /* Set device to main plane of flash */ DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); DoC_Command(this, (!this->page256 && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0, CDSN_CTRL_WP); DoC_Command(this, NAND_CMD_SEQIN, 0); DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO); if (eccbuf) { /* Prime the ECC engine */ WriteDOC(DOC_ECC_RESET, docptr, ECCConf); WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); } else { /* disable the ECC engine */ WriteDOC(DOC_ECC_RESET, docptr, ECCConf); WriteDOC(DOC_ECC_DIS, docptr, ECCConf); } /* treat crossing 256-byte sector for 2M x 8bits devices */ if (this->page256 && to + len > (to | 0xff) + 1) { len256 = (to | 0xff) + 1 - to; DoC_WriteBuf(this, buf, len256); DoC_Command(this, NAND_CMD_PAGEPROG, 0); DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); /* There's an implicit DoC_WaitReady() in DoC_Command */ dummy = ReadDOC(docptr, CDSNSlowIO); DoC_Delay(this, 2); if (ReadDOC_(docptr, this->ioreg) & 1) { printk(KERN_ERR "Error programming flash\n"); /* Error in programming */ *retlen = 0; up(&this->lock); return -EIO; } DoC_Command(this, NAND_CMD_SEQIN, 0); DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0, CDSN_CTRL_ECC_IO); } DoC_WriteBuf(this, &buf[len256], len - len256); if (eccbuf) { WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl); if (DoC_is_Millennium(this)) { WriteDOC(0, docptr, NOP); WriteDOC(0, docptr, NOP); WriteDOC(0, docptr, NOP); } else { WriteDOC_(0, docptr, this->ioreg); WriteDOC_(0, docptr, this->ioreg); WriteDOC_(0, docptr, this->ioreg); } /* Read the ECC data through the DiskOnChip ECC logic */ for (di = 0; di < 6; di++) { eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di); } /* Reset the ECC engine */ WriteDOC(DOC_ECC_DIS, docptr, ECCConf); #ifdef PSYCHO_DEBUG printk ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], eccbuf[4], eccbuf[5]); #endif } DoC_Command(this, NAND_CMD_PAGEPROG, 0); 
DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); /* There's an implicit DoC_WaitReady() in DoC_Command */ dummy = ReadDOC(docptr, CDSNSlowIO); DoC_Delay(this, 2); if (ReadDOC_(docptr, this->ioreg) & 1) { printk(KERN_ERR "Error programming flash\n"); /* Error in programming */ *retlen = 0; up(&this->lock); return -EIO; } /* Let the caller know we completed it */ *retlen = len; if (eccbuf) { unsigned char x[8]; size_t dummy; int ret; /* Write the ECC data to flash */ for (di=0; di<6; di++) x[di] = eccbuf[di]; x[6]=0x55; x[7]=0x55; ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x); up(&this->lock); return ret; } up(&this->lock); return 0; } static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, size_t * retlen, u_char * buf) { struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv; int len256 = 0, ret; unsigned long docptr; struct Nand *mychip; down(&this->lock); docptr = this->virtadr; mychip = &this->chips[ofs >> this->chipshift]; if (this->curfloor != mychip->floor) { DoC_SelectFloor(this, mychip->floor); DoC_SelectChip(this, mychip->chip); } else if (this->curchip != mychip->chip) { DoC_SelectChip(this, mychip->chip); } this->curfloor = mychip->floor; this->curchip = mychip->chip; /* update address for 2M x 8bit devices. OOB starts on the second */ /* page to maintain compatibility with doc_read_ecc. */ if (this->page256) { if (!(ofs & 0x8)) ofs += 0x100; else ofs -= 0x8; } DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0); /* treat crossing 8-byte OOB data for 2M x 8bit devices */ /* Note: datasheet says it should automaticaly wrap to the */ /* next OOB block, but it didn't work here. mf. 
*/ if (this->page256 && ofs + len > (ofs | 0x7) + 1) { len256 = (ofs | 0x7) + 1 - ofs; DoC_ReadBuf(this, buf, len256); DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), CDSN_CTRL_WP, 0); } DoC_ReadBuf(this, &buf[len256], len - len256); *retlen = len; /* Reading the full OOB data drops us off of the end of the page, * causing the flash device to go into busy mode, so we need * to wait until ready 11.4.1 and Toshiba TC58256FT docs */ ret = DoC_WaitReady(this); up(&this->lock); return ret; } static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, size_t * retlen, const u_char * buf) { struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv; int len256 = 0; unsigned long docptr = this->virtadr; struct Nand *mychip = &this->chips[ofs >> this->chipshift]; volatile int dummy; // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len, // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]); /* Find the chip which is to be used and select it */ if (this->curfloor != mychip->floor) { DoC_SelectFloor(this, mychip->floor); DoC_SelectChip(this, mychip->chip); } else if (this->curchip != mychip->chip) { DoC_SelectChip(this, mychip->chip); } this->curfloor = mychip->floor; this->curchip = mychip->chip; /* disable the ECC engine */ WriteDOC (DOC_ECC_RESET, docptr, ECCConf); WriteDOC (DOC_ECC_DIS, docptr, ECCConf); /* Reset the chip, see Software Requirement 11.4 item 1. */ DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); /* issue the Read2 command to set the pointer to the Spare Data Area. */ DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); /* update address for 2M x 8bit devices. OOB starts on the second */ /* page to maintain compatibility with doc_read_ecc. 
*/ if (this->page256) { if (!(ofs & 0x8)) ofs += 0x100; else ofs -= 0x8; } /* issue the Serial Data In command to initial the Page Program process */ DoC_Command(this, NAND_CMD_SEQIN, 0); DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0); /* treat crossing 8-byte OOB data for 2M x 8bit devices */ /* Note: datasheet says it should automaticaly wrap to the */ /* next OOB block, but it didn't work here. mf. */ if (this->page256 && ofs + len > (ofs | 0x7) + 1) { len256 = (ofs | 0x7) + 1 - ofs; DoC_WriteBuf(this, buf, len256); DoC_Command(this, NAND_CMD_PAGEPROG, 0); DoC_Command(this, NAND_CMD_STATUS, 0); /* DoC_WaitReady() is implicit in DoC_Command */ dummy = ReadDOC(docptr, CDSNSlowIO); DoC_Delay(this, 2); if (ReadDOC_(docptr, this->ioreg) & 1) { printk(KERN_ERR "Error programming oob data\n"); /* There was an error */ *retlen = 0; return -EIO; } DoC_Command(this, NAND_CMD_SEQIN, 0); DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0); } DoC_WriteBuf(this, &buf[len256], len - len256); DoC_Command(this, NAND_CMD_PAGEPROG, 0); DoC_Command(this, NAND_CMD_STATUS, 0); /* DoC_WaitReady() is implicit in DoC_Command */ dummy = ReadDOC(docptr, CDSNSlowIO); DoC_Delay(this, 2); if (ReadDOC_(docptr, this->ioreg) & 1) { printk(KERN_ERR "Error programming oob data\n"); /* There was an error */ *retlen = 0; return -EIO; } *retlen = len; return 0; } static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, size_t * retlen, const u_char * buf) { struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv; int ret; down(&this->lock); ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf); up(&this->lock); return ret; } static int doc_erase(struct mtd_info *mtd, struct erase_info *instr) { struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv; __u32 ofs = instr->addr; __u32 len = instr->len; volatile int dummy; unsigned long docptr; struct Nand *mychip; down(&this->lock); if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) { up(&this->lock); return -EINVAL; } 
instr->state = MTD_ERASING; docptr = this->virtadr; /* FIXME: Do this in the background. Use timers or schedule_task() */ while(len) { mychip = &this->chips[ofs >> this->chipshift]; if (this->curfloor != mychip->floor) { DoC_SelectFloor(this, mychip->floor); DoC_SelectChip(this, mychip->chip); } else if (this->curchip != mychip->chip) { DoC_SelectChip(this, mychip->chip); } this->curfloor = mychip->floor; this->curchip = mychip->chip; DoC_Command(this, NAND_CMD_ERASE1, 0); DoC_Address(this, ADDR_PAGE, ofs, 0, 0); DoC_Command(this, NAND_CMD_ERASE2, 0); DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); dummy = ReadDOC(docptr, CDSNSlowIO); DoC_Delay(this, 2); if (ReadDOC_(docptr, this->ioreg) & 1) { printk(KERN_ERR "Error erasing at 0x%x\n", ofs); /* There was an error */ instr->state = MTD_ERASE_FAILED; goto callback; } ofs += mtd->erasesize; len -= mtd->erasesize; } instr->state = MTD_ERASE_DONE; callback: if (instr->callback) instr->callback(instr); up(&this->lock); return 0; } /**************************************************************************** * * Module stuff * ****************************************************************************/ int __init init_doc2000(void) { inter_module_register(im_name, THIS_MODULE, &DoC2k_init); return 0; } static void __exit cleanup_doc2000(void) { struct mtd_info *mtd; struct DiskOnChip *this; while ((mtd = doc2klist)) { this = (struct DiskOnChip *) mtd->priv; doc2klist = this->nextdoc; del_mtd_device(mtd); iounmap((void *) this->virtadr); kfree(this->chips); kfree(mtd); } inter_module_unregister(im_name); } module_exit(cleanup_doc2000); module_init(init_doc2000); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
gpl-2.0
ahsparrow/xcsoar
src/Look/FlarmTrafficLook.cpp
15
3003
/* Copyright_License {

  XCSoar Glide Computer - http://www.xcsoar.org/
  Copyright (C) 2000-2016 The XCSoar Project
  A detailed list of copyright holders can be found in the file "AUTHORS".

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation; either version 2
  of the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
}
*/

#include "FlarmTrafficLook.hpp"
#include "TrafficLook.hpp"
#include "FontDescription.hpp"
#include "Screen/Layout.hpp"

/**
 * Build all colors, brushes, pens and fonts used by the FLARM traffic
 * (radar) display.
 *
 * @param other   shared TrafficLook whose warning/alarm/team colors are reused
 * @param small   true for the small gauge variant: thinner pens (1 vs 2
 *                scaled units) and a smaller side-info font (8 vs 12)
 * @param inverse true for inverted (dark) rendering: swaps the default
 *                foreground/background between black and white
 */
void
FlarmTrafficLook::Initialise(const TrafficLook &other, bool small, bool inverse)
{
  /* Colors: passive traffic is mid-gray; warning/alarm/team colors come
     from the shared TrafficLook. */
  passive_color = Color(0x99, 0x99, 0x99);
  warning_color = other.warning_color;
  alarm_color = other.alarm_color;
  default_color = inverse ? COLOR_WHITE : COLOR_BLACK;
  selection_color = COLOR_BLUE;
  background_color = inverse ? COLOR_BLACK : COLOR_WHITE;
  radar_color = COLOR_GRAY;

  /* Brushes (filled target shapes), one per traffic state / team color. */
  warning_brush.Create(warning_color);
  alarm_brush.Create(alarm_color);
  default_brush.Create(default_color);
  passive_brush.Create(passive_color);
  selection_brush.Create(selection_color);
  radar_brush.Create(radar_color);
  team_brush_green.Create(other.team_color_green);
  team_brush_blue.Create(other.team_color_blue);
  team_brush_yellow.Create(other.team_color_yellow);
  team_brush_magenta.Create(other.team_color_magenta);

  /* Pens (outlines): width scales with display density, thinner when small. */
  unsigned width = Layout::FastScale(small ? 1u : 2u);
  warning_pen.Create(width, warning_color);
  alarm_pen.Create(width, alarm_color);
  default_pen.Create(width, default_color);
  passive_pen.Create(width, passive_color);
  selection_pen.Create(width, selection_color);

  team_pen_green.Create(width, other.team_color_green);
  team_pen_blue.Create(width, other.team_color_blue);
  team_pen_yellow.Create(width, other.team_color_yellow);
  team_pen_magenta.Create(width, other.team_color_magenta);

  /* The radar rings and unit-fraction separator are always hairlines. */
  plane_pen.Create(width, radar_color);
  radar_pen.Create(1, radar_color);
  unit_fraction_pen.Create(1, inverse ? COLOR_WHITE : COLOR_BLACK);

  /* Fonts; sizes in scaled font units, bold where emphasis is needed. */
  no_traffic_font.Load(FontDescription(Layout::FontScale(22)));
  label_font.Load(FontDescription(Layout::FontScale(12)));
  side_info_font.Load(FontDescription(Layout::FontScale(small ? 8 : 12),
                                      true));

  info_labels_font.Load(FontDescription(Layout::FontScale(12), true));
  info_values_font.Load(FontDescription(Layout::FontScale(16)));
  info_units_font.Load(FontDescription(Layout::FontScale(8)));
  call_sign_font.Load(FontDescription(Layout::FontScale(24), true));
}
gpl-2.0
bsd-hacker/xen
tools/xenmon/xenbaked.c
15
33395
/****************************************************************************** * tools/xenbaked.c * * Tool for collecting raw trace buffer data from Xen and * performing some accumulation operations and other processing * on it. * * Copyright (C) 2004 by Intel Research Cambridge * Copyright (C) 2005 by Hewlett Packard, Palo Alto and Fort Collins * Copyright (C) 2006 by Hewlett Packard Fort Collins * * Authors: Diwaker Gupta, diwaker.gupta@hp.com * Rob Gardner, rob.gardner@hp.com * Lucy Cherkasova, lucy.cherkasova.hp.com * Much code based on xentrace, authored by Mark Williamson, * mark.a.williamson@intel.com * Date: November, 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; under version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <time.h> #include <stdlib.h> #include <stdio.h> #include <sys/mman.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <xenctrl.h> #include <xen/xen.h> #include <string.h> #include <sys/select.h> #include <getopt.h> #define PERROR(_m, _a...) 
\ do { \ int __saved_errno = errno; \ fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a , \ __saved_errno, strerror(__saved_errno)); \ errno = __saved_errno; \ } while (0) typedef struct { int counter; } atomic_t; #define _atomic_read(v) ((v).counter) #include <xen/trace.h> #include "xenbaked.h" /***** Compile time configuration of defaults ********************************/ /* when we've got more records than this waiting, we log it to the output */ #define NEW_DATA_THRESH 1 /* sleep for this long (milliseconds) between checking the trace buffers */ #define POLL_SLEEP_MILLIS 100 /* Size of time period represented by each sample */ #define MS_PER_SAMPLE 100 /* CPU Frequency */ #define MHZ #define CPU_FREQ 2660 MHZ /***** The code **************************************************************/ typedef struct settings_st { struct timespec poll_sleep; unsigned long new_data_thresh; unsigned long ms_per_sample; double cpu_freq; } settings_t; struct t_struct { const struct t_info *t_info; /* Structure with information about individual buffers */ struct t_buf **meta; /* Pointers to trace buffer metadata */ unsigned char **data; /* Pointers to trace buffer data areas */ }; settings_t opts; int interrupted = 0; /* gets set if we get a SIGHUP */ int rec_count = 0; int wakeups = 0; time_t start_time; int dom0_flips = 0; _new_qos_data *new_qos; _new_qos_data **cpu_qos_data; int global_cpu; uint64_t global_now; // array of currently running domains, indexed by cpu int *running = NULL; // number of cpu's on this platform int NCPU = 0; static void advance_next_datapoint(uint64_t); static void alloc_qos_data(int ncpu); static int process_record(int, struct t_rec *); static void qos_kill_thread(int domid); static void init_current(int ncpu) { running = calloc(ncpu, sizeof(int)); NCPU = ncpu; printf("Initialized with %d %s\n", ncpu, (ncpu == 1) ? 
"cpu" : "cpu's"); } static int is_current(int domain, int cpu) { // int i; // for (i=0; i<NCPU; i++) if (running[cpu] == domain) return 1; return 0; } #if 0 /* unused */ // return the domain that's currently running on the given cpu static int current(int cpu) { return running[cpu]; } #endif static void set_current(int cpu, int domain) { running[cpu] = domain; } static void close_handler(int signal) { interrupted = 1; } #if 0 void dump_record(int cpu, struct t_rec *x) { printf("record: cpu=%x, tsc=%lx, event=%x, d1=%lx\n", cpu, x->cycles, x->event, x->data[0]); } #endif /** * millis_to_timespec - convert a time in milliseconds to a struct timespec * @millis: time interval in milliseconds */ static struct timespec millis_to_timespec(unsigned long millis) { struct timespec spec; spec.tv_sec = millis / 1000; spec.tv_nsec = (millis % 1000) * 1000; return spec; } typedef struct { int event_count; int event_id; char *text; } stat_map_t; stat_map_t stat_map[] = { { 0, 0, "Other" }, { 0, TRC_SCHED_DOM_ADD, "Add Domain" }, { 0, TRC_SCHED_DOM_REM, "Remove Domain" }, { 0, TRC_SCHED_SLEEP, "Sleep" }, { 0, TRC_SCHED_WAKE, "Wake" }, { 0, TRC_SCHED_BLOCK, "Block" }, { 0, TRC_SCHED_SWITCH, "Switch" }, { 0, TRC_SCHED_S_TIMER_FN, "Timer Func"}, { 0, TRC_SCHED_SWITCH_INFPREV, "Switch Prev" }, { 0, TRC_SCHED_SWITCH_INFNEXT, "Switch Next" }, { 0, TRC_MEM_PAGE_GRANT_MAP, "Page Map" }, { 0, TRC_MEM_PAGE_GRANT_UNMAP, "Page Unmap" }, { 0, TRC_MEM_PAGE_GRANT_TRANSFER, "Page Transfer" }, { 0, 0, 0 } }; static void check_gotten_sum(void) { #if 0 uint64_t sum, ns; extern uint64_t total_ns_gotten(uint64_t*); double percent; int i; for (i=0; i<NCPU; i++) { new_qos = cpu_qos_data[i]; ns = billion; sum = total_ns_gotten(&ns); printf("[cpu%d] ns_gotten over all domains = %lldns, over %lldns\n", i, sum, ns); percent = (double) sum; percent = (100.0*percent) / (double)ns; printf(" ==> ns_gotten = %7.3f%%\n", percent); } #endif } static void dump_stats(void) { stat_map_t *smt = stat_map; time_t 
end_time, run_time; time(&end_time); run_time = end_time - start_time; printf("Event counts:\n"); while (smt->text != NULL) { printf("%08d\t%s\n", smt->event_count, smt->text); smt++; } printf("processed %d total records in %d seconds (%ld per second)\n", rec_count, (int)run_time, (long)(rec_count/run_time)); printf("woke up %d times in %d seconds (%ld per second)\n", wakeups, (int) run_time, (long)(wakeups/run_time)); check_gotten_sum(); } static void log_event(int event_id) { stat_map_t *smt = stat_map; // printf("event_id = 0x%x\n", event_id); while (smt->text != NULL) { if (smt->event_id == event_id) { smt->event_count++; return; } smt++; } if (smt->text == NULL) stat_map[0].event_count++; // other } int virq_port; xc_evtchn *xce_handle = NULL; /* Returns the event channel handle. */ /* Stolen from xenstore code */ static int eventchn_init(void) { int rc; // to revert to old way: if (0) return -1; xce_handle = xc_evtchn_open(NULL, 0); if (xce_handle == NULL) perror("Failed to open evtchn device"); if ((rc = xc_evtchn_bind_virq(xce_handle, VIRQ_TBUF)) == -1) perror("Failed to bind to domain exception virq port"); virq_port = rc; return xce_handle == NULL ? 
-1 : 0; } static void wait_for_event(void) { int ret; fd_set inset; evtchn_port_t port; struct timeval tv; int evtchn_fd; if (xce_handle == NULL) { nanosleep(&opts.poll_sleep, NULL); return; } evtchn_fd = xc_evtchn_fd(xce_handle); FD_ZERO(&inset); FD_SET(evtchn_fd, &inset); tv.tv_sec = 1; tv.tv_usec = 0; // tv = millis_to_timespec(&opts.poll_sleep); ret = select(evtchn_fd+1, &inset, NULL, NULL, &tv); if ( (ret == 1) && FD_ISSET(evtchn_fd, &inset)) { if ((port = xc_evtchn_pending(xce_handle)) == -1) perror("Failed to read from event fd"); // if (port == virq_port) // printf("got the event I was looking for\r\n"); if (xc_evtchn_unmask(xce_handle, port) == -1) perror("Failed to write to event fd"); } } static void get_tbufs(unsigned long *mfn, unsigned long *size) { xc_interface *xc_handle = xc_interface_open(0,0,0); int ret; if ( !xc_handle ) { exit(EXIT_FAILURE); } ret = xc_tbuf_enable(xc_handle, DEFAULT_TBUF_SIZE, mfn, size); if ( ret != 0 ) { perror("Couldn't enable trace buffers"); exit(1); } xc_interface_close(xc_handle); } static void disable_tracing(void) { xc_interface *xc_handle = xc_interface_open(0,0,0); xc_tbuf_disable(xc_handle); xc_interface_close(xc_handle); } /** * map_tbufs - memory map Xen trace buffers into user space * @tbufs_mfn: mfn of the trace buffers * @num: number of trace buffers to map * @size: size of each trace buffer * * Maps the Xen trace buffers them into process address space. 
*/ static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num, unsigned long tinfo_size) { xc_interface *xc_handle; static struct t_struct tbufs = { 0 }; int i; xc_handle = xc_interface_open(0,0,0); if ( !xc_handle ) { exit(EXIT_FAILURE); } /* Map t_info metadata structure */ tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN, tinfo_size, PROT_READ, tbufs_mfn); if ( tbufs.t_info == 0 ) { PERROR("Failed to mmap trace buffers"); exit(EXIT_FAILURE); } if ( tbufs.t_info->tbuf_size == 0 ) { fprintf(stderr, "%s: tbuf_size 0!\n", __func__); exit(EXIT_FAILURE); } /* Map per-cpu buffers */ tbufs.meta = (struct t_buf **)calloc(num, sizeof(struct t_buf *)); tbufs.data = (unsigned char **)calloc(num, sizeof(unsigned char *)); if ( tbufs.meta == NULL || tbufs.data == NULL ) { PERROR( "Failed to allocate memory for buffer pointers\n"); exit(EXIT_FAILURE); } for(i=0; i<num; i++) { const uint32_t *mfn_list = (const uint32_t *)tbufs.t_info + tbufs.t_info->mfn_offset[i]; int j; xen_pfn_t pfn_list[tbufs.t_info->tbuf_size]; for ( j=0; j<tbufs.t_info->tbuf_size; j++) pfn_list[j] = (xen_pfn_t)mfn_list[j]; tbufs.meta[i] = xc_map_foreign_batch(xc_handle, DOMID_XEN, PROT_READ | PROT_WRITE, pfn_list, tbufs.t_info->tbuf_size); if ( tbufs.meta[i] == NULL ) { PERROR("Failed to map cpu buffer!"); exit(EXIT_FAILURE); } tbufs.data[i] = (unsigned char *)(tbufs.meta[i]+1); } xc_interface_close(xc_handle); return &tbufs; } /** * get_num_cpus - get the number of logical CPUs */ static unsigned int get_num_cpus(void) { xc_physinfo_t physinfo = { 0 }; xc_interface *xc_handle = xc_interface_open(0,0,0); int ret; ret = xc_physinfo(xc_handle, &physinfo); if ( ret != 0 ) { PERROR("Failure to get logical CPU count from Xen"); exit(EXIT_FAILURE); } xc_interface_close(xc_handle); opts.cpu_freq = (double)physinfo.cpu_khz/1000.0; return physinfo.nr_cpus; } /** * monitor_tbufs - monitor the contents of tbufs */ static int monitor_tbufs(void) { int i; struct t_struct *tbufs; /* Pointer to 
hypervisor maps */ struct t_buf **meta; /* pointers to the trace buffer metadata */ unsigned char **data; /* pointers to the trace buffer data areas * where they are mapped into user space. */ unsigned long tbufs_mfn; /* mfn of the tbufs */ unsigned int num; /* number of trace buffers / logical CPUS */ unsigned long tinfo_size; /* size of t_info metadata map */ unsigned long size; /* size of a single trace buffer */ unsigned long data_size, rec_size; /* get number of logical CPUs (and therefore number of trace buffers) */ num = get_num_cpus(); init_current(num); alloc_qos_data(num); printf("CPU Frequency = %7.2f\n", opts.cpu_freq); /* setup access to trace buffers */ get_tbufs(&tbufs_mfn, &tinfo_size); tbufs = map_tbufs(tbufs_mfn, num, tinfo_size); size = tbufs->t_info->tbuf_size * XC_PAGE_SIZE; data_size = size - sizeof(struct t_buf); meta = tbufs->meta; data = tbufs->data; if ( eventchn_init() < 0 ) fprintf(stderr, "Failed to initialize event channel; " "Using POLL method\r\n"); /* now, scan buffers for events */ while ( !interrupted ) { for ( i = 0; (i < num) && !interrupted; i++ ) { unsigned long start_offset, end_offset, cons, prod; cons = meta[i]->cons; prod = meta[i]->prod; xen_rmb(); /* read prod, then read item. */ if ( cons == prod ) continue; start_offset = cons % data_size; end_offset = prod % data_size; if ( start_offset >= end_offset ) { while ( start_offset != data_size ) { rec_size = process_record( i, (struct t_rec *)(data[i] + start_offset)); start_offset += rec_size; } start_offset = 0; } while ( start_offset != end_offset ) { rec_size = process_record( i, (struct t_rec *)(data[i] + start_offset)); start_offset += rec_size; } xen_mb(); /* read item, then update cons. 
*/ meta[i]->cons = prod; } wait_for_event(); wakeups++; } /* cleanup */ free(meta); free(data); /* don't need to munmap - cleanup is automatic */ return 0; } /****************************************************************************** * Command line handling *****************************************************************************/ const char *program_version = "xenbaked v1.4"; const char *program_bug_address = "<rob.gardner@hp.com>"; #define xstr(x) str(x) #define str(x) #x static void usage(void) { #define USAGE_STR \ "Usage: xenbaked [OPTION...]\n" \ "Tool to capture and partially process Xen trace buffer data\n" \ "\n" \ " -m, --ms_per_sample=MS Specify the number of milliseconds per sample\n" \ " (default " xstr(MS_PER_SAMPLE) ").\n" \ " -s, --poll-sleep=p Set sleep time, p, in milliseconds between\n" \ " polling the trace buffer for new data\n" \ " (default " xstr(POLL_SLEEP_MILLIS) ").\n" \ " -t, --log-thresh=l Set number, l, of new records required to\n" \ " trigger a write to output (default " \ xstr(NEW_DATA_THRESH) ").\n" \ " -?, --help Show this message\n" \ " -V, --version Print program version\n" \ "\n" \ "This tool is used to capture trace buffer data from Xen. 
The data is\n" \ "saved in a shared memory structure to be further processed by xenmon.\n" printf(USAGE_STR); printf("\nReport bugs to %s\n", program_bug_address); exit(EXIT_FAILURE); } /* convert the argument string pointed to by arg to a long int representation */ static long argtol(const char *restrict arg, int base) { char *endp; long val; errno = 0; val = strtol(arg, &endp, base); if (errno != 0) { fprintf(stderr, "Invalid option argument: %s\n", arg); fprintf(stderr, "Error: %s\n\n", strerror(errno)); usage(); } else if (endp == arg || *endp != '\0') { fprintf(stderr, "Invalid option argument: %s\n\n", arg); usage(); } return val; } /* parse command line arguments */ static void parse_args(int argc, char **argv) { int option; static struct option long_options[] = { { "log-thresh", required_argument, 0, 't' }, { "poll-sleep", required_argument, 0, 's' }, { "ms_per_sample", required_argument, 0, 'm' }, { "help", no_argument, 0, '?' }, { "version", no_argument, 0, 'V' }, { 0, 0, 0, 0 } }; while ( (option = getopt_long(argc, argv, "m:s:t:?V", long_options, NULL)) != -1) { switch ( option ) { case 't': /* set new records threshold for logging */ opts.new_data_thresh = argtol(optarg, 0); break; case 's': /* set sleep time (given in milliseconds) */ opts.poll_sleep = millis_to_timespec(argtol(optarg, 0)); break; case 'm': /* set ms_per_sample */ opts.ms_per_sample = argtol(optarg, 0); break; case 'V': /* print program version */ printf("%s\n", program_version); exit(EXIT_SUCCESS); break; default: usage(); } } /* all arguments should have been processed */ if (optind != argc) { usage(); } } #define SHARED_MEM_FILE "/var/run/xenq-shm" static void alloc_qos_data(int ncpu) { int i, n, pgsize, off=0; char *dummy; int qos_fd; cpu_qos_data = (_new_qos_data **) calloc(ncpu, sizeof(_new_qos_data *)); qos_fd = open(SHARED_MEM_FILE, O_RDWR|O_CREAT|O_TRUNC, 0777); if (qos_fd < 0) { PERROR(SHARED_MEM_FILE); exit(2); } pgsize = getpagesize(); dummy = malloc(pgsize); for (n=0; 
n<ncpu; n++) { for (i=0; i<sizeof(_new_qos_data); i=i+pgsize) if ((write(qos_fd, dummy, pgsize)) != pgsize) { PERROR(SHARED_MEM_FILE); exit(2); } new_qos = (_new_qos_data *) mmap(0, sizeof(_new_qos_data), PROT_READ|PROT_WRITE, MAP_SHARED, qos_fd, off); off += i; if (new_qos == MAP_FAILED) { PERROR("mmap"); exit(3); } // printf("new_qos = %p\n", new_qos); memset(new_qos, 0, sizeof(_new_qos_data)); new_qos->next_datapoint = 0; advance_next_datapoint(0); new_qos->structlen = i; new_qos->ncpu = ncpu; // printf("structlen = 0x%x\n", i); cpu_qos_data[n] = new_qos; } free(dummy); new_qos = NULL; } int main(int argc, char **argv) { int ret; struct sigaction act; time(&start_time); opts.poll_sleep = millis_to_timespec(POLL_SLEEP_MILLIS); opts.new_data_thresh = NEW_DATA_THRESH; opts.ms_per_sample = MS_PER_SAMPLE; opts.cpu_freq = CPU_FREQ; parse_args(argc, argv); fprintf(stderr, "ms_per_sample = %ld\n", opts.ms_per_sample); /* ensure that if we get a signal, we'll do cleanup, then exit */ act.sa_handler = close_handler; act.sa_flags = 0; sigemptyset(&act.sa_mask); sigaction(SIGHUP, &act, NULL); sigaction(SIGTERM, &act, NULL); sigaction(SIGINT, &act, NULL); ret = monitor_tbufs(); dump_stats(); msync(new_qos, sizeof(_new_qos_data), MS_SYNC); disable_tracing(); return ret; } static void qos_init_domain(int domid, int idx) { int i; memset(&new_qos->domain_info[idx], 0, sizeof(_domain_info)); new_qos->domain_info[idx].last_update_time = global_now; // runnable_start_time[idx] = 0; new_qos->domain_info[idx].runnable_start_time = 0; // invalidate new_qos->domain_info[idx].in_use = 1; new_qos->domain_info[idx].blocked_start_time = 0; new_qos->domain_info[idx].id = domid; if (domid == IDLE_DOMAIN_ID) snprintf(new_qos->domain_info[idx].name, sizeof(new_qos->domain_info[idx].name), "Idle Task%d", global_cpu); else snprintf(new_qos->domain_info[idx].name, sizeof(new_qos->domain_info[idx].name), "Domain#%d", domid); for (i=0; i<NSAMPLES; i++) { new_qos->qdata[i].ns_gotten[idx] = 0; 
new_qos->qdata[i].ns_allocated[idx] = 0; new_qos->qdata[i].ns_waiting[idx] = 0; new_qos->qdata[i].ns_blocked[idx] = 0; new_qos->qdata[i].switchin_count[idx] = 0; new_qos->qdata[i].io_count[idx] = 0; } } static void global_init_domain(int domid, int idx) { int cpu; _new_qos_data *saved_qos; saved_qos = new_qos; for (cpu=0; cpu<NCPU; cpu++) { new_qos = cpu_qos_data[cpu]; qos_init_domain(domid, idx); } new_qos = saved_qos; } // give index of this domain in the qos data array static int indexof(int domid) { int idx; xc_dominfo_t dominfo[NDOMAINS]; xc_interface *xc_handle; int ndomains; if (domid < 0) { // shouldn't happen printf("bad domain id: %d\r\n", domid); return 0; } for (idx=0; idx<NDOMAINS; idx++) if ( (new_qos->domain_info[idx].id == domid) && new_qos->domain_info[idx].in_use) return idx; // not found, make a new entry for (idx=0; idx<NDOMAINS; idx++) if (new_qos->domain_info[idx].in_use == 0) { global_init_domain(domid, idx); return idx; } // call domaininfo hypercall to try and garbage collect unused entries xc_handle = xc_interface_open(0,0,0); ndomains = xc_domain_getinfo(xc_handle, 0, NDOMAINS, dominfo); xc_interface_close(xc_handle); // for each domain in our data, look for it in the system dominfo structure // and purge the domain's data from our state if it does not exist in the // dominfo structure for (idx=0; idx<NDOMAINS; idx++) { int domid = new_qos->domain_info[idx].id; int jdx; for (jdx=0; jdx<ndomains; jdx++) { if (dominfo[jdx].domid == domid) break; } if (jdx == ndomains) // we didn't find domid in the dominfo struct if (domid != IDLE_DOMAIN_ID) // exception for idle domain, which is not // contained in dominfo qos_kill_thread(domid); // purge our stale data } // look again for a free slot for (idx=0; idx<NDOMAINS; idx++) if (new_qos->domain_info[idx].in_use == 0) { global_init_domain(domid, idx); return idx; } // still no space found, so bail fprintf(stderr, "out of space in domain table, increase NDOMAINS\r\n"); exit(2); } static int 
domain_runnable(int domid) { return new_qos->domain_info[indexof(domid)].runnable; } static void update_blocked_time(int domid, uint64_t now) { uint64_t t_blocked; int id = indexof(domid); if (new_qos->domain_info[id].blocked_start_time != 0) { if (now >= new_qos->domain_info[id].blocked_start_time) t_blocked = now - new_qos->domain_info[id].blocked_start_time; else t_blocked = now + (~0ULL - new_qos->domain_info[id].blocked_start_time); new_qos->qdata[new_qos->next_datapoint].ns_blocked[id] += t_blocked; } if (domain_runnable(domid)) new_qos->domain_info[id].blocked_start_time = 0; else new_qos->domain_info[id].blocked_start_time = now; } // advance to next datapoint for all domains static void advance_next_datapoint(uint64_t now) { int new, old, didx; old = new_qos->next_datapoint; new = QOS_INCR(old); new_qos->next_datapoint = new; // memset(&new_qos->qdata[new], 0, sizeof(uint64_t)*(2+5*NDOMAINS)); for (didx = 0; didx < NDOMAINS; didx++) { new_qos->qdata[new].ns_gotten[didx] = 0; new_qos->qdata[new].ns_allocated[didx] = 0; new_qos->qdata[new].ns_waiting[didx] = 0; new_qos->qdata[new].ns_blocked[didx] = 0; new_qos->qdata[new].switchin_count[didx] = 0; new_qos->qdata[new].io_count[didx] = 0; } new_qos->qdata[new].ns_passed = 0; new_qos->qdata[new].lost_records = 0; new_qos->qdata[new].flip_free_periods = 0; new_qos->qdata[new].timestamp = now; } static void qos_update_thread(int cpu, int domid, uint64_t now) { int n, id; uint64_t last_update_time, start; int64_t time_since_update, run_time = 0; id = indexof(domid); n = new_qos->next_datapoint; last_update_time = new_qos->domain_info[id].last_update_time; time_since_update = now - last_update_time; if (time_since_update < 0) { // what happened here? 
either a timestamp wraparound, or more likely, // a slight inconsistency among timestamps from various cpu's if (-time_since_update < billion) { // fairly small difference, let's just adjust 'now' to be a little // beyond last_update_time time_since_update = -time_since_update; } else if ( ((~0ULL - last_update_time) < billion) && (now < billion) ) { // difference is huge, must be a wraparound // last_update time should be "near" ~0ULL, // and now should be "near" 0 time_since_update = now + (~0ULL - last_update_time); printf("time wraparound\n"); } else { // none of the above, may be an out of order record // no good solution, just ignore and update again later return; } } new_qos->domain_info[id].last_update_time = now; if (new_qos->domain_info[id].runnable_at_last_update && is_current(domid, cpu)) { start = new_qos->domain_info[id].start_time; if (start > now) { // wrapped around run_time = now + (~0ULL - start); // this could happen if there is nothing going on within a cpu; // in this case the idle domain would run forever // printf("warning: start > now\n"); } else run_time = now - start; // if (run_time < 0) // should not happen // printf("warning: run_time < 0; start = %lld now= %lld\n", start, now); new_qos->domain_info[id].ns_oncpu_since_boot += run_time; new_qos->domain_info[id].start_time = now; new_qos->domain_info[id].ns_since_boot += time_since_update; new_qos->qdata[n].ns_gotten[id] += run_time; // if (domid == 0 && cpu == 1) // printf("adding run time for dom0 on cpu1\r\n"); } new_qos->domain_info[id].runnable_at_last_update = domain_runnable(domid); update_blocked_time(domid, now); // how much time passed since this datapoint was updated? 
if (now >= new_qos->qdata[n].timestamp) { // all is right with the world, time is increasing new_qos->qdata[n].ns_passed += (now - new_qos->qdata[n].timestamp); } else { // time wrapped around //new_qos->qdata[n].ns_passed += (now + (~0LL - new_qos->qdata[n].timestamp)); // printf("why timewrap?\r\n"); } new_qos->qdata[n].timestamp = now; } // called by dump routines to update all structures static void qos_update_all(uint64_t now, int cpu) { int i; for (i=0; i<NDOMAINS; i++) if (new_qos->domain_info[i].in_use) qos_update_thread(cpu, new_qos->domain_info[i].id, now); } static void qos_update_thread_stats(int cpu, int domid, uint64_t now) { if (new_qos->qdata[new_qos->next_datapoint].ns_passed > (million*opts.ms_per_sample)) { qos_update_all(now, cpu); advance_next_datapoint(now); return; } qos_update_thread(cpu, domid, now); } // called when a new thread gets the cpu static void qos_switch_in(int cpu, int domid, uint64_t now, unsigned long ns_alloc, unsigned long ns_waited) { int idx = indexof(domid); new_qos->domain_info[idx].runnable = 1; update_blocked_time(domid, now); new_qos->domain_info[idx].blocked_start_time = 0; // invalidate new_qos->domain_info[idx].runnable_start_time = 0; // invalidate //runnable_start_time[idx] = 0; new_qos->domain_info[idx].start_time = now; new_qos->qdata[new_qos->next_datapoint].switchin_count[idx]++; new_qos->qdata[new_qos->next_datapoint].ns_allocated[idx] += ns_alloc; new_qos->qdata[new_qos->next_datapoint].ns_waiting[idx] += ns_waited; qos_update_thread_stats(cpu, domid, now); set_current(cpu, domid); // count up page flips for dom0 execution if (domid == 0) dom0_flips = 0; } // called when the current thread is taken off the cpu static void qos_switch_out(int cpu, int domid, uint64_t now, unsigned long gotten) { int idx = indexof(domid); int n; if (!is_current(domid, cpu)) { // printf("switching out domain %d but it is not current. 
gotten=%ld\r\n", id, gotten); } if (gotten == 0) { printf("gotten==0 in qos_switchout(domid=%d)\n", domid); } if (gotten < 100) { printf("gotten<100ns in qos_switchout(domid=%d)\n", domid); } n = new_qos->next_datapoint; #if 0 new_qos->qdata[n].ns_gotten[idx] += gotten; if (gotten > new_qos->qdata[n].ns_passed) printf("inconsistency #257, diff = %lld\n", gotten - new_qos->qdata[n].ns_passed ); #endif new_qos->domain_info[idx].ns_oncpu_since_boot += gotten; new_qos->domain_info[idx].runnable_start_time = now; // runnable_start_time[id] = now; qos_update_thread_stats(cpu, domid, now); // process dom0 page flips if (domid == 0) if (dom0_flips == 0) new_qos->qdata[n].flip_free_periods++; } // called when domain is put to sleep, may also be called // when thread is already asleep static void qos_state_sleeping(int cpu, int domid, uint64_t now) { int idx; if (!domain_runnable(domid)) // double call? return; idx = indexof(domid); new_qos->domain_info[idx].runnable = 0; new_qos->domain_info[idx].blocked_start_time = now; new_qos->domain_info[idx].runnable_start_time = 0; // invalidate // runnable_start_time[idx] = 0; // invalidate qos_update_thread_stats(cpu, domid, now); } // domain died, presume it's dead on all cpu's, not just mostly dead static void qos_kill_thread(int domid) { int cpu; for (cpu=0; cpu<NCPU; cpu++) { cpu_qos_data[cpu]->domain_info[indexof(domid)].in_use = 0; } } // called when thread becomes runnable, may also be called // when thread is already runnable static void qos_state_runnable(int cpu, int domid, uint64_t now) { int idx; qos_update_thread_stats(cpu, domid, now); if (domain_runnable(domid)) // double call? 
return; idx = indexof(domid); new_qos->domain_info[idx].runnable = 1; update_blocked_time(domid, now); new_qos->domain_info[idx].blocked_start_time = 0; /* invalidate */ new_qos->domain_info[idx].runnable_start_time = now; // runnable_start_time[id] = now; } static void qos_count_packets(domid_t domid, uint64_t now) { int i, idx = indexof(domid); _new_qos_data *cpu_data; for (i=0; i<NCPU; i++) { cpu_data = cpu_qos_data[i]; if (cpu_data->domain_info[idx].in_use) { cpu_data->qdata[cpu_data->next_datapoint].io_count[idx]++; } } new_qos->qdata[new_qos->next_datapoint].io_count[0]++; dom0_flips++; } static int process_record(int cpu, struct t_rec *r) { uint64_t now = 0; uint32_t *extra_u32 = r->u.nocycles.extra_u32; new_qos = cpu_qos_data[cpu]; rec_count++; if ( r->cycles_included ) { now = ((uint64_t)r->u.cycles.cycles_hi << 32) | r->u.cycles.cycles_lo; now = ((double)now) / (opts.cpu_freq / 1000.0); extra_u32 = r->u.cycles.extra_u32; } global_now = now; global_cpu = cpu; log_event(r->event); switch (r->event) { case TRC_SCHED_SWITCH_INFPREV: // domain data[0] just switched out and received data[1] ns of cpu time qos_switch_out(cpu, extra_u32[0], now, extra_u32[1]); // printf("ns_gotten %ld\n", extra_u32[1]); break; case TRC_SCHED_SWITCH_INFNEXT: // domain data[0] just switched in and // waited data[1] ns, and was allocated data[2] ns of cpu time qos_switch_in(cpu, extra_u32[0], now, extra_u32[2], extra_u32[1]); break; case TRC_SCHED_DOM_ADD: (void) indexof(extra_u32[0]); break; case TRC_SCHED_DOM_REM: qos_kill_thread(extra_u32[0]); break; case TRC_SCHED_SLEEP: qos_state_sleeping(cpu, extra_u32[0], now); break; case TRC_SCHED_WAKE: qos_state_runnable(cpu, extra_u32[0], now); break; case TRC_SCHED_BLOCK: qos_state_sleeping(cpu, extra_u32[0], now); break; case TRC_MEM_PAGE_GRANT_TRANSFER: qos_count_packets(extra_u32[0], now); break; default: break; } new_qos = NULL; return 4 + (r->cycles_included ? 8 : 0) + (r->extra_u32 * 4); }
gpl-2.0
rex-xxx/mt6572_x201
external/webkit/Source/WebKit/win/WebNotificationCenter.cpp
15
7235
/* * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include "WebKitDLL.h" #include "WebNotificationCenter.h" #include "WebNotification.h" #include <WebCore/COMPtr.h> #include <WebCore/PlatformString.h> #include <wtf/HashMap.h> #include <wtf/HashTraits.h> #include <wtf/Vector.h> #include <wtf/text/StringHash.h> #include <utility> #include <wchar.h> using namespace WebCore; typedef std::pair<COMPtr<IUnknown>, COMPtr<IWebNotificationObserver> > ObjectObserverPair; typedef Vector<ObjectObserverPair> ObjectObserverList; typedef ObjectObserverList::iterator ObserverListIterator; typedef HashMap<String, ObjectObserverList> MappedObservers; struct WebNotificationCenterPrivate { MappedObservers m_mappedObservers; }; // WebNotificationCenter ---------------------------------------------------------------- IWebNotificationCenter* WebNotificationCenter::m_defaultCenter = 0; WebNotificationCenter::WebNotificationCenter() : m_refCount(0) , d(new WebNotificationCenterPrivate) { gClassCount++; gClassNameCount.add("WebNotificationCenter"); } WebNotificationCenter::~WebNotificationCenter() { gClassCount--; gClassNameCount.remove("WebNotificationCenter"); } WebNotificationCenter* WebNotificationCenter::createInstance() { WebNotificationCenter* instance = new WebNotificationCenter(); instance->AddRef(); return instance; } // IUnknown ------------------------------------------------------------------- HRESULT STDMETHODCALLTYPE WebNotificationCenter::QueryInterface(REFIID riid, void** ppvObject) { *ppvObject = 0; if (IsEqualGUID(riid, IID_IUnknown)) *ppvObject = static_cast<IWebNotificationCenter*>(this); else if (IsEqualGUID(riid, IID_IWebNotificationCenter)) *ppvObject = static_cast<IWebNotificationCenter*>(this); else return E_NOINTERFACE; AddRef(); return S_OK; } ULONG STDMETHODCALLTYPE WebNotificationCenter::AddRef(void) { return ++m_refCount; } ULONG STDMETHODCALLTYPE WebNotificationCenter::Release(void) { ULONG newRef = --m_refCount; if (!newRef) delete(this); return newRef; } IWebNotificationCenter* 
WebNotificationCenter::defaultCenterInternal() { if (!m_defaultCenter) m_defaultCenter = WebNotificationCenter::createInstance(); return m_defaultCenter; } void WebNotificationCenter::postNotificationInternal(IWebNotification* notification, BSTR notificationName, IUnknown* anObject) { String name(notificationName, SysStringLen(notificationName)); MappedObservers::iterator it = d->m_mappedObservers.find(name); if (it == d->m_mappedObservers.end()) return; // Intentionally make a copy of the list to avoid the possibility of errors // from a mutation of the list in the onNotify callback. ObjectObserverList list = it->second; ObserverListIterator end = list.end(); for (ObserverListIterator it2 = list.begin(); it2 != end; ++it2) { IUnknown* observedObject = it2->first.get(); IWebNotificationObserver* observer = it2->second.get(); if (!observedObject || !anObject || observedObject == anObject) observer->onNotify(notification); } } // IWebNotificationCenter ----------------------------------------------------- HRESULT STDMETHODCALLTYPE WebNotificationCenter::defaultCenter( /* [retval][out] */ IWebNotificationCenter** center) { *center = defaultCenterInternal(); (*center)->AddRef(); return S_OK; } HRESULT STDMETHODCALLTYPE WebNotificationCenter::addObserver( /* [in] */ IWebNotificationObserver* observer, /* [in] */ BSTR notificationName, /* [in] */ IUnknown* anObject) { String name(notificationName, SysStringLen(notificationName)); MappedObservers::iterator it = d->m_mappedObservers.find(name); if (it != d->m_mappedObservers.end()) it->second.append(ObjectObserverPair(anObject, observer)); else { ObjectObserverList list; list.append(ObjectObserverPair(anObject, observer)); d->m_mappedObservers.add(name, list); } return S_OK; } HRESULT STDMETHODCALLTYPE WebNotificationCenter::postNotification( /* [in] */ IWebNotification* notification) { BSTR name; HRESULT hr = notification->name(&name); if (FAILED(hr)) return hr; COMPtr<IUnknown> obj; hr = notification->getObject(&obj); if 
(FAILED(hr)) return hr; postNotificationInternal(notification, name, obj.get()); SysFreeString(name); return hr; } HRESULT STDMETHODCALLTYPE WebNotificationCenter::postNotificationName( /* [in] */ BSTR notificationName, /* [in] */ IUnknown* anObject, /* [optional][in] */ IPropertyBag* userInfo) { COMPtr<WebNotification> notification(AdoptCOM, WebNotification::createInstance(notificationName, anObject, userInfo)); postNotificationInternal(notification.get(), notificationName, anObject); return S_OK; } HRESULT STDMETHODCALLTYPE WebNotificationCenter::removeObserver( /* [in] */ IWebNotificationObserver* anObserver, /* [in] */ BSTR notificationName, /* [optional][in] */ IUnknown* anObject) { String name(notificationName, SysStringLen(notificationName)); MappedObservers::iterator it = d->m_mappedObservers.find(name); if (it == d->m_mappedObservers.end()) return E_FAIL; ObjectObserverList& observerList = it->second; ObserverListIterator end = observerList.end(); int i = 0; for (ObserverListIterator it2 = observerList.begin(); it2 != end; ++it2, ++i) { IUnknown* observedObject = it2->first.get(); IWebNotificationObserver* observer = it2->second.get(); if (observer == anObserver && (!anObject || anObject == observedObject)) { observerList.remove(i); break; } } if (observerList.isEmpty()) d->m_mappedObservers.remove(name); return S_OK; }
gpl-2.0
jeremyhammer/imx6_linux
arch/arm/plat-mxc/devices/platform-fsl-usb2-otg.c
15
1290
/* * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. * * Based on Uwe Kleine-Koenig's platform-fsl-usb2-udc.c * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_fsl_usb2_otg_data_entry_single(soc) \ { \ .iobase = soc ## _USB_OTG_BASE_ADDR, \ .irq = soc ## _INT_USB_OTG, \ } #ifdef CONFIG_SOC_IMX6Q const struct imx_fsl_usb2_otg_data imx6q_fsl_usb2_otg_data __initconst = imx_fsl_usb2_otg_data_entry_single(MX6Q); #endif /* ifdef CONFIG_SOC_IMX6Q */ struct platform_device *__init imx_add_fsl_usb2_otg( const struct imx_fsl_usb2_otg_data *data, const struct fsl_usb2_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_512 - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask("fsl-usb2-otg", -1, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); }
gpl-2.0
CyanideL/android_kernel_samsung_smdk4412
net/socket.c
271
83728
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/wanrouter.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int 
offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. 
*/ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); } /** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */ static int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0 || len > sizeof(struct sockaddr_storage)) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." 
* 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); } static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. 
*/ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) { struct qstr name = { .name = "" }; struct path path; struct file *file; int fd; fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) return fd; path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); if (unlikely(!path.dentry)) { put_unused_fd(fd); return -ENOMEM; } path.mnt = mntget(sock_mnt); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (unlikely(!file)) { /* drop dentry, keep inode */ ihold(path.dentry->d_inode); path_put(&path); put_unused_fd(fd); return -ENFILE; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; *f = file; return fd; } int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = sock_alloc_file(sock, &newfile, flags); if (likely(fd >= 0)) fd_install(fd, newfile); return fd; } EXPORT_SYMBOL(sock_map_fd); static struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; } /** * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * * The file handle passed in is locked and the socket it is bound * too is returned. If an error occurs the err pointer is overwritten * with a negative errno code and NULL is returned. The function checks * for both invalid handles and passing a handle which is not a socket. * * On a success the socket object pointer is returned. 
*/ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); percpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. */ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, .llseek = noop_llseek, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. 
*/ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); percpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } EXPORT_SYMBOL(sock_release); int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; return sock->ops->sendmsg(iocb, sock, msg, size); } static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_sendmsg); int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int 
	    result;

	set_fs(KERNEL_DS);
	/*
	 * the following is safe, since for compiler definitions of kvec and
	 * iovec are identical, yielding the same in-core layout and alignment
	 */
	msg->msg_iov = (struct iovec *)vec;
	msg->msg_iovlen = num;
	result = sock_sendmsg(sock, msg, size);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_sendmsg);

/* Convert a ktime to timespec; returns 1 if the time was non-zero. */
static int ktime2ts(ktime_t kt, struct timespec *ts)
{
	if (kt.tv64) {
		*ts = ktime_to_timespec(kt);
		return 1;
	} else {
		return 0;
	}
}

/*
 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
 */
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb)
{
	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
	struct timespec ts[3];
	int empty = 1;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	/* Race occurred between timestamp enabling and packet
	   receiving.  Fill in the current time for now. */
	if (need_software_tstamp && skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	if (need_software_tstamp) {
		if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
			struct timeval tv;
			skb_get_timestamp(skb, &tv);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
				 sizeof(tv), &tv);
		} else {
			skb_get_timestampns(skb, &ts[0]);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
				 sizeof(ts[0]), &ts[0]);
		}
	}

	/* ts[0]=software, ts[1]=hw-transformed, ts[2]=raw hardware */
	memset(ts, 0, sizeof(ts));
	if (skb->tstamp.tv64 &&
	    sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) {
		skb_get_timestampns(skb, ts + 0);
		empty = 0;
	}
	if (shhwtstamps) {
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
		    ktime2ts(shhwtstamps->syststamp, ts + 1))
			empty = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
		    ktime2ts(shhwtstamps->hwtstamp, ts + 2))
			empty = 0;
	}
	if (!empty)
		put_cmsg(msg, SOL_SOCKET,
			 SCM_TIMESTAMPING, sizeof(ts), &ts);
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);

/* Report the socket's drop counter as a SO_RXQ_OVFL cmsg, if enabled. */
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			 sizeof(__u32), &skb->dropcount);
}

void
__sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb)
{
	sock_recv_timestamp(msg, sk, skb);
	sock_recv_drops(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);

/* Fill in the sock_iocb and call the protocol's recvmsg; no LSM check. */
static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
				       struct msghdr *msg, size_t size,
				       int flags)
{
	struct sock_iocb *si = kiocb_to_siocb(iocb);

	sock_update_classid(sock->sk);

	si->sock = sock;
	si->scm = NULL;
	si->msg = msg;
	si->size = size;
	si->flags = flags;

	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
}

/* Same as above but runs the LSM recvmsg hook first. */
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size, int flags)
{
	int err = security_socket_recvmsg(sock, msg, size, flags);

	return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
}

/* Synchronous recvmsg entry point: wraps __sock_recvmsg in a sync kiocb. */
int sock_recvmsg(struct socket *sock, struct msghdr *msg,
		 size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}
EXPORT_SYMBOL(sock_recvmsg);

/* As sock_recvmsg(), but skips the LSM hook (used by recvmmsg fast path). */
static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}

/**
 * kernel_recvmsg - Receive a message from a socket (kernel space)
 * @sock:       The socket to receive the message from
 * @msg:        Received message
 * @vec:        Input s/g array for message data
 * @num:        Size of input s/g array
 * @size:       Number of bytes to read
 * @flags:      Message flags (MSG_DONTWAIT, etc...)
 *
 * On return the msg structure contains the scatter/gather array passed in the
 * vec argument. The array is modified so that it consists of the unfilled
 * portion of the original array.
* * The returned value is the total number of bytes received, or an error. */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_recvmsg); static void sock_aio_dtor(struct kiocb *iocb) { kfree(iocb->private); } static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; sock_update_classid(sock->sk); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) { siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); if (!siocb) return NULL; iocb->ki_dtor = sock_aio_dtor; } siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; 
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;

	return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
}

/* aio_read file operation for sockets. */
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	/* sockets are not seekable */
	if (pos != 0)
		return -ESPIPE;

	if (iocb->ki_left == 0)	/* Match SYS5 behaviour */
		return 0;

	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;
	return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

/* Build a msghdr from the aio iovec and hand it to __sock_sendmsg(). */
static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
			     struct file *file, const struct iovec *iov,
			     unsigned long nr_segs)
{
	struct socket *sock = file->private_data;
	size_t size = 0;
	int i;

	for (i = 0; i < nr_segs; i++)
		size += iov[i].iov_len;

	msg->msg_name = NULL;
	msg->msg_namelen = 0;
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_iov = (struct iovec *)iov;
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	/* a write() on a SEQPACKET socket delimits a record */
	if (sock->type == SOCK_SEQPACKET)
		msg->msg_flags |= MSG_EOR;

	return __sock_sendmsg(iocb, sock, msg, size);
}

/* aio_write file operation for sockets. */
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	if (pos != 0)
		return -ESPIPE;

	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;

	return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

/*
 * Atomic setting of ioctl hooks to avoid race
 * with module unload.
 */

/* Hook installed by the bridge module for SIOC*IFBR/SIOCBR* ioctls. */
static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);

void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

/* Hook installed by the 8021q module for SIOC[GS]IFVLAN ioctls. */
static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

/* Hook installed by the dlci module for SIOC{ADD,DEL}DLCI ioctls. */
static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

/* Let the protocol handle the ioctl; unknown commands fall through to
 * the network device layer. */
static long sock_do_ioctl(struct net *net, struct socket *sock,
			  unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}

/*
 *	With an ioctl, arg may well be a user mode pointer, but we don't know
 *	what to do with it - that's up to the protocol still.
 */

static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct socket *sock;
	struct sock *sk;
	void __user *argp = (void __user *)arg;
	int pid, err;
	struct net *net;

	sock = file->private_data;
	sk = sock->sk;
	net = sock_net(sk);
	/* device-private and wireless-extension ranges go to dev_ioctl() */
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
		err = dev_ioctl(net, cmd, argp);
	} else
#ifdef CONFIG_WEXT_CORE
	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
		err = dev_ioctl(net, cmd, argp);
	} else
#endif
		switch (cmd) {
		case FIOSETOWN:
		case SIOCSPGRP:
			err = -EFAULT;
			if (get_user(pid, (int __user *)argp))
				break;
			err = f_setown(sock->file, pid, 1);
			break;
		case FIOGETOWN:
		case SIOCGPGRP:
			err = put_user(f_getown(sock->file),
				       (int __user *)argp);
			break;
		case SIOCGIFBR:
		case SIOCSIFBR:
		case SIOCBRADDBR:
		case SIOCBRDELBR:
			err = -ENOPKG;
			/* autoload the module providing the hook, then
			 * re-check under the mutex */
			if (!br_ioctl_hook)
				request_module("bridge");

			mutex_lock(&br_ioctl_mutex);
			if (br_ioctl_hook)
				err = br_ioctl_hook(net, cmd, argp);
			mutex_unlock(&br_ioctl_mutex);
			break;
		case SIOCGIFVLAN:
		case SIOCSIFVLAN:
			err = -ENOPKG;
			if (!vlan_ioctl_hook)
				request_module("8021q");

			mutex_lock(&vlan_ioctl_mutex);
			if (vlan_ioctl_hook)
				err = vlan_ioctl_hook(net, argp);
			mutex_unlock(&vlan_ioctl_mutex);
			break;
		case SIOCADDDLCI:
		case SIOCDELDLCI:
			err = -ENOPKG;
			if (!dlci_ioctl_hook)
				request_module("dlci");

			mutex_lock(&dlci_ioctl_mutex);
			if (dlci_ioctl_hook)
				err = dlci_ioctl_hook(cmd, argp);
			mutex_unlock(&dlci_ioctl_mutex);
			break;
		default:
			err = sock_do_ioctl(net, sock, cmd, arg);
			break;
		}
	return err;
}

/* Create a socket without a backing fd/file; used by in-kernel callers.
 * On success *res holds the new socket. */
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
	int err;
	struct socket *sock = NULL;

	err = security_socket_create(family, type, protocol, 1);
	if (err)
		goto out;

	sock = sock_alloc();
	if (!sock) {
		err = -ENOMEM;
		goto out;
	}

	sock->type = type;
	err = security_socket_post_create(sock, family, type, protocol, 1);
	if (err)
		goto out_release;

out:
	*res = sock;
	return err;
out_release:
	sock_release(sock);
	sock = NULL;
	goto out;
}
EXPORT_SYMBOL(sock_create_lite);

/* No kernel lock
   held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
{
	struct socket *sock;

	/*
	 *      We can't return errors to poll, so it's either yes or no.
	 */
	sock = file->private_data;
	return sock->ops->poll(file, sock, wait);
}

/* mmap file operation: delegate entirely to the protocol. */
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	return sock->ops->mmap(file, sock, vma);
}

/* release file operation: tears down the socket via sock_release(). */
static int sock_close(struct inode *inode, struct file *filp)
{
	/*
	 *      It was possible the inode is NULL we were
	 *      closing an unfinished socket.
	 */

	if (!inode) {
		printk(KERN_DEBUG "sock_close: NULL inode\n");
		return 0;
	}
	sock_release(SOCKET_I(inode));
	return 0;
}

/*
 *	Update the socket async list
 *
 *	Fasync_list locking strategy.
 *
 *	1. fasync_list is modified only under process context socket lock
 *	   i.e. under semaphore.
 *	2. fasync_list is used under read_lock(&sk->sk_callback_lock)
 *	   or under socket lock
 */

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;
	struct socket_wq *wq;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);
	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
	fasync_helper(fd, filp, on, &wq->fasync_list);

	/* mirror the list state into the SOCK_FASYNC flag */
	if (!wq->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}

/* This function may be called only under socket lock or callback_lock or rcu_lock */

int sock_wake_async(struct socket *sock, int how, int band)
{
	struct socket_wq *wq;

	if (!sock)
		return -1;
	rcu_read_lock();
	wq = rcu_dereference(sock->wq);
	if (!wq || !wq->fasync_list) {
		rcu_read_unlock();
		return -1;
	}
	switch (how) {
	case SOCK_WAKE_WAITD:
		/* only signal if someone is actually waiting for data */
		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
		if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
			break;
		/* fall through */
	case SOCK_WAKE_IO:
call_kill:
		kill_fasync(&wq->fasync_list, SIGIO, band);
		break;
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list,
			    SIGURG, band);
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(sock_wake_async);

int __sock_create(struct net *net, int family, int type, int protocol,
		  struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *      Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		static int warned;
		if (!warned) {
			warned = 1;
			printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			       current->comm);
		}
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. if
	 *	the protocol is 0, the family is instructed to select an appropriate
	 *	default.
	 */
	sock = sock_alloc();
	if (!sock) {
		if (net_ratelimit())
			printk(KERN_WARNING "socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but its the
				   closest posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	/* clear ops so sock_release() won't call into the protocol again */
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

/* Create a socket in the calling process's network namespace. */
int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);

/* Create a kernel-internal socket in the initial network namespace. */
int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(&init_net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.
	 */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	/* type carries SOCK_CLOEXEC/SOCK_NONBLOCK in its high bits */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may be already another descriptor 8) Not kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = sock_alloc_file(sock1, &newfile1, flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}

	fd2 = sock_alloc_file(sock2, &newfile2, flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		/* fd1's file now owns sock1; drop it via fput */
		fput(newfile1);
		put_unused_fd(fd1);
		sock_release(sock2);
		goto out;
	}

	audit_fd_pair(fd1, fd2);
	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);

	/* fd1 and fd2 may be already another descriptors.
	 * Not kernel problem.
	 */

	err = put_user(fd1, &usockvec[0]);
	if (!err)
		err = put_user(fd2, &usockvec[1]);
	if (!err)
		return 0;

	/* copy-out failed: undo both descriptors */
	sys_close(fd2);
	sys_close(fd1);
	return err;

out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);
out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
 *
 *	We move the socket address to kernel space before we call
 *	the protocol layer (having also checked the address is ok).
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen. Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		/* clamp the backlog to the per-namespace somaxconn sysctl */
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to	recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way thats
 *	clean when we restucture accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	/* the new socket inherits type and ops from the listener */
	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = sock_alloc_file(newsock, &newfile, flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		/* getname(..., 2) asks for the peer's address */
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user((struct sockaddr *)&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike another OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.
 The address
 *	is in user space so we verify it is OK and move it to kernel space.
 *
 *	For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 *	break bindings
 *
 *	NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
 *	other SEQPACKET protocols that take time to connect() as it doesn't
 *	include the -EINPROGRESS status for such sockets.
 */

SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
		int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address);
	if (err < 0)
		goto out_put;

	err =
	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
	if (err)
		goto out_put;

	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
				 sock->file->f_flags);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the local address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	/* getname(..., 0) asks for the local address */
	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the remote address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		/* getname(..., 1) asks for the peer's address */
		err =
		    sock->ops->getname(sock, (struct sockaddr *)&address, &len,
				       1);
		if (!err)
			err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Send a datagram to a given address. We move the address into kernel
 *	space and check the user space data area is readable before invoking
 *	the protocol.
 */

SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	/* send(2) returns int, so cap the length */
	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Send a datagram down a socket.
 */

SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}

/*
 *	Receive a frame from the socket and optionally record the address of the
 *	sender.
 We verify the buffers are writable and if needed
 *	move the sender address from kernel to user space.
 */

SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	/* recv(2) returns int, so cap the length */
	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	msg.msg_name = (struct sockaddr *)&address;
	msg.msg_namelen = sizeof(address);
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		/* the receive itself succeeded; only the address copy-out
		 * can still fail */
		err2 = move_addr_to_user((struct sockaddr *)&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Receive a datagram from a socket.
 */

asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
			 unsigned flags)
{
	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}

/*
 *	Set a socket option. Because we don't know the option lengths we have
 *	to pass the user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	if (optlen < 0)
		return -EINVAL;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_setsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are handled generically; everything
		 * else goes to the protocol */
		if (level == SOL_SOCKET)
			err =
			    sock_setsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->setsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Get a socket option.
 Because we don't know the option lengths we have
 *	to pass a user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int __user *, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are handled generically; everything
		 * else goes to the protocol */
		if (level == SOL_SOCKET)
			err =
			    sock_getsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->getsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Shutdown a socket.
 */

SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_shutdown(sock, how);
		if (!err)
			err = sock->ops->shutdown(sock, how);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/* A couple of helpful macros for getting the address of the 32/64 bit
 * fields which are the same type (int / unsigned) on our platforms.
 */
#define COMPAT_MSG(msg, member)	((MSG_CMSG_COMPAT & flags) ? \
		&msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)

/* Last successfully-used destination, cached across sendmmsg() datagrams
 * so repeated sends to the same address can skip the LSM check. */
struct used_address {
	struct sockaddr_storage name;
	unsigned int name_len;
};

static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
			 struct msghdr *msg_sys, unsigned flags,
			 struct used_address *used_address)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct sockaddr_storage address;
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20]
	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	int err, ctl_len, iov_size, total_len;

	err = -EFAULT;
	if (MSG_CMSG_COMPAT & flags) {
		if (get_compat_msghdr(msg_sys, msg_compat))
			return -EFAULT;
	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
		return -EFAULT;

	/* do not move before msg_sys is valid */
	err = -EMSGSIZE;
	if (msg_sys->msg_iovlen > UIO_MAXIOV)
		goto out;

	/* Check whether to allocate the iovec area */
	err = -ENOMEM;
	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
		if (!iov)
			goto out;
	}

	/* This will also move the address data into kernel space */
	if (MSG_CMSG_COMPAT & flags) {
		err = verify_compat_iovec(msg_sys, iov,
					  (struct sockaddr *)&address,
					  VERIFY_READ);
	} else
		err = verify_iovec(msg_sys, iov,
				   (struct sockaddr *)&address,
				   VERIFY_READ);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	err = -ENOBUFS;

	if (msg_sys->msg_controllen > INT_MAX)
		goto out_freeiov;
	ctl_len = msg_sys->msg_controllen;
	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
		err =
		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
						     sizeof(ctl));
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys->msg_control;
		ctl_len = msg_sys->msg_controllen;
	} else if (ctl_len) {
		/* control data too big for the on-stack buffer: kmalloc */
		if (ctl_len > sizeof(ctl)) {
			ctl_buf = sock_kmalloc(sock->sk, ctl_len,
					       GFP_KERNEL);
			if (ctl_buf == NULL)
				goto out_freeiov;
		}
		err = -EFAULT;
		/*
		 * Careful! Before this, msg_sys->msg_control contains a user
		 * pointer.  Afterwards, it will be a kernel pointer. Thus the
		 * compiler-assisted checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
				   (void __user __force *)msg_sys->msg_control,
				   ctl_len))
			goto out_freectl;
		msg_sys->msg_control = ctl_buf;
	}
	msg_sys->msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys->msg_flags |= MSG_DONTWAIT;
	/*
	 * If this is sendmmsg() and current destination address is same as
	 * previously succeeded address, omit asking LSM's decision.
	 * used_address->name_len is initialized to UINT_MAX so that the first
	 * destination address never matches.
	 */
	if (used_address && msg_sys->msg_name &&
	    used_address->name_len == msg_sys->msg_namelen &&
	    !memcmp(&used_address->name, msg_sys->msg_name,
		    used_address->name_len)) {
		err = sock_sendmsg_nosec(sock, msg_sys, total_len);
		goto out_freectl;
	}
	err = sock_sendmsg(sock, msg_sys, total_len);
	/*
	 * If this is sendmmsg() and sending to current destination address was
	 * successful, remember it.
	 */
	if (used_address && err >= 0) {
		used_address->name_len = msg_sys->msg_namelen;
		if (msg_sys->msg_name)
			memcpy(&used_address->name, msg_sys->msg_name,
			       used_address->name_len);
	}

out_freectl:
	if (ctl_buf != ctl)
		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
	if (iov != iovstack)
		sock_kfree_s(sock->sk, iov, iov_size);
out:
	return err;
}

/*
 *	BSD sendmsg interface
 */

SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);

	if (!sock)
		goto out;

	err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Linux sendmmsg interface
 */

int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
		   unsigned int flags)
{
	int fput_needed, err, datagrams;
	struct socket *sock;
	struct mmsghdr __user *entry;
	struct compat_mmsghdr __user *compat_entry;
	struct msghdr msg_sys;
	struct used_address used_address;

	if (vlen > UIO_MAXIOV)
		vlen = UIO_MAXIOV;

	datagrams = 0;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	/* UINT_MAX guarantees the first datagram never matches the cache */
	used_address.name_len = UINT_MAX;
	entry = mmsg;
	compat_entry = (struct compat_mmsghdr __user *)mmsg;
	err = 0;

	while (datagrams < vlen) {
		if (MSG_CMSG_COMPAT & flags) {
			err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
					    &msg_sys, flags, &used_address);
			if (err < 0)
				break;
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
					    &msg_sys, flags, &used_address);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;
	}

	fput_light(sock->file, fput_needed);

	/* We only return an error if no datagrams were able to be sent */
	if (datagrams != 0)
		return datagrams;

	return err;
}

SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
		unsigned int, vlen, unsigned int, flags)
{
	return __sys_sendmmsg(fd, mmsg, vlen,
flags); } static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, iov_size, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; /* Check whether to allocate the iovec area */ err = -ENOMEM; iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user((struct sockaddr *)&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) sock_kfree_s(sock->sk, iov, iov_size); out: return err; } /* * BSD recvmsg interface */ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. 
*/ if (MSG_CMSG_COMPAT & flags) { err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[6]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. 
*/ if (copy_from_user(a, args, len)) return -EFAULT; audit_socketcall(nargs[call] / sizeof(unsigned long), a); a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } return err; } #endif /* 
__ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its address family, and have it linked into the * socket interface. The value ops->family coresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from the * new socket creation. * * If protocol handler is a module, then it can use module reference * counts to protect against new references. If protocol handler is not * a module then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); rcu_assign_pointer(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize sock SLAB cache. */ sk_init(); /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. 
*/ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER netfilter_init(); #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way. 8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timeval __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) { err = put_user(ktv.tv_sec, &up->tv_sec); err |= __put_user(ktv.tv_usec, &up->tv_usec); } return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timespec __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) { err = put_user(kts.tv_sec, &up->tv_sec); err |= __put_user(kts.tv_nsec, &up->tv_nsec); } return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return 
-EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. 
*/ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -EINVAL; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, 
&u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. */ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); 
err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= __get_user(r4.rt_window, &(ur4->rt_window)); ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= __get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return 
ret; } /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case 
SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } /* Prevent warning from compat_sys_ioctl, these always * result in -EINVAL in the native case anyway. */ switch (cmd) { case SIOCRTMSG: case SIOCGIFCOUNT: case SIOCSRARP: case SIOCGRARP: case SIOCDRARP: case SIOCSIFLINK: case SIOCGIFSLAVE: case SIOCSIFSLAVE: return -EINVAL; } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } 
EXPORT_SYMBOL(kernel_accept);

/*
 * kernel_* wrappers: let in-kernel socket users (network filesystems,
 * tunnels, etc.) drive a struct socket through its proto_ops vtable
 * without going through the syscall / copy_{to,from}_user paths.
 */

/* Connect @sock to @addr; thin forward to the protocol's ->connect(). */
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags)
{
	return sock->ops->connect(sock, addr, addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);

/* Fetch the local address of @sock (->getname() with peer == 0). */
int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
		       int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 0);
}
EXPORT_SYMBOL(kernel_getsockname);

/* Fetch the remote (peer) address of @sock (->getname() with peer == 1). */
int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
		       int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 1);
}
EXPORT_SYMBOL(kernel_getpeername);

/*
 * getsockopt with kernel-space @optval/@optlen.  The underlying
 * implementations expect __user pointers, so we temporarily lift the
 * user address-space limit with set_fs(KERNEL_DS) and force-cast the
 * kernel pointers.  SOL_SOCKET options go to the generic
 * sock_getsockopt(); everything else to the protocol's ->getsockopt().
 */
int kernel_getsockopt(struct socket *sock, int level, int optname,
			char *optval, int *optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int __user *uoptlen;
	int err;

	uoptval = (char __user __force *) optval;
	uoptlen = (int __user __force *) optlen;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
	else
		err = sock->ops->getsockopt(sock, level, optname, uoptval,
					    uoptlen);
	set_fs(oldfs);	/* always restore the saved address limit */
	return err;
}
EXPORT_SYMBOL(kernel_getsockopt);

/* setsockopt counterpart of kernel_getsockopt(); same set_fs() trick. */
int kernel_setsockopt(struct socket *sock, int level, int optname,
			char *optval, unsigned int optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *) optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_setsockopt);

/*
 * Send a page of data on @sock.  Uses the protocol's zero-copy
 * ->sendpage() when available, otherwise falls back to
 * sock_no_sendpage() (an ordinary sendmsg under the hood).
 */
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
		    size_t size, int flags)
{
	sock_update_classid(sock->sk);	/* refresh cgroup net classid */

	if (sock->ops->sendpage)
		return sock->ops->sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(kernel_sendpage);

/* Issue a socket ioctl whose @arg points at kernel memory (set_fs() trick). */
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, cmd, arg);
	set_fs(oldfs);

	return err;
}
EXPORT_SYMBOL(kernel_sock_ioctl);

/* Shut down one or both directions of @sock; forwards to ->shutdown(). */
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
	return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
gpl-2.0
RidaShamasneh/nethunter_kernel_g5
drivers/mtd/nand/gpio.c
527
7827
/*
 * drivers/mtd/nand/gpio.c
 *
 * Updated, and converted to generic GPIO based driver by Russell King.
 *
 * Written by Ben Dooks <ben@simtec.co.uk>
 * Based on 2.4 version by Mark Whittaker
 *
 * © 2004 Simtec Electronics
 *
 * Device driver for NAND connected via GPIO
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>

/* Per-device state; embeds the MTD and NAND chip structures. */
struct gpiomtd {
	void __iomem		*io_sync;	/* optional read-back register used
						 * only for bus-ordering syncs */
	struct mtd_info		mtd_info;
	struct nand_chip	nand_chip;
	struct gpio_nand_platdata plat;		/* copied config (DT or platdata) */
};

/* Recover the gpiomtd from the embedded mtd_info. */
#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)


#ifdef CONFIG_ARM
/* gpio_nand_dosync()
 *
 * Make sure the GPIO state changes occur in-order with writes to NAND
 * memory region.
 * Needed on PXA due to bus-reordering within the SoC itself (see section on
 * I/O ordering in PXA manual (section 2.3, p35)
 */
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
	unsigned long tmp;

	if (gpiomtd->io_sync) {
		/*
		 * Linux memory barriers don't cater for what's required here.
		 * What's required is what's here - a read from a separate
		 * region with a dependency on that read.
		 */
		tmp = readl(gpiomtd->io_sync);
		/* identity move creates a data dependency on the read above */
		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
	}
}
#else
/* Non-ARM builds need no extra ordering. */
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif

/*
 * NAND controller hook: update the nCE/CLE/ALE control lines via GPIO
 * and, when @cmd is a real command byte, write it to the data latch.
 */
static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);

	gpio_nand_dosync(gpiomtd);

	if (ctrl & NAND_CTRL_CHANGE) {
		/* nCE is driven low when NAND_NCE is set (active-low);
		 * CLE/ALE are driven high when their flag is set */
		gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
		gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
		gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
		gpio_nand_dosync(gpiomtd);
	}
	if (cmd == NAND_CMD_NONE)
		return;

	writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
	gpio_nand_dosync(gpiomtd);
}

/* NAND ready/busy hook: sample the RDY GPIO (non-zero == ready). */
static int gpio_nand_devready(struct mtd_info *mtd)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);

	return gpio_get_value(gpiomtd->plat.gpio_rdy);
}

#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
	{ .compatible = "gpio-control-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);

/*
 * Fill @plat from the device tree node.  GPIOs are taken positionally:
 * index 0 = RDY, 1 = nCE, 2 = ALE, 3 = CLE, 4 = nWP.  "bank-width" of
 * 2 selects a 16-bit bus; 1 (or absent) selects 8-bit; anything else
 * is rejected.  Returns 0 on success, -ENODEV without a DT node, or
 * -EINVAL for a bad bank-width.
 */
static int gpio_nand_get_config_of(const struct device *dev,
				   struct gpio_nand_platdata *plat)
{
	u32 val;

	if (!dev->of_node)
		return -ENODEV;

	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
		if (val == 2) {
			plat->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
	plat->gpio_nce = of_get_gpio(dev->of_node, 1);
	plat->gpio_ale = of_get_gpio(dev->of_node, 2);
	plat->gpio_cle = of_get_gpio(dev->of_node, 3);
	plat->gpio_nwp = of_get_gpio(dev->of_node, 4);

	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
		plat->chip_delay = val;

	return 0;
}

/*
 * Build a 4-byte MEM resource for the optional io-sync register named
 * by "gpio-control-nand,io-sync-reg".  Returns NULL when the property
 * is absent or allocation fails (both are treated as "no sync reg").
 */
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	struct resource *r;
	u64 addr;

	if (of_property_read_u64(pdev->dev.of_node,
				       "gpio-control-nand,io-sync-reg", &addr))
		return NULL;

	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = addr;
	r->end = r->start + 0x3;	/* single 32-bit register */
	r->flags = IORESOURCE_MEM;

	return r;
}
#else /* CONFIG_OF */
/* Stubs for non-DT builds: force fallback to platform data. */
static inline int gpio_nand_get_config_of(const struct device *dev,
					  struct gpio_nand_platdata *plat)
{
	return -ENOSYS;
}

static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	return NULL;
}
#endif /* CONFIG_OF */

/*
 * Obtain configuration, preferring device tree over platform data.
 * Copies platdata into @plat so later code has one source of truth.
 */
static inline int gpio_nand_get_config(const struct device *dev,
				       struct gpio_nand_platdata *plat)
{
	int ret = gpio_nand_get_config_of(dev, plat);

	if (!ret)
		return ret;

	if (dev_get_platdata(dev)) {
		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
		return 0;
	}

	return -EINVAL;
}

/*
 * Locate the io-sync register resource: DT property first, otherwise
 * the platform device's second MEM resource.  May return NULL.
 */
static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
	struct resource *r = gpio_nand_get_io_sync_of(pdev);

	if (r)
		return r;

	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}

/*
 * Tear down: release the MTD/NAND stack, then park the control lines
 * (nWP driven low — presumably re-asserting write protect, TODO confirm
 * board polarity — and nCE driven high to deselect the chip).
 */
static int gpio_nand_remove(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);

	nand_release(&gpiomtd->mtd_info);

	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
	gpio_set_value(gpiomtd->plat.gpio_nce, 1);

	return 0;
}

/*
 * Probe: map the NAND data window (and optional io-sync register),
 * read the configuration, claim and initialise the control GPIOs
 * (all via devm_* so failure paths need no explicit unwinding), wire
 * up the nand_chip callbacks, scan for a chip and register partitions.
 */
static int gpio_nand_probe(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd;
	struct nand_chip *chip;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};
	int ret = 0;

	if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
		return -EINVAL;

	gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
	if (!gpiomtd)
		return -ENOMEM;

	chip = &gpiomtd->nand_chip;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(chip->IO_ADDR_R))
		return PTR_ERR(chip->IO_ADDR_R);

	res = gpio_nand_get_io_sync(pdev);
	if (res) {
		gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(gpiomtd->io_sync))
			return PTR_ERR(gpiomtd->io_sync);
	}

	ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
	if (ret)
		return ret;

	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
				"NAND NCE");
	if (ret)
		return ret;
	gpio_direction_output(gpiomtd->plat.gpio_nce, 1);	/* deselected */

	if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
					"NAND NWP");
		if (ret)
			return ret;
	}

	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
	if (ret)
		return ret;
	gpio_direction_output(gpiomtd->plat.gpio_ale, 0);

	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
	if (ret)
		return ret;
	gpio_direction_output(gpiomtd->plat.gpio_cle, 0);

	/* RDY line is optional; without it nand_base falls back to delays */
	if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
					"NAND RDY");
		if (ret)
			return ret;
		gpio_direction_input(gpiomtd->plat.gpio_rdy);
		chip->dev_ready = gpio_nand_devready;
	}

	chip->IO_ADDR_W		= chip->IO_ADDR_R;	/* same data window */
	chip->ecc.mode		= NAND_ECC_SOFT;
	chip->options		= gpiomtd->plat.options;
	chip->chip_delay	= gpiomtd->plat.chip_delay;
	chip->cmd_ctrl		= gpio_nand_cmd_ctrl;

	gpiomtd->mtd_info.priv	= chip;
	gpiomtd->mtd_info.owner	= THIS_MODULE;

	platform_set_drvdata(pdev, gpiomtd);

	/* nWP driven high before scanning — presumably deasserts write
	 * protect; TODO confirm polarity against board wiring */
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);

	if (nand_scan(&gpiomtd->mtd_info, 1)) {
		ret = -ENXIO;
		goto err_wp;
	}

	/* let the board hook trim/adjust partitions to the found size */
	if (gpiomtd->plat.adjust_parts)
		gpiomtd->plat.adjust_parts(&gpiomtd->plat,
					   gpiomtd->mtd_info.size);

	ppdata.of_node = pdev->dev.of_node;
	ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
					gpiomtd->plat.parts,
					gpiomtd->plat.num_parts);
	if (!ret)
		return 0;

err_wp:
	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);

	return ret;
}

static struct platform_driver gpio_nand_driver = {
	.probe		= gpio_nand_probe,
	.remove		= gpio_nand_remove,
	.driver		= {
		.name	= "gpio-nand",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(gpio_nand_id_table),
	},
};

module_platform_driver(gpio_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");
gpl-2.0
dblessing/linux
arch/x86/kernel/cpu/hypervisor.c
783
2134
/*
 * Common hypervisor code
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>

/*
 * Compiled-in hypervisor back-ends to probe.  Each entry's ->detect()
 * callback returns a non-zero priority when its hypervisor is present;
 * the highest priority wins.
 */
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
#ifdef CONFIG_XEN
	&x86_hyper_xen,
#endif
	&x86_hyper_vmware,
	&x86_hyper_ms_hyperv,
#ifdef CONFIG_KVM_GUEST
	&x86_hyper_kvm,
#endif
};

/* The winning back-end, or NULL when no hypervisor was detected. */
const struct hypervisor_x86 *x86_hyper;
EXPORT_SYMBOL(x86_hyper);

/*
 * Probe every entry in hypervisors[] and record in x86_hyper the one
 * whose ->detect() reports the highest non-zero priority.  x86_hyper is
 * left untouched (NULL) when nothing is detected, so the message below
 * is only printed on a hit.
 */
static inline void __init detect_hypervisor_vendor(void)
{
	const struct hypervisor_x86 *h, * const *p;
	uint32_t pri, max_pri = 0;

	for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
		h = *p;
		pri = h->detect();
		if (pri != 0 && pri > max_pri) {
			max_pri = pri;
			x86_hyper = h;
		}
	}

	if (max_pri)
		printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
}

/*
 * Apply hypervisor-specific CPU feature fixups to @c via the detected
 * back-end's optional set_cpu_features() hook.  No-op on bare metal or
 * when the hook is absent.
 */
void init_hypervisor(struct cpuinfo_x86 *c)
{
	if (x86_hyper && x86_hyper->set_cpu_features)
		x86_hyper->set_cpu_features(c);
}

/*
 * Boot-time entry point: run detection, fix up the boot CPU, then invoke
 * the back-end's optional platform initialisation hook.  Must run before
 * init_hypervisor() is useful for any CPU.
 */
void __init init_hypervisor_platform(void)
{

	detect_hypervisor_vendor();

	if (!x86_hyper)
		return;

	init_hypervisor(&boot_cpu_data);

	if (x86_hyper->init_platform)
		x86_hyper->init_platform();
}

/*
 * True when a hypervisor was detected, it provides an x2apic_available()
 * hook, and that hook reports support.  False otherwise (including on
 * bare metal).
 */
bool __init hypervisor_x2apic_available(void)
{
	return x86_hyper && x86_hyper->x2apic_available &&
	       x86_hyper->x2apic_available();
}
gpl-2.0
pichina/linux-bcache
sound/core/oss/route.c
783
3125
/*
 *  Route Plug-In
 *  Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Library General Public License as
 *   published by the Free Software Foundation; either version 2 of
 *   the License, or (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Library General Public License for more details.
 *
 *   You should have received a copy of the GNU Library General Public
 *   License along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"

/*
 * Disable @ndsts destination channels starting at @dvp, silencing the
 * area of any channel the caller actually wants data for.
 */
static void zero_areas(struct snd_pcm_plugin_channel *dvp, int ndsts,
		       snd_pcm_uframes_t frames, int format)
{
	int i;

	for (i = 0; i < ndsts; i++, dvp++) {
		if (dvp->wanted)
			snd_pcm_area_silence(&dvp->area, 0, frames, format);
		dvp->enabled = 0;
	}
}

/* Mark the destination channel live and copy one source area into it. */
static inline void copy_area(const struct snd_pcm_plugin_channel *src_channel,
			     struct snd_pcm_plugin_channel *dst_channel,
			     snd_pcm_uframes_t frames, int format)
{
	dst_channel->enabled = 1;
	snd_pcm_area_copy(&src_channel->area, 0, &dst_channel->area, 0,
			  frames, format);
}

/*
 * Route @frames frames from the source channel set to the destination
 * channel set.  A mono (or empty) source is fanned out to every
 * destination; otherwise sources map one-to-one onto destinations and
 * any surplus destinations are silenced/disabled.
 */
static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
			const struct snd_pcm_plugin_channel *src_channels,
			struct snd_pcm_plugin_channel *dst_channels,
			snd_pcm_uframes_t frames)
{
	int nsrcs, ndsts, n;
	struct snd_pcm_plugin_channel *out;
	int format;

	if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
		return -ENXIO;
	if (frames == 0)
		return 0;

	nsrcs = plugin->src_format.channels;
	ndsts = plugin->dst_format.channels;
	format = plugin->dst_format.format;

	out = dst_channels;
	if (nsrcs <= 1) {
		/* Single source: duplicate it into all destinations. */
		for (n = 0; n < ndsts; n++, out++)
			copy_area(src_channels, out, frames, format);
		return frames;
	}

	/* Pair sources with destinations one-to-one. */
	for (n = 0; n < ndsts && n < nsrcs; n++) {
		copy_area(src_channels, out, frames, format);
		out++;
		src_channels++;
	}
	/* Silence any destinations left without a matching source. */
	if (n < ndsts)
		zero_areas(out, ndsts - n, frames, format);
	return frames;
}

/*
 * Build a route-conversion plugin for @plug.  Source and destination
 * formats must agree in rate and sample format; only the channel count
 * may differ.  On success *r_plugin holds the new plugin.
 */
int snd_pcm_plugin_build_route(struct snd_pcm_substream *plug,
			       struct snd_pcm_plugin_format *src_format,
			       struct snd_pcm_plugin_format *dst_format,
			       struct snd_pcm_plugin **r_plugin)
{
	struct snd_pcm_plugin *plugin;
	int err;

	if (snd_BUG_ON(!r_plugin))
		return -ENXIO;
	*r_plugin = NULL;
	if (snd_BUG_ON(src_format->rate != dst_format->rate))
		return -ENXIO;
	if (snd_BUG_ON(src_format->format != dst_format->format))
		return -ENXIO;

	err = snd_pcm_plugin_build(plug, "route conversion",
				   src_format, dst_format, 0, &plugin);
	if (err < 0)
		return err;
	plugin->transfer = route_transfer;
	*r_plugin = plugin;
	return 0;
}
gpl-2.0
Dosis/geeksphone-kernel-zero-2.6.35
arch/arm/mach-omap2/sdrc2xxx.c
1039
4290
/* * linux/arch/arm/mach-omap2/sdrc2xxx.c * * SDRAM timing related functions for OMAP2xxx * * Copyright (C) 2005, 2008 Texas Instruments Inc. * Copyright (C) 2005, 2008 Nokia Corporation * * Tony Lindgren <tony@atomide.com> * Paul Walmsley * Richard Woodruff <r-woodruff2@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/common.h> #include <plat/clock.h> #include <plat/sram.h> #include "prm.h" #include "clock.h" #include <plat/sdrc.h> #include "sdrc.h" /* Memory timing, DLL mode flags */ #define M_DDR 1 #define M_LOCK_CTRL (1 << 2) #define M_UNLOCK 0 #define M_LOCK 1 static struct memory_timings mem_timings; static u32 curr_perf_level = CORE_CLK_SRC_DPLL_X2; static u32 omap2xxx_sdrc_get_slow_dll_ctrl(void) { return mem_timings.slow_dll_ctrl; } static u32 omap2xxx_sdrc_get_fast_dll_ctrl(void) { return mem_timings.fast_dll_ctrl; } static u32 omap2xxx_sdrc_get_type(void) { return mem_timings.m_type; } /* * Check the DLL lock state, and return tue if running in unlock mode. * This is needed to compensate for the shifted DLL value in unlock mode. */ u32 omap2xxx_sdrc_dll_is_unlocked(void) { /* dlla and dllb are a set */ u32 dll_state = sdrc_read_reg(SDRC_DLLA_CTRL); if ((dll_state & (1 << 2)) == (1 << 2)) return 1; else return 0; } /* * 'level' is the value to store to CM_CLKSEL2_PLL.CORE_CLK_SRC. 
* Practical values are CORE_CLK_SRC_DPLL (for CORE_CLK = DPLL_CLK) or * CORE_CLK_SRC_DPLL_X2 (for CORE_CLK = * DPLL_CLK * 2) * * Used by the clock framework during CORE DPLL changes */ u32 omap2xxx_sdrc_reprogram(u32 level, u32 force) { u32 dll_ctrl, m_type; u32 prev = curr_perf_level; unsigned long flags; if ((curr_perf_level == level) && !force) return prev; if (level == CORE_CLK_SRC_DPLL) dll_ctrl = omap2xxx_sdrc_get_slow_dll_ctrl(); else if (level == CORE_CLK_SRC_DPLL_X2) dll_ctrl = omap2xxx_sdrc_get_fast_dll_ctrl(); else return prev; m_type = omap2xxx_sdrc_get_type(); local_irq_save(flags); if (cpu_is_omap2420()) __raw_writel(0xffff, OMAP2420_PRCM_VOLTSETUP); else __raw_writel(0xffff, OMAP2430_PRCM_VOLTSETUP); omap2_sram_reprogram_sdrc(level, dll_ctrl, m_type); curr_perf_level = level; local_irq_restore(flags); return prev; } /* Used by the clock framework during CORE DPLL changes */ void omap2xxx_sdrc_init_params(u32 force_lock_to_unlock_mode) { unsigned long dll_cnt; u32 fast_dll = 0; /* DDR = 1, SDR = 0 */ mem_timings.m_type = !((sdrc_read_reg(SDRC_MR_0) & 0x3) == 0x1); /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others. * In the case of 2422, its ok to use CS1 instead of CS0. 
*/ if (cpu_is_omap2422()) mem_timings.base_cs = 1; else mem_timings.base_cs = 0; if (mem_timings.m_type != M_DDR) return; /* With DDR we need to determine the low frequency DLL value */ if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL)) mem_timings.dll_mode = M_UNLOCK; else mem_timings.dll_mode = M_LOCK; if (mem_timings.base_cs == 0) { fast_dll = sdrc_read_reg(SDRC_DLLA_CTRL); dll_cnt = sdrc_read_reg(SDRC_DLLA_STATUS) & 0xff00; } else { fast_dll = sdrc_read_reg(SDRC_DLLB_CTRL); dll_cnt = sdrc_read_reg(SDRC_DLLB_STATUS) & 0xff00; } if (force_lock_to_unlock_mode) { fast_dll &= ~0xff00; fast_dll |= dll_cnt; /* Current lock mode */ } /* set fast timings with DLL filter disabled */ mem_timings.fast_dll_ctrl = (fast_dll | (3 << 8)); /* No disruptions, DDR will be offline & C-ABI not followed */ omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl, mem_timings.fast_dll_ctrl, mem_timings.base_cs, force_lock_to_unlock_mode); mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */ /* Turn status into unlock ctrl */ mem_timings.slow_dll_ctrl |= ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2)); /* 90 degree phase for anything below 133Mhz + disable DLL filter */ mem_timings.slow_dll_ctrl |= ((1 << 1) | (3 << 8)); }
gpl-2.0
markfasheh/linux-4.1-dedupe_fixes
drivers/rtc/rtc-max8907.c
1551
5329
/*
 * RTC driver for Maxim MAX8907
 *
 * Copyright (c) 2011-2012, NVIDIA Corporation.
 *
 * Based on drivers/rtc/rtc-max8925.c,
 * Copyright (C) 2009-2010 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/mfd/max8907.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/slab.h>

/* Byte offsets of the BCD time fields within the chip's time register block. */
enum {
	RTC_SEC = 0,
	RTC_MIN,
	RTC_HOUR,
	RTC_WEEKDAY,
	RTC_DATE,
	RTC_MONTH,
	RTC_YEAR1,	/* low two BCD digits of the year */
	RTC_YEAR2,	/* high two BCD digits (century) */
};

#define TIME_NUM	8	/* number of registers in a time/alarm block */
#define ALARM_1SEC	(1 << 7)
#define HOUR_12		(1 << 7)	/* hour register is in 12-hour mode */
#define HOUR_AM_PM	(1 << 5)	/* PM flag, only valid in 12-hour mode */
#define ALARM0_IRQ	(1 << 3)
#define ALARM1_IRQ	(1 << 2)
#define ALARM0_STATUS	(1 << 2)
#define ALARM1_STATUS	(1 << 1)

struct max8907_rtc {
	struct max8907		*max8907;	/* parent MFD device */
	struct regmap		*regmap;	/* RTC register map of the MFD */
	struct rtc_device	*rtc_dev;
	int			irq;		/* virq for ALARM0 */
};

/*
 * Alarm interrupt handler: disable further ALARM0 comparisons by
 * clearing the alarm control register, then notify the RTC core that
 * an alarm fired.
 */
static irqreturn_t max8907_irq_handler(int irq, void *data)
{
	struct max8907_rtc *rtc = data;

	regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);

	rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}

/*
 * Decode a TIME_NUM-byte BCD register block into struct rtc_time.
 * Handles both 12-hour (with AM/PM flag) and 24-hour register layouts;
 * 12-hour "12" maps to hour 0 before the PM offset is applied.
 */
static void regs_to_tm(u8 *regs, struct rtc_time *tm)
{
	tm->tm_year = bcd2bin(regs[RTC_YEAR2]) * 100 +
		bcd2bin(regs[RTC_YEAR1]) - 1900;
	tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1;
	tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f);
	tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07);
	if (regs[RTC_HOUR] & HOUR_12) {
		tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f);
		if (tm->tm_hour == 12)
			tm->tm_hour = 0;
		if (regs[RTC_HOUR] & HOUR_AM_PM)
			tm->tm_hour += 12;
	} else {
		tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x03f);
	}
	tm->tm_min = bcd2bin(regs[RTC_MIN] & 0x7f);
	tm->tm_sec = bcd2bin(regs[RTC_SEC] & 0x7f);
}

/*
 * Encode struct rtc_time into a TIME_NUM-byte BCD register block.
 * Always writes 24-hour format (HOUR_12 flag left clear).
 */
static void tm_to_regs(struct rtc_time *tm, u8 *regs)
{
	u8 high, low;

	high = (tm->tm_year + 1900) / 100;
	low = tm->tm_year % 100;
	regs[RTC_YEAR2] = bin2bcd(high);
	regs[RTC_YEAR1] = bin2bcd(low);
	regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
	regs[RTC_DATE] = bin2bcd(tm->tm_mday);
	regs[RTC_WEEKDAY] = tm->tm_wday;
	regs[RTC_HOUR] = bin2bcd(tm->tm_hour);
	regs[RTC_MIN] = bin2bcd(tm->tm_min);
	regs[RTC_SEC] = bin2bcd(tm->tm_sec);
}

/* Read the current time: one bulk read of the time block, then decode. */
static int max8907_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct max8907_rtc *rtc = dev_get_drvdata(dev);
	u8 regs[TIME_NUM];
	int ret;

	ret = regmap_bulk_read(rtc->regmap, MAX8907_REG_RTC_SEC, regs,
			       TIME_NUM);
	if (ret < 0)
		return ret;

	regs_to_tm(regs, tm);

	return 0;
}

/* Set the current time: encode, then one bulk write of the time block. */
static int max8907_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct max8907_rtc *rtc = dev_get_drvdata(dev);
	u8 regs[TIME_NUM];

	tm_to_regs(tm, regs);

	return regmap_bulk_write(rtc->regmap, MAX8907_REG_RTC_SEC, regs,
				 TIME_NUM);
}

/*
 * Read the ALARM0 target time and report whether the alarm is armed
 * (any comparison-enable bit set in the alarm control register).
 */
static int max8907_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct max8907_rtc *rtc = dev_get_drvdata(dev);
	u8 regs[TIME_NUM];
	unsigned int val;
	int ret;

	ret = regmap_bulk_read(rtc->regmap, MAX8907_REG_ALARM0_SEC, regs,
			       TIME_NUM);
	if (ret < 0)
		return ret;

	regs_to_tm(regs, &alrm->time);

	ret = regmap_read(rtc->regmap, MAX8907_REG_ALARM0_CNTL, &val);
	if (ret < 0)
		return ret;

	alrm->enabled = !!(val & 0x7f);

	return 0;
}

/*
 * Program ALARM0.  The alarm is disabled first so a partially-written
 * target time cannot trigger, then re-enabled (0x77: compare all fields
 * except the weekday) only if requested.
 */
static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct max8907_rtc *rtc = dev_get_drvdata(dev);
	u8 regs[TIME_NUM];
	int ret;

	tm_to_regs(&alrm->time, regs);

	/* Disable alarm while we update the target time */
	ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
	if (ret < 0)
		return ret;

	ret = regmap_bulk_write(rtc->regmap, MAX8907_REG_ALARM0_SEC, regs,
				TIME_NUM);
	if (ret < 0)
		return ret;

	if (alrm->enabled)
		ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77);

	return ret;
}

static const struct rtc_class_ops max8907_rtc_ops = {
	.read_time	= max8907_rtc_read_time,
	.set_time	= max8907_rtc_set_time,
	.read_alarm	= max8907_rtc_read_alarm,
	.set_alarm	= max8907_rtc_set_alarm,
};

/*
 * Probe: wire the RTC cell of the MAX8907 MFD to the RTC core and
 * request the ALARM0 interrupt.  All resources are devm-managed, so no
 * remove() callback is needed.
 */
static int max8907_rtc_probe(struct platform_device *pdev)
{
	struct max8907 *max8907 = dev_get_drvdata(pdev->dev.parent);
	struct max8907_rtc *rtc;
	int ret;

	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;
	platform_set_drvdata(pdev, rtc);

	rtc->max8907 = max8907;
	rtc->regmap = max8907->regmap_rtc;

	rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8907-rtc",
					&max8907_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc_dev)) {
		ret = PTR_ERR(rtc->rtc_dev);
		dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
		return ret;
	}

	rtc->irq = regmap_irq_get_virq(max8907->irqc_rtc,
				       MAX8907_IRQ_RTC_ALARM0);
	if (rtc->irq < 0)
		return rtc->irq;

	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
					max8907_irq_handler,
					IRQF_ONESHOT, "max8907-alarm0", rtc);
	if (ret < 0)
		dev_err(&pdev->dev, "Failed to request IRQ%d: %d\n",
			rtc->irq, ret);

	return ret;
}

static struct platform_driver max8907_rtc_driver = {
	.driver = {
		.name = "max8907-rtc",
	},
	.probe = max8907_rtc_probe,
};
module_platform_driver(max8907_rtc_driver);

MODULE_DESCRIPTION("Maxim MAX8907 RTC driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
DevriesL/SM-G9208_ImageBreaker
drivers/staging/vt6656/int.c
2319
5772
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: int.c * * Purpose: Handle USB interrupt endpoint * * Author: Jerry Chen * * Date: Apr. 2, 2004 * * Functions: * * Revision History: * 04-02-2004 Jerry Chen: Initial release * */ #include "int.h" #include "mib.h" #include "tmacro.h" #include "mac.h" #include "power.h" #include "bssdb.h" #include "usbpipe.h" static int msglevel = MSG_LEVEL_INFO; /* MSG_LEVEL_DEBUG */ /*+ * * Function: InterruptPollingThread * * Synopsis: Thread running at IRQL PASSIVE_LEVEL. 
* * Arguments: Device Extension * * Returns: * * Algorithm: Call USBD for input data; * * History: dd-mm-yyyy Author Comment * * * Notes: * * USB reads are by nature 'Blocking', and when in a read, the device looks * like it's in a 'stall' condition, so we deliberately time out every second * if we've gotten no data * -*/ void INTvWorkItem(struct vnt_private *pDevice) { int ntStatus; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n"); spin_lock_irq(&pDevice->lock); if (pDevice->fKillEventPollingThread != true) ntStatus = PIPEnsInterruptRead(pDevice); spin_unlock_irq(&pDevice->lock); } void INTnsProcessData(struct vnt_private *pDevice) { PSINTData pINTData; struct vnt_manager *pMgmt = &pDevice->vnt_mgmt; struct net_device_stats *pStats = &pDevice->stats; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n"); pINTData = (PSINTData) pDevice->intBuf.pDataBuf; if (pINTData->byTSR0 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (u8)(pINTData->byPkt0 & 0x0F), (u8)(pINTData->byPkt0>>4), pINTData->byTSR0); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR0, pINTData->byPkt0); /*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/ } if (pINTData->byTSR1 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (u8)(pINTData->byPkt1 & 0x0F), (u8)(pINTData->byPkt1>>4), pINTData->byTSR1); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR1, pINTData->byPkt1); /*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/ } if (pINTData->byTSR2 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (u8)(pINTData->byPkt2 & 0x0F), (u8)(pINTData->byPkt2>>4), pINTData->byTSR2); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR2, pINTData->byPkt2); /*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/ } if (pINTData->byTSR3 & TSR_VALID) { STAvUpdateTDStatCounter(&(pDevice->scStatistic), (u8)(pINTData->byPkt3 & 0x0F), (u8)(pINTData->byPkt3>>4), 
pINTData->byTSR3); BSSvUpdateNodeTxCounter(pDevice, &(pDevice->scStatistic), pINTData->byTSR3, pINTData->byPkt3); /*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/ } if (pINTData->byISR0 != 0) { if (pINTData->byISR0 & ISR_BNTX) { if (pDevice->eOPMode == OP_MODE_AP) { if (pMgmt->byDTIMCount > 0) { pMgmt->byDTIMCount--; pMgmt->sNodeDBTable[0].bRxPSPoll = false; } else if (pMgmt->byDTIMCount == 0) { /* check if multicast tx buffering */ pMgmt->byDTIMCount = pMgmt->byDTIMPeriod-1; pMgmt->sNodeDBTable[0].bRxPSPoll = true; if (pMgmt->sNodeDBTable[0].bPSEnable) bScheduleCommand((void *) pDevice, WLAN_CMD_RX_PSPOLL, NULL); } bScheduleCommand((void *) pDevice, WLAN_CMD_BECON_SEND, NULL); } /* if (pDevice->eOPMode == OP_MODE_AP) */ pDevice->bBeaconSent = true; } else { pDevice->bBeaconSent = false; } if (pINTData->byISR0 & ISR_TBTT) { if (pDevice->bEnablePSMode) bScheduleCommand((void *) pDevice, WLAN_CMD_TBTT_WAKEUP, NULL); if (pDevice->bChannelSwitch) { pDevice->byChannelSwitchCount--; if (pDevice->byChannelSwitchCount == 0) bScheduleCommand((void *) pDevice, WLAN_CMD_11H_CHSW, NULL); } } pDevice->qwCurrTSF = cpu_to_le64(pINTData->qwTSF); /*DBG_PRN_GRP01(("ISR0 = %02x , LoTsf = %08x, HiTsf = %08x\n", pINTData->byISR0, pINTData->dwLoTSF, pINTData->dwHiTSF)); */ STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic, pINTData->byRTSSuccess, pINTData->byRTSFail, pINTData->byACKFail, pINTData->byFCSErr); STAvUpdateIsrStatCounter(&pDevice->scStatistic, pINTData->byISR0, pINTData->byISR1); } if (pINTData->byISR1 != 0) if (pINTData->byISR1 & ISR_GPIO3) bScheduleCommand((void *) pDevice, WLAN_CMD_RADIO, NULL); pDevice->intBuf.uDataLen = 0; pDevice->intBuf.bInUse = false; pStats->tx_packets = pDevice->scStatistic.ullTsrOK; pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes + pDevice->scStatistic.ullTxMulticastBytes + pDevice->scStatistic.ullTxBroadcastBytes; pStats->tx_errors = pDevice->scStatistic.dwTsrErr; pStats->tx_dropped = 
pDevice->scStatistic.dwTsrErr; }
gpl-2.0
TrustZoneGenericDriver/linux-xlnx
arch/sh/boards/mach-se/770x/setup.c
2319
4314
/* * linux/arch/sh/boards/se/770x/setup.c * * Copyright (C) 2000 Kazumoto Kojima * * Hitachi SolutionEngine Support. * */ #include <linux/init.h> #include <linux/platform_device.h> #include <mach-se/mach/se.h> #include <mach-se/mach/mrshpc.h> #include <asm/machvec.h> #include <asm/io.h> #include <asm/smc37c93x.h> #include <asm/heartbeat.h> /* * Configure the Super I/O chip */ static void __init smsc_config(int index, int data) { outb_p(index, INDEX_PORT); outb_p(data, DATA_PORT); } /* XXX: Another candidate for a more generic cchip machine vector */ static void __init smsc_setup(char **cmdline_p) { outb_p(CONFIG_ENTER, CONFIG_PORT); outb_p(CONFIG_ENTER, CONFIG_PORT); /* FDC */ smsc_config(CURRENT_LDN_INDEX, LDN_FDC); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IRQ_SELECT_INDEX, 6); /* IRQ6 */ /* AUXIO (GPIO): to use IDE1 */ smsc_config(CURRENT_LDN_INDEX, LDN_AUXIO); smsc_config(GPIO46_INDEX, 0x00); /* nIOROP */ smsc_config(GPIO47_INDEX, 0x00); /* nIOWOP */ /* COM1 */ smsc_config(CURRENT_LDN_INDEX, LDN_COM1); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IO_BASE_HI_INDEX, 0x03); smsc_config(IO_BASE_LO_INDEX, 0xf8); smsc_config(IRQ_SELECT_INDEX, 4); /* IRQ4 */ /* COM2 */ smsc_config(CURRENT_LDN_INDEX, LDN_COM2); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IO_BASE_HI_INDEX, 0x02); smsc_config(IO_BASE_LO_INDEX, 0xf8); smsc_config(IRQ_SELECT_INDEX, 3); /* IRQ3 */ /* RTC */ smsc_config(CURRENT_LDN_INDEX, LDN_RTC); smsc_config(ACTIVATE_INDEX, 0x01); smsc_config(IRQ_SELECT_INDEX, 8); /* IRQ8 */ /* XXX: PARPORT, KBD, and MOUSE will come here... 
*/ outb_p(CONFIG_EXIT, CONFIG_PORT); } static struct resource cf_ide_resources[] = { [0] = { .start = PA_MRSHPC_IO + 0x1f0, .end = PA_MRSHPC_IO + 0x1f0 + 8, .flags = IORESOURCE_MEM, }, [1] = { .start = PA_MRSHPC_IO + 0x1f0 + 0x206, .end = PA_MRSHPC_IO + 0x1f0 + 8 + 0x206 + 8, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_CFCARD, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cf_ide_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(cf_ide_resources), .resource = cf_ide_resources, }; static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), }; static struct resource heartbeat_resource = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ defined(CONFIG_CPU_SUBTYPE_SH7712) /* SH771X Ethernet driver */ static struct resource sh_eth0_resources[] = { [0] = { .start = SH_ETH0_BASE, .end = SH_ETH0_BASE + 0x1B8, .flags = IORESOURCE_MEM, }, [1] = { .start = SH_ETH0_IRQ, .end = SH_ETH0_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_eth0_device = { .name = "sh771x-ether", .id = 0, .dev = { .platform_data = PHY_ID, }, .num_resources = ARRAY_SIZE(sh_eth0_resources), .resource = sh_eth0_resources, }; static struct resource sh_eth1_resources[] = { [0] = { .start = SH_ETH1_BASE, .end = SH_ETH1_BASE + 0x1B8, .flags = IORESOURCE_MEM, }, [1] = { .start = SH_ETH1_IRQ, .end = SH_ETH1_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_eth1_device = { .name = "sh771x-ether", .id = 1, .dev = { .platform_data = PHY_ID, }, .num_resources = ARRAY_SIZE(sh_eth1_resources), .resource = sh_eth1_resources, }; #endif static 
struct platform_device *se_devices[] __initdata = { &heartbeat_device, &cf_ide_device, #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ defined(CONFIG_CPU_SUBTYPE_SH7712) &sh_eth0_device, &sh_eth1_device, #endif }; static int __init se_devices_setup(void) { mrshpc_setup_windows(); return platform_add_devices(se_devices, ARRAY_SIZE(se_devices)); } device_initcall(se_devices_setup); /* * The Machine Vector */ static struct sh_machine_vector mv_se __initmv = { .mv_name = "SolutionEngine", .mv_setup = smsc_setup, .mv_init_irq = init_se_IRQ, };
gpl-2.0
engine95/s2-gts28wifi-exynos5433
drivers/mtd/devices/sst25l.c
2319
10029
/*
 * sst25l.c
 *
 * Driver for SST25L SPI Flash chips
 *
 * Copyright © 2009 Bluewater Systems Ltd
 * Author: Andre Renaud <andre@bluewatersys.com>
 * Author: Ryan Mallon
 *
 * Based on m25p80.c
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>

/* Erases can take up to 3 seconds! */
#define MAX_READY_WAIT_JIFFIES	msecs_to_jiffies(3000)

#define SST25L_CMD_WRSR		0x01	/* Write status register */
#define SST25L_CMD_WRDI		0x04	/* Write disable */
#define SST25L_CMD_RDSR		0x05	/* Read status register */
#define SST25L_CMD_WREN		0x06	/* Write enable */
#define SST25L_CMD_READ		0x03	/* High speed read */

#define SST25L_CMD_EWSR		0x50	/* Enable write status register */
#define SST25L_CMD_SECTOR_ERASE	0x20	/* Erase sector */
#define SST25L_CMD_READ_ID	0x90	/* Read device ID */
#define SST25L_CMD_AAI_PROGRAM	0xaf	/* Auto address increment */

#define SST25L_STATUS_BUSY	(1 << 0)	/* Chip is busy */
#define SST25L_STATUS_WREN	(1 << 1)	/* Write enabled */
#define SST25L_STATUS_BP0	(1 << 2)	/* Block protection 0 */
#define SST25L_STATUS_BP1	(1 << 3)	/* Block protection 1 */

/* Per-device state: the SPI slave, a lock serialising all flash
 * operations, and the mtd_info registered with the MTD core. */
struct sst25l_flash {
	struct spi_device	*spi;
	struct mutex		lock;
	struct mtd_info		mtd;
};

/* Static description of one supported chip variant. */
struct flash_info {
	const char		*name;
	uint16_t		device_id;
	unsigned		page_size;
	unsigned		nr_pages;
	unsigned		erase_size;
};

#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)

static struct flash_info sst25l_flash_info[] = {
	{"sst25lf020a", 0xbf43, 256, 1024, 4096},
	{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};

/*
 * Read the chip's status register into *status.
 * Returns 0 on success or a negative SPI error code.
 */
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
	struct spi_message m;
	struct spi_transfer t;
	unsigned char cmd_resp[2];
	int err;

	spi_message_init(&m);
	memset(&t, 0, sizeof(struct spi_transfer));

	/* Full-duplex: byte 0 clocks out RDSR, byte 1 clocks in the
	 * status register value. */
	cmd_resp[0] = SST25L_CMD_RDSR;
	cmd_resp[1] = 0xff;
	t.tx_buf = cmd_resp;
	t.rx_buf = cmd_resp;
	t.len = sizeof(cmd_resp);
	spi_message_add_tail(&t, &m);
	err = spi_sync(flash->spi, &m);
	if (err < 0)
		return err;

	*status = cmd_resp[1];
	return 0;
}

/*
 * Enable (enable != 0) or disable writes to the chip.
 * Enabling also clears the block-protection bits; disabling sets them.
 * Returns 0 on success, a negative SPI error, or -EROFS if the chip
 * refused to set its write-enable latch.
 */
static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
{
	unsigned char command[2];
	int status, err;

	command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
	err = spi_write(flash->spi, command, 1);
	if (err)
		return err;

	/* EWSR must precede WRSR on this family. */
	command[0] = SST25L_CMD_EWSR;
	err = spi_write(flash->spi, command, 1);
	if (err)
		return err;

	command[0] = SST25L_CMD_WRSR;
	command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
	err = spi_write(flash->spi, command, 2);
	if (err)
		return err;

	if (enable) {
		/* Verify the write-enable latch actually took. */
		err = sst25l_status(flash, &status);
		if (err)
			return err;
		if (!(status & SST25L_STATUS_WREN))
			return -EROFS;
	}

	return 0;
}

/*
 * Poll the status register until the BUSY bit clears.
 * Returns 0 when ready, a negative SPI error, or -ETIMEDOUT after
 * MAX_READY_WAIT_JIFFIES.
 */
static int sst25l_wait_till_ready(struct sst25l_flash *flash)
{
	unsigned long deadline;
	int status, err;

	deadline = jiffies + MAX_READY_WAIT_JIFFIES;
	do {
		err = sst25l_status(flash, &status);
		if (err)
			return err;
		if (!(status & SST25L_STATUS_BUSY))
			return 0;

		cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	return -ETIMEDOUT;
}

/*
 * Erase the single sector containing @offset, waiting for completion.
 * Caller must hold flash->lock.
 */
static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
{
	unsigned char command[4];
	int err;

	err = sst25l_write_enable(flash, 1);
	if (err)
		return err;

	command[0] = SST25L_CMD_SECTOR_ERASE;
	command[1] = offset >> 16;
	command[2] = offset >> 8;
	command[3] = offset;
	err = spi_write(flash->spi, command, 4);
	if (err)
		return err;

	err = sst25l_wait_till_ready(flash);
	if (err)
		return err;

	return sst25l_write_enable(flash, 0);
}

/*
 * MTD erase callback: erase [instr->addr, instr->addr + instr->len),
 * which must be sector-aligned in both address and length.
 */
static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	uint32_t addr, end;
	int err;

	/* Sanity checks */
	if ((uint32_t)instr->len % mtd->erasesize)
		return -EINVAL;

	if ((uint32_t)instr->addr % mtd->erasesize)
		return -EINVAL;

	addr = instr->addr;
	end = addr + instr->len;

	mutex_lock(&flash->lock);

	err = sst25l_wait_till_ready(flash);
	if (err) {
		mutex_unlock(&flash->lock);
		return err;
	}

	while (addr < end) {
		err = sst25l_erase_sector(flash, addr);
		if (err) {
			mutex_unlock(&flash->lock);
			instr->state = MTD_ERASE_FAILED;
			dev_err(&flash->spi->dev, "Erase failed\n");
			return err;
		}

		addr += mtd->erasesize;
	}

	mutex_unlock(&flash->lock);

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
	return 0;
}

/*
 * MTD read callback: high-speed read of @len bytes at @from into @buf.
 * *retlen is advanced by the number of bytes actually transferred.
 */
static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, unsigned char *buf)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	struct spi_transfer transfer[2];
	struct spi_message message;
	unsigned char command[4];
	int ret;

	spi_message_init(&message);
	memset(&transfer, 0, sizeof(transfer));

	command[0] = SST25L_CMD_READ;
	command[1] = from >> 16;
	command[2] = from >> 8;
	command[3] = from;

	transfer[0].tx_buf = command;
	transfer[0].len = sizeof(command);
	spi_message_add_tail(&transfer[0], &message);

	transfer[1].rx_buf = buf;
	transfer[1].len = len;
	spi_message_add_tail(&transfer[1], &message);

	mutex_lock(&flash->lock);

	/* Wait for previous write/erase to complete */
	ret = sst25l_wait_till_ready(flash);
	if (ret) {
		mutex_unlock(&flash->lock);
		return ret;
	}

	/* BUGFIX: the spi_sync() result was previously ignored, so a
	 * failed transfer was reported to the caller as success. */
	ret = spi_sync(flash->spi, &message);
	if (ret) {
		mutex_unlock(&flash->lock);
		return ret;
	}

	/* actual_length includes the 4 command bytes; only the payload
	 * counts toward retlen. */
	if (retlen && message.actual_length > sizeof(command))
		*retlen += message.actual_length - sizeof(command);

	mutex_unlock(&flash->lock);
	return 0;
}

/*
 * MTD write callback: program @len bytes at @to (page-aligned) using
 * the chip's auto-address-increment mode. On return *retlen holds the
 * number of bytes actually programmed.
 */
static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const unsigned char *buf)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	int i, j, ret, err, bytes, copied = 0;
	unsigned char command[5];

	if ((uint32_t)to % mtd->writesize)
		return -EINVAL;

	mutex_lock(&flash->lock);

	ret = sst25l_write_enable(flash, 1);
	if (ret)
		goto out;

	for (i = 0; i < len; i += mtd->writesize) {
		ret = sst25l_wait_till_ready(flash);
		if (ret)
			goto out;

		/* Write the first byte of the page */
		command[0] = SST25L_CMD_AAI_PROGRAM;
		command[1] = (to + i) >> 16;
		command[2] = (to + i) >> 8;
		command[3] = (to + i);
		command[4] = buf[i];
		ret = spi_write(flash->spi, command, 5);
		if (ret < 0)
			goto out;
		copied++;

		/*
		 * Write the remaining bytes using auto address
		 * increment mode
		 */
		bytes = min_t(uint32_t, mtd->writesize, len - i);
		for (j = 1; j < bytes; j++, copied++) {
			ret = sst25l_wait_till_ready(flash);
			if (ret)
				goto out;

			/* command[0] still holds the AAI opcode. */
			command[1] = buf[i + j];
			ret = spi_write(flash->spi, command, 2);
			if (ret)
				goto out;
		}
	}

out:
	/* BUGFIX: always drop write-enable, but don't let a successful
	 * disable overwrite an earlier programming error - previously a
	 * partial write could be reported as a full success. */
	err = sst25l_write_enable(flash, 0);
	if (!ret)
		ret = err;

	if (retlen)
		*retlen = copied;

	mutex_unlock(&flash->lock);
	return ret;
}

/*
 * Read the device ID over SPI and look it up in sst25l_flash_info[].
 * Returns the matching entry, or NULL (with an error logged) if the
 * chip is absent or unrecognised.
 */
static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
	struct flash_info *flash_info = NULL;
	struct spi_message m;
	struct spi_transfer t;
	unsigned char cmd_resp[6];
	int i, err;
	uint16_t id;

	spi_message_init(&m);
	memset(&t, 0, sizeof(struct spi_transfer));

	/* READ_ID + 3 address bytes out; manufacturer/device ID in. */
	cmd_resp[0] = SST25L_CMD_READ_ID;
	cmd_resp[1] = 0;
	cmd_resp[2] = 0;
	cmd_resp[3] = 0;
	cmd_resp[4] = 0xff;
	cmd_resp[5] = 0xff;
	t.tx_buf = cmd_resp;
	t.rx_buf = cmd_resp;
	t.len = sizeof(cmd_resp);
	spi_message_add_tail(&t, &m);
	err = spi_sync(spi, &m);
	if (err < 0) {
		dev_err(&spi->dev, "error reading device id\n");
		return NULL;
	}

	id = (cmd_resp[4] << 8) | cmd_resp[5];

	for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
		if (sst25l_flash_info[i].device_id == id)
			flash_info = &sst25l_flash_info[i];

	if (!flash_info)
		dev_err(&spi->dev, "unknown id %.4x\n", id);

	return flash_info;
}

/*
 * SPI probe: identify the chip, allocate per-device state, fill in the
 * mtd_info and register it (parsing platform-data partitions if any).
 */
static int sst25l_probe(struct spi_device *spi)
{
	struct flash_info *flash_info;
	struct sst25l_flash *flash;
	struct flash_platform_data *data;
	int ret;

	flash_info = sst25l_match_device(spi);
	if (!flash_info)
		return -ENODEV;

	flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
	if (!flash)
		return -ENOMEM;

	flash->spi = spi;
	mutex_init(&flash->lock);
	dev_set_drvdata(&spi->dev, flash);

	data = spi->dev.platform_data;
	if (data && data->name)
		flash->mtd.name = data->name;
	else
		flash->mtd.name = dev_name(&spi->dev);

	flash->mtd.type		= MTD_NORFLASH;
	flash->mtd.flags	= MTD_CAP_NORFLASH;
	flash->mtd.erasesize	= flash_info->erase_size;
	flash->mtd.writesize	= flash_info->page_size;
	flash->mtd.writebufsize	= flash_info->page_size;
	flash->mtd.size		= flash_info->page_size * flash_info->nr_pages;
	flash->mtd._erase	= sst25l_erase;
	flash->mtd._read	= sst25l_read;
	flash->mtd._write 	= sst25l_write;

	dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
		 (long long)flash->mtd.size >> 10);

	pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
	      ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
	      flash->mtd.name,
	      (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
	      flash->mtd.erasesize, flash->mtd.erasesize / 1024,
	      flash->mtd.numeraseregions);

	ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
					data ? data->parts : NULL,
					data ? data->nr_parts : 0);
	if (ret) {
		kfree(flash);
		dev_set_drvdata(&spi->dev, NULL);
		return -ENODEV;
	}

	return 0;
}

/*
 * SPI remove: unregister the MTD device and free the state on success.
 */
static int sst25l_remove(struct spi_device *spi)
{
	struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
	int ret;

	ret = mtd_device_unregister(&flash->mtd);
	if (ret == 0)
		kfree(flash);
	return ret;
}

static struct spi_driver sst25l_driver = {
	.driver = {
		.name	= "sst25l",
		.owner	= THIS_MODULE,
	},
	.probe		= sst25l_probe,
	.remove		= sst25l_remove,
};

module_spi_driver(sst25l_driver);

MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
	      "Ryan Mallon");
MODULE_LICENSE("GPL");
gpl-2.0
bq-rk3066/android_kernel_bq_rk3188_DEPRECATED
arch/s390/kernel/processor.c
2831
2179
/*
 * arch/s390/kernel/processor.c
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>

/* Per-CPU copy of the CPU identification, filled in by cpu_init()
 * and reported through /proc/cpuinfo. */
static DEFINE_PER_CPU(struct cpuid, cpu_id);

/*
 * cpu_init - initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());

	/* Cache this CPU's id for later /proc/cpuinfo reporting. */
	get_cpu_id(id);
	/* Adopt init_mm as the active mm for this (kernel-only) CPU;
	 * a live user mm at this point would be a bug. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* Names of the HWCAP bits, indexed by bit position in elf_hwcap. */
	static const char *hwcap_str[10] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat", "etf3eh", "highgprs"
	};
	/* The iterator value is the CPU number + 1 (see c_start). */
	unsigned long n = (unsigned long) v - 1;
	int i;

	/* The global header (vendor, cpu count, bogomips, features)
	 * is emitted only once, before the entry for CPU 0. */
	if (!n) {
		s390_adjust_jiffies();
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
		seq_puts(m, "features\t: ");
		for (i = 0; i < 10; i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
	}

	/* Hold the hotplug lock so the CPU cannot go away while its
	 * per-CPU id is being read. */
	get_online_cpus();
	if (cpu_online(n)) {
		struct cpuid *id = &per_cpu(cpu_id, n);
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, id->version, id->ident, id->machine);
	}
	put_online_cpus();
	return 0;
}

/* seq_file iterator: positions 0..NR_CPUS-1 map to tokens 1..NR_CPUS
 * (offset by one so position 0 is not confused with a NULL/end token). */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

/* Operations backing /proc/cpuinfo on s390. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
gpl-2.0
penreturns/AK-OnePone
drivers/mmc/host/dw_mmc.c
3855
54788
/* * Synopsys DesignWare Multimedia Card Interface driver * (Based on NXP driver for lpc 31xx) * * Copyright (C) 2009 NXP Semiconductors * Copyright (C) 2009, 2010 Imagination Technologies Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/blkdev.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/dw_mmc.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> #include <linux/workqueue.h> #include "dw_mmc.h" /* Common flag combinations */ #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \ SDMMC_INT_HTO | SDMMC_INT_SBE | \ SDMMC_INT_EBE) #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ SDMMC_INT_RESP_ERR) #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) #define DW_MCI_SEND_STATUS 1 #define DW_MCI_RECV_STATUS 2 #define DW_MCI_DMA_THRESHOLD 16 #ifdef CONFIG_MMC_DW_IDMAC struct idmac_desc { u32 des0; /* Control Descriptor */ #define IDMAC_DES0_DIC BIT(1) #define IDMAC_DES0_LD BIT(2) #define IDMAC_DES0_FD BIT(3) #define IDMAC_DES0_CH BIT(4) #define IDMAC_DES0_ER BIT(5) #define IDMAC_DES0_CES BIT(30) #define IDMAC_DES0_OWN BIT(31) u32 des1; /* Buffer sizes */ #define IDMAC_SET_BUFFER1_SIZE(d, s) \ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) u32 des2; /* buffer 1 physical address */ u32 des3; /* buffer 2 physical address */ }; #endif /* 
CONFIG_MMC_DW_IDMAC */ /** * struct dw_mci_slot - MMC slot state * @mmc: The mmc_host representing this slot. * @host: The MMC controller this slot is using. * @ctype: Card type for this slot. * @mrq: mmc_request currently being processed or waiting to be * processed, or NULL when the slot is idle. * @queue_node: List node for placing this node in the @queue list of * &struct dw_mci. * @clock: Clock rate configured by set_ios(). Protected by host->lock. * @flags: Random state bits associated with the slot. * @id: Number of this slot. * @last_detect_state: Most recently observed card detect state. */ struct dw_mci_slot { struct mmc_host *mmc; struct dw_mci *host; u32 ctype; struct mmc_request *mrq; struct list_head queue_node; unsigned int clock; unsigned long flags; #define DW_MMC_CARD_PRESENT 0 #define DW_MMC_CARD_NEED_INIT 1 int id; int last_detect_state; }; static struct workqueue_struct *dw_mci_card_workqueue; #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) { struct dw_mci_slot *slot = s->private; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_command *stop; struct mmc_data *data; /* Make sure we get a consistent snapshot */ spin_lock_bh(&slot->host->lock); mrq = slot->mrq; if (mrq) { cmd = mrq->cmd; data = mrq->data; stop = mrq->stop; if (cmd) seq_printf(s, "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", cmd->opcode, cmd->arg, cmd->flags, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[2], cmd->error); if (data) seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", data->bytes_xfered, data->blocks, data->blksz, data->flags, data->error); if (stop) seq_printf(s, "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", stop->opcode, stop->arg, stop->flags, stop->resp[0], stop->resp[1], stop->resp[2], stop->resp[2], stop->error); } spin_unlock_bh(&slot->host->lock); return 0; } static int dw_mci_req_open(struct inode *inode, struct file *file) { return single_open(file, dw_mci_req_show, inode->i_private); } static const 
struct file_operations dw_mci_req_fops = { .owner = THIS_MODULE, .open = dw_mci_req_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dw_mci_regs_show(struct seq_file *s, void *v) { seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS); seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS); seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD); seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL); seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK); seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA); return 0; } static int dw_mci_regs_open(struct inode *inode, struct file *file) { return single_open(file, dw_mci_regs_show, inode->i_private); } static const struct file_operations dw_mci_regs_fops = { .owner = THIS_MODULE, .open = dw_mci_regs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void dw_mci_init_debugfs(struct dw_mci_slot *slot) { struct mmc_host *mmc = slot->mmc; struct dw_mci *host = slot->host; struct dentry *root; struct dentry *node; root = mmc->debugfs_root; if (!root) return; node = debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); if (!node) goto err; node = debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); if (!node) goto err; node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); if (!node) goto err; node = debugfs_create_x32("pending_events", S_IRUSR, root, (u32 *)&host->pending_events); if (!node) goto err; node = debugfs_create_x32("completed_events", S_IRUSR, root, (u32 *)&host->completed_events); if (!node) goto err; return; err: dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); } #endif /* defined(CONFIG_DEBUG_FS) */ static void dw_mci_set_timeout(struct dw_mci *host) { /* timeout (maximum) */ mci_writel(host, TMOUT, 0xffffffff); } static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) { struct mmc_data *data; u32 cmdr; cmd->error = -EINPROGRESS; cmdr = cmd->opcode; if (cmdr == MMC_STOP_TRANSMISSION) cmdr 
|= SDMMC_CMD_STOP; else cmdr |= SDMMC_CMD_PRV_DAT_WAIT; if (cmd->flags & MMC_RSP_PRESENT) { /* We expect a response, so set this bit */ cmdr |= SDMMC_CMD_RESP_EXP; if (cmd->flags & MMC_RSP_136) cmdr |= SDMMC_CMD_RESP_LONG; } if (cmd->flags & MMC_RSP_CRC) cmdr |= SDMMC_CMD_RESP_CRC; data = cmd->data; if (data) { cmdr |= SDMMC_CMD_DAT_EXP; if (data->flags & MMC_DATA_STREAM) cmdr |= SDMMC_CMD_STRM_MODE; if (data->flags & MMC_DATA_WRITE) cmdr |= SDMMC_CMD_DAT_WR; } return cmdr; } static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { host->cmd = cmd; dev_vdbg(&host->dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags); mci_writel(host, CMDARG, cmd->arg); wmb(); mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); } static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) { dw_mci_start_command(host, data->stop, host->stop_cmdr); } /* DMA interface functions */ static void dw_mci_stop_dma(struct dw_mci *host) { if (host->using_dma) { host->dma_ops->stop(host); host->dma_ops->cleanup(host); } else { /* Data transfer was stopped by the interrupt handler */ set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } } static int dw_mci_get_dma_dir(struct mmc_data *data) { if (data->flags & MMC_DATA_WRITE) return DMA_TO_DEVICE; else return DMA_FROM_DEVICE; } #ifdef CONFIG_MMC_DW_IDMAC static void dw_mci_dma_cleanup(struct dw_mci *host) { struct mmc_data *data = host->data; if (data) if (!data->host_cookie) dma_unmap_sg(&host->dev, data->sg, data->sg_len, dw_mci_get_dma_dir(data)); } static void dw_mci_idmac_stop_dma(struct dw_mci *host) { u32 temp; /* Disable and reset the IDMAC interface */ temp = mci_readl(host, CTRL); temp &= ~SDMMC_CTRL_USE_IDMAC; temp |= SDMMC_CTRL_DMA_RESET; mci_writel(host, CTRL, temp); /* Stop the IDMAC running */ temp = mci_readl(host, BMOD); temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); mci_writel(host, BMOD, temp); } static void dw_mci_idmac_complete_dma(struct dw_mci *host) { 
struct mmc_data *data = host->data; dev_vdbg(&host->dev, "DMA complete\n"); host->dma_ops->cleanup(host); /* * If the card was removed, data will be NULL. No point in trying to * send the stop command or waiting for NBUSY in this case. */ if (data) { set_bit(EVENT_XFER_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); } } static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, unsigned int sg_len) { int i; struct idmac_desc *desc = host->sg_cpu; for (i = 0; i < sg_len; i++, desc++) { unsigned int length = sg_dma_len(&data->sg[i]); u32 mem_addr = sg_dma_address(&data->sg[i]); /* Set the OWN bit and disable interrupts for this descriptor */ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; /* Buffer length */ IDMAC_SET_BUFFER1_SIZE(desc, length); /* Physical address to DMA to/from */ desc->des2 = mem_addr; } /* Set first descriptor */ desc = host->sg_cpu; desc->des0 |= IDMAC_DES0_FD; /* Set last descriptor */ desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); desc->des0 |= IDMAC_DES0_LD; wmb(); } static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) { u32 temp; dw_mci_translate_sglist(host, host->data, sg_len); /* Select IDMAC interface */ temp = mci_readl(host, CTRL); temp |= SDMMC_CTRL_USE_IDMAC; mci_writel(host, CTRL, temp); wmb(); /* Enable the IDMAC */ temp = mci_readl(host, BMOD); temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; mci_writel(host, BMOD, temp); /* Start it running */ mci_writel(host, PLDMND, 1); } static int dw_mci_idmac_init(struct dw_mci *host) { struct idmac_desc *p; int i; /* Number of descriptors in the ring buffer */ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); /* Forward link the descriptor list */ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); /* Set the last descriptor as the end-of-ring descriptor */ p->des3 = 
host->sg_dma; p->des0 = IDMAC_DES0_ER; /* Mask out interrupts - get Tx & Rx complete only */ mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); /* Set the descriptor base address */ mci_writel(host, DBADDR, host->sg_dma); return 0; } static struct dw_mci_dma_ops dw_mci_idmac_ops = { .init = dw_mci_idmac_init, .start = dw_mci_idmac_start_dma, .stop = dw_mci_idmac_stop_dma, .complete = dw_mci_idmac_complete_dma, .cleanup = dw_mci_dma_cleanup, }; #endif /* CONFIG_MMC_DW_IDMAC */ static int dw_mci_pre_dma_transfer(struct dw_mci *host, struct mmc_data *data, bool next) { struct scatterlist *sg; unsigned int i, sg_len; if (!next && data->host_cookie) return data->host_cookie; /* * We don't do DMA on "complex" transfers, i.e. with * non-word-aligned buffers or lengths. Also, we don't bother * with all the DMA setup overhead for short transfers. */ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) return -EINVAL; if (data->blksz & 3) return -EINVAL; for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 3 || sg->length & 3) return -EINVAL; } sg_len = dma_map_sg(&host->dev, data->sg, data->sg_len, dw_mci_get_dma_dir(data)); if (sg_len == 0) return -EINVAL; if (next) data->host_cookie = sg_len; return sg_len; } static void dw_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, bool is_first_req) { struct dw_mci_slot *slot = mmc_priv(mmc); struct mmc_data *data = mrq->data; if (!slot->host->use_dma || !data) return; if (data->host_cookie) { data->host_cookie = 0; return; } if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) data->host_cookie = 0; } static void dw_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, int err) { struct dw_mci_slot *slot = mmc_priv(mmc); struct mmc_data *data = mrq->data; if (!slot->host->use_dma || !data) return; if (data->host_cookie) dma_unmap_sg(&slot->host->dev, data->sg, data->sg_len, dw_mci_get_dma_dir(data)); data->host_cookie = 0; } static int 
dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) { int sg_len; u32 temp; host->using_dma = 0; /* If we don't have a channel, we can't do DMA */ if (!host->use_dma) return -ENODEV; sg_len = dw_mci_pre_dma_transfer(host, data, 0); if (sg_len < 0) { host->dma_ops->stop(host); return sg_len; } host->using_dma = 1; dev_vdbg(&host->dev, "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, sg_len); /* Enable the DMA interface */ temp = mci_readl(host, CTRL); temp |= SDMMC_CTRL_DMA_ENABLE; mci_writel(host, CTRL, temp); /* Disable RX/TX IRQs, let DMA handle it */ temp = mci_readl(host, INTMASK); temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); mci_writel(host, INTMASK, temp); host->dma_ops->start(host, sg_len); return 0; } static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) { u32 temp; data->error = -EINPROGRESS; WARN_ON(host->data); host->sg = NULL; host->data = data; if (data->flags & MMC_DATA_READ) host->dir_status = DW_MCI_RECV_STATUS; else host->dir_status = DW_MCI_SEND_STATUS; if (dw_mci_submit_data_dma(host, data)) { int flags = SG_MITER_ATOMIC; if (host->data->flags & MMC_DATA_READ) flags |= SG_MITER_TO_SG; else flags |= SG_MITER_FROM_SG; sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); host->sg = data->sg; host->part_buf_start = 0; host->part_buf_count = 0; mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); temp = mci_readl(host, INTMASK); temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; mci_writel(host, INTMASK, temp); temp = mci_readl(host, CTRL); temp &= ~SDMMC_CTRL_DMA_ENABLE; mci_writel(host, CTRL, temp); } } static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) { struct dw_mci *host = slot->host; unsigned long timeout = jiffies + msecs_to_jiffies(500); unsigned int cmd_status = 0; mci_writel(host, CMDARG, arg); wmb(); mci_writel(host, CMD, SDMMC_CMD_START | cmd); while (time_before(jiffies, timeout)) { cmd_status = mci_readl(host, CMD); if 
(!(cmd_status & SDMMC_CMD_START)) return; } dev_err(&slot->mmc->class_dev, "Timeout sending command (cmd %#x arg %#x status %#x)\n", cmd, arg, cmd_status); } static void dw_mci_setup_bus(struct dw_mci_slot *slot) { struct dw_mci *host = slot->host; u32 div; if (slot->clock != host->current_speed) { if (host->bus_hz % slot->clock) /* * move the + 1 after the divide to prevent * over-clocking the card. */ div = ((host->bus_hz / slot->clock) >> 1) + 1; else div = (host->bus_hz / slot->clock) >> 1; dev_info(&slot->mmc->class_dev, "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" " div = %d)\n", slot->id, host->bus_hz, slot->clock, div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); /* disable clock */ mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); /* inform CIU */ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); /* set clock to desired speed */ mci_writel(host, CLKDIV, div); /* inform CIU */ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); /* enable clock */ mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | SDMMC_CLKEN_LOW_PWR) << slot->id)); /* inform CIU */ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); host->current_speed = slot->clock; } /* Set the current slot bus width */ mci_writel(host, CTYPE, (slot->ctype << slot->id)); } static void __dw_mci_start_request(struct dw_mci *host, struct dw_mci_slot *slot, struct mmc_command *cmd) { struct mmc_request *mrq; struct mmc_data *data; u32 cmdflags; mrq = slot->mrq; if (host->pdata->select_slot) host->pdata->select_slot(slot->id); /* Slot specific timing and width adjustment */ dw_mci_setup_bus(slot); host->cur_slot = slot; host->mrq = mrq; host->pending_events = 0; host->completed_events = 0; host->data_status = 0; data = cmd->data; if (data) { dw_mci_set_timeout(host); mci_writel(host, BYTCNT, data->blksz*data->blocks); mci_writel(host, BLKSIZ, data->blksz); } cmdflags = dw_mci_prepare_command(slot->mmc, cmd); /* this is the first command, 
send the initialization clock */ if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) cmdflags |= SDMMC_CMD_INIT; if (data) { dw_mci_submit_data(host, data); wmb(); } dw_mci_start_command(host, cmd, cmdflags); if (mrq->stop) host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); } static void dw_mci_start_request(struct dw_mci *host, struct dw_mci_slot *slot) { struct mmc_request *mrq = slot->mrq; struct mmc_command *cmd; cmd = mrq->sbc ? mrq->sbc : mrq->cmd; __dw_mci_start_request(host, slot, cmd); } /* must be called with host->lock held */ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, struct mmc_request *mrq) { dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", host->state); slot->mrq = mrq; if (host->state == STATE_IDLE) { host->state = STATE_SENDING_CMD; dw_mci_start_request(host, slot); } else { list_add_tail(&slot->queue_node, &host->queue); } } static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci *host = slot->host; WARN_ON(slot->mrq); /* * The check for card presence and queueing of the request must be * atomic, otherwise the card could be removed in between and the * request wouldn't fail until another card was inserted. 
*/ spin_lock_bh(&host->lock); if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { spin_unlock_bh(&host->lock); mrq->cmd->error = -ENOMEDIUM; mmc_request_done(mmc, mrq); return; } dw_mci_queue_request(host, slot, mrq); spin_unlock_bh(&host->lock); } static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct dw_mci_slot *slot = mmc_priv(mmc); u32 regs; /* set default 1 bit mode */ slot->ctype = SDMMC_CTYPE_1BIT; switch (ios->bus_width) { case MMC_BUS_WIDTH_1: slot->ctype = SDMMC_CTYPE_1BIT; break; case MMC_BUS_WIDTH_4: slot->ctype = SDMMC_CTYPE_4BIT; break; case MMC_BUS_WIDTH_8: slot->ctype = SDMMC_CTYPE_8BIT; break; } regs = mci_readl(slot->host, UHS_REG); /* DDR mode set */ if (ios->timing == MMC_TIMING_UHS_DDR50) regs |= (0x1 << slot->id) << 16; else regs &= ~(0x1 << slot->id) << 16; mci_writel(slot->host, UHS_REG, regs); if (ios->clock) { /* * Use mirror of ios->clock to prevent race with mmc * core ios update when finding the minimum. */ slot->clock = ios->clock; } switch (ios->power_mode) { case MMC_POWER_UP: set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); break; default: break; } } static int dw_mci_get_ro(struct mmc_host *mmc) { int read_only; struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci_board *brd = slot->host->pdata; /* Use platform get_ro function, else try on board write protect */ if (brd->get_ro) read_only = brd->get_ro(slot->id); else read_only = mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; dev_dbg(&mmc->class_dev, "card is %s\n", read_only ? "read-only" : "read-write"); return read_only; } static int dw_mci_get_cd(struct mmc_host *mmc) { int present; struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci_board *brd = slot->host->pdata; /* Use platform get_cd function, else try onboard card detect */ if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) present = 1; else if (brd->get_cd) present = !brd->get_cd(slot->id); else present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) == 0 ? 
1 : 0; if (present) dev_dbg(&mmc->class_dev, "card is present\n"); else dev_dbg(&mmc->class_dev, "card is not present\n"); return present; } static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci *host = slot->host; u32 int_mask; /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { mci_writel(host, INTMASK, (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); } else { mci_writel(host, INTMASK, (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); } } static const struct mmc_host_ops dw_mci_ops = { .request = dw_mci_request, .pre_req = dw_mci_pre_req, .post_req = dw_mci_post_req, .set_ios = dw_mci_set_ios, .get_ro = dw_mci_get_ro, .get_cd = dw_mci_get_cd, .enable_sdio_irq = dw_mci_enable_sdio_irq, }; static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) __releases(&host->lock) __acquires(&host->lock) { struct dw_mci_slot *slot; struct mmc_host *prev_mmc = host->cur_slot->mmc; WARN_ON(host->cmd || host->data); host->cur_slot->mrq = NULL; host->mrq = NULL; if (!list_empty(&host->queue)) { slot = list_entry(host->queue.next, struct dw_mci_slot, queue_node); list_del(&slot->queue_node); dev_vdbg(&host->dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc)); host->state = STATE_SENDING_CMD; dw_mci_start_request(host, slot); } else { dev_vdbg(&host->dev, "list empty\n"); host->state = STATE_IDLE; } spin_unlock(&host->lock); mmc_request_done(prev_mmc, mrq); spin_lock(&host->lock); } static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) { u32 status = host->cmd_status; host->cmd_status = 0; /* Read the response from the card (up to 16 bytes) */ if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = mci_readl(host, RESP0); cmd->resp[2] = mci_readl(host, RESP1); cmd->resp[1] = mci_readl(host, RESP2); cmd->resp[0] = mci_readl(host, RESP3); } else { cmd->resp[0] = mci_readl(host, RESP0); 
cmd->resp[1] = 0; cmd->resp[2] = 0; cmd->resp[3] = 0; } } if (status & SDMMC_INT_RTO) cmd->error = -ETIMEDOUT; else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) cmd->error = -EILSEQ; else if (status & SDMMC_INT_RESP_ERR) cmd->error = -EIO; else cmd->error = 0; if (cmd->error) { /* newer ip versions need a delay between retries */ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) mdelay(20); if (cmd->data) { host->data = NULL; dw_mci_stop_dma(host); } } } static void dw_mci_tasklet_func(unsigned long priv) { struct dw_mci *host = (struct dw_mci *)priv; struct mmc_data *data; struct mmc_command *cmd; enum dw_mci_state state; enum dw_mci_state prev_state; u32 status, ctrl; spin_lock(&host->lock); state = host->state; data = host->data; do { prev_state = state; switch (state) { case STATE_IDLE: break; case STATE_SENDING_CMD: if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events)) break; cmd = host->cmd; host->cmd = NULL; set_bit(EVENT_CMD_COMPLETE, &host->completed_events); dw_mci_command_complete(host, cmd); if (cmd == host->mrq->sbc && !cmd->error) { prev_state = state = STATE_SENDING_CMD; __dw_mci_start_request(host, host->cur_slot, host->mrq->cmd); goto unlock; } if (!host->mrq->data || cmd->error) { dw_mci_request_end(host, host->mrq); goto unlock; } prev_state = state = STATE_SENDING_DATA; /* fall through */ case STATE_SENDING_DATA: if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { dw_mci_stop_dma(host); if (data->stop) send_stop_cmd(host, data); state = STATE_DATA_ERROR; break; } if (!test_and_clear_bit(EVENT_XFER_COMPLETE, &host->pending_events)) break; set_bit(EVENT_XFER_COMPLETE, &host->completed_events); prev_state = state = STATE_DATA_BUSY; /* fall through */ case STATE_DATA_BUSY: if (!test_and_clear_bit(EVENT_DATA_COMPLETE, &host->pending_events)) break; host->data = NULL; set_bit(EVENT_DATA_COMPLETE, &host->completed_events); status = host->data_status; if (status & DW_MCI_DATA_ERROR_FLAGS) { if (status & 
SDMMC_INT_DTO) { data->error = -ETIMEDOUT; } else if (status & SDMMC_INT_DCRC) { data->error = -EILSEQ; } else if (status & SDMMC_INT_EBE && host->dir_status == DW_MCI_SEND_STATUS) { /* * No data CRC status was returned. * The number of bytes transferred will * be exaggerated in PIO mode. */ data->bytes_xfered = 0; data->error = -ETIMEDOUT; } else { dev_err(&host->dev, "data FIFO error " "(status=%08x)\n", status); data->error = -EIO; } /* * After an error, there may be data lingering * in the FIFO, so reset it - doing so * generates a block interrupt, hence setting * the scatter-gather pointer to NULL. */ sg_miter_stop(&host->sg_miter); host->sg = NULL; ctrl = mci_readl(host, CTRL); ctrl |= SDMMC_CTRL_FIFO_RESET; mci_writel(host, CTRL, ctrl); } else { data->bytes_xfered = data->blocks * data->blksz; data->error = 0; } if (!data->stop) { dw_mci_request_end(host, host->mrq); goto unlock; } if (host->mrq->sbc && !data->error) { data->stop->error = 0; dw_mci_request_end(host, host->mrq); goto unlock; } prev_state = state = STATE_SENDING_STOP; if (!data->error) send_stop_cmd(host, data); /* fall through */ case STATE_SENDING_STOP: if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events)) break; host->cmd = NULL; dw_mci_command_complete(host, host->mrq->stop); dw_mci_request_end(host, host->mrq); goto unlock; case STATE_DATA_ERROR: if (!test_and_clear_bit(EVENT_XFER_COMPLETE, &host->pending_events)) break; state = STATE_DATA_BUSY; break; } } while (state != prev_state); host->state = state; unlock: spin_unlock(&host->lock); } /* push final bytes to part_buf, only use during push */ static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) { memcpy((void *)&host->part_buf, buf, cnt); host->part_buf_count = cnt; } /* append bytes to part_buf, only use during push */ static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) { cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); memcpy((void *)&host->part_buf + 
host->part_buf_count, buf, cnt); host->part_buf_count += cnt; return cnt; } /* pull first bytes from part_buf, only use during pull */ static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) { cnt = min(cnt, (int)host->part_buf_count); if (cnt) { memcpy(buf, (void *)&host->part_buf + host->part_buf_start, cnt); host->part_buf_count -= cnt; host->part_buf_start += cnt; } return cnt; } /* pull final bytes from the part_buf, assuming it's just been filled */ static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) { memcpy(buf, &host->part_buf, cnt); host->part_buf_start = cnt; host->part_buf_count = (1 << host->data_shift) - cnt; } static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) { /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; if (!sg_next(host->sg) || host->part_buf_count == 2) { mci_writew(host, DATA(host->data_offset), host->part_buf16); host->part_buf_count = 0; } } #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x1)) { while (cnt >= 2) { u16 aligned_buf[64]; int len = min(cnt & -2, (int)sizeof(aligned_buf)); int items = len >> 1; int i; /* memcpy from input buffer into aligned buffer */ memcpy(aligned_buf, buf, len); buf += len; cnt -= len; /* push data from aligned buffer into fifo */ for (i = 0; i < items; ++i) mci_writew(host, DATA(host->data_offset), aligned_buf[i]); } } else #endif { u16 *pdata = buf; for (; cnt >= 2; cnt -= 2) mci_writew(host, DATA(host->data_offset), *pdata++); buf = pdata; } /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); if (!sg_next(host->sg)) mci_writew(host, DATA(host->data_offset), host->part_buf16); } } static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x1)) { while (cnt >= 2) { 
/* pull data from fifo into aligned buffer */ u16 aligned_buf[64]; int len = min(cnt & -2, (int)sizeof(aligned_buf)); int items = len >> 1; int i; for (i = 0; i < items; ++i) aligned_buf[i] = mci_readw(host, DATA(host->data_offset)); /* memcpy from aligned buffer into output buffer */ memcpy(buf, aligned_buf, len); buf += len; cnt -= len; } } else #endif { u16 *pdata = buf; for (; cnt >= 2; cnt -= 2) *pdata++ = mci_readw(host, DATA(host->data_offset)); buf = pdata; } if (cnt) { host->part_buf16 = mci_readw(host, DATA(host->data_offset)); dw_mci_pull_final_bytes(host, buf, cnt); } } static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) { /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; if (!sg_next(host->sg) || host->part_buf_count == 4) { mci_writel(host, DATA(host->data_offset), host->part_buf32); host->part_buf_count = 0; } } #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x3)) { while (cnt >= 4) { u32 aligned_buf[32]; int len = min(cnt & -4, (int)sizeof(aligned_buf)); int items = len >> 2; int i; /* memcpy from input buffer into aligned buffer */ memcpy(aligned_buf, buf, len); buf += len; cnt -= len; /* push data from aligned buffer into fifo */ for (i = 0; i < items; ++i) mci_writel(host, DATA(host->data_offset), aligned_buf[i]); } } else #endif { u32 *pdata = buf; for (; cnt >= 4; cnt -= 4) mci_writel(host, DATA(host->data_offset), *pdata++); buf = pdata; } /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); if (!sg_next(host->sg)) mci_writel(host, DATA(host->data_offset), host->part_buf32); } } static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x3)) { while (cnt >= 4) { /* pull data from fifo into aligned buffer */ u32 aligned_buf[32]; int len = min(cnt & 
-4, (int)sizeof(aligned_buf)); int items = len >> 2; int i; for (i = 0; i < items; ++i) aligned_buf[i] = mci_readl(host, DATA(host->data_offset)); /* memcpy from aligned buffer into output buffer */ memcpy(buf, aligned_buf, len); buf += len; cnt -= len; } } else #endif { u32 *pdata = buf; for (; cnt >= 4; cnt -= 4) *pdata++ = mci_readl(host, DATA(host->data_offset)); buf = pdata; } if (cnt) { host->part_buf32 = mci_readl(host, DATA(host->data_offset)); dw_mci_pull_final_bytes(host, buf, cnt); } } static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) { /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; if (!sg_next(host->sg) || host->part_buf_count == 8) { mci_writew(host, DATA(host->data_offset), host->part_buf); host->part_buf_count = 0; } } #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x7)) { while (cnt >= 8) { u64 aligned_buf[16]; int len = min(cnt & -8, (int)sizeof(aligned_buf)); int items = len >> 3; int i; /* memcpy from input buffer into aligned buffer */ memcpy(aligned_buf, buf, len); buf += len; cnt -= len; /* push data from aligned buffer into fifo */ for (i = 0; i < items; ++i) mci_writeq(host, DATA(host->data_offset), aligned_buf[i]); } } else #endif { u64 *pdata = buf; for (; cnt >= 8; cnt -= 8) mci_writeq(host, DATA(host->data_offset), *pdata++); buf = pdata; } /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); if (!sg_next(host->sg)) mci_writeq(host, DATA(host->data_offset), host->part_buf); } } static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsigned long)buf & 0x7)) { while (cnt >= 8) { /* pull data from fifo into aligned buffer */ u64 aligned_buf[16]; int len = min(cnt & -8, (int)sizeof(aligned_buf)); int items = len >> 3; int i; for (i = 0; i < items; ++i) 
aligned_buf[i] = mci_readq(host, DATA(host->data_offset)); /* memcpy from aligned buffer into output buffer */ memcpy(buf, aligned_buf, len); buf += len; cnt -= len; } } else #endif { u64 *pdata = buf; for (; cnt >= 8; cnt -= 8) *pdata++ = mci_readq(host, DATA(host->data_offset)); buf = pdata; } if (cnt) { host->part_buf = mci_readq(host, DATA(host->data_offset)); dw_mci_pull_final_bytes(host, buf, cnt); } } static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) { int len; /* get remaining partial bytes */ len = dw_mci_pull_part_bytes(host, buf, cnt); if (unlikely(len == cnt)) return; buf += len; cnt -= len; /* get the rest of the data */ host->pull_data(host, buf, cnt); } static void dw_mci_read_data_pio(struct dw_mci *host) { struct sg_mapping_iter *sg_miter = &host->sg_miter; void *buf; unsigned int offset; struct mmc_data *data = host->data; int shift = host->data_shift; u32 status; unsigned int nbytes = 0, len; unsigned int remain, fcnt; do { if (!sg_miter_next(sg_miter)) goto done; host->sg = sg_miter->__sg; buf = sg_miter->addr; remain = sg_miter->length; offset = 0; do { fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift) + host->part_buf_count; len = min(remain, fcnt); if (!len) break; dw_mci_pull_data(host, (void *)(buf + offset), len); offset += len; nbytes += len; remain -= len; } while (remain); sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); if (status & DW_MCI_DATA_ERROR_FLAGS) { host->data_status = status; data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); tasklet_schedule(&host->tasklet); return; } } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ data->bytes_xfered += nbytes; if (!remain) { if (!sg_miter_next(sg_miter)) goto done; sg_miter->consumed = 0; } sg_miter_stop(sg_miter); return; done: data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; 
smp_wmb(); set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } static void dw_mci_write_data_pio(struct dw_mci *host) { struct sg_mapping_iter *sg_miter = &host->sg_miter; void *buf; unsigned int offset; struct mmc_data *data = host->data; int shift = host->data_shift; u32 status; unsigned int nbytes = 0, len; unsigned int fifo_depth = host->fifo_depth; unsigned int remain, fcnt; do { if (!sg_miter_next(sg_miter)) goto done; host->sg = sg_miter->__sg; buf = sg_miter->addr; remain = sg_miter->length; offset = 0; do { fcnt = ((fifo_depth - SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift) - host->part_buf_count; len = min(remain, fcnt); if (!len) break; host->push_data(host, (void *)(buf + offset), len); offset += len; nbytes += len; remain -= len; } while (remain); sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); if (status & DW_MCI_DATA_ERROR_FLAGS) { host->data_status = status; data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); tasklet_schedule(&host->tasklet); return; } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ data->bytes_xfered += nbytes; if (!remain) { if (!sg_miter_next(sg_miter)) goto done; sg_miter->consumed = 0; } sg_miter_stop(sg_miter); return; done: data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; smp_wmb(); set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) { if (!host->cmd_status) host->cmd_status = status; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); } static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; u32 status, pending; unsigned int pass_count = 0; int i; do { status = mci_readl(host, RINTSTS); pending = mci_readl(host, MINTSTS); /* read-only mask reg */ /* * DTO fix - version 2.10a and below, and only if 
internal DMA * is configured. */ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { if (!pending && ((mci_readl(host, STATUS) >> 17) & 0x1fff)) pending |= SDMMC_INT_DATA_OVER; } if (!pending) break; if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); host->cmd_status = status; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); host->data_status = status; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | SDMMC_INT_SBE | SDMMC_INT_EBE))) tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) host->data_status = status; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) dw_mci_read_data_pio(host); } set_bit(EVENT_DATA_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_RXDR) { mci_writel(host, RINTSTS, SDMMC_INT_RXDR); if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) dw_mci_read_data_pio(host); } if (pending & SDMMC_INT_TXDR) { mci_writel(host, RINTSTS, SDMMC_INT_TXDR); if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) dw_mci_write_data_pio(host); } if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); dw_mci_cmd_interrupt(host, status); } if (pending & SDMMC_INT_CD) { mci_writel(host, RINTSTS, SDMMC_INT_CD); queue_work(dw_mci_card_workqueue, &host->card_work); } /* Handle SDIO Interrupts */ for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (pending & SDMMC_INT_SDIO(i)) { mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); mmc_signal_sdio_irq(slot->mmc); } } } while (pass_count++ < 5); #ifdef CONFIG_MMC_DW_IDMAC /* Handle DMA interrupts */ pending = mci_readl(host, IDSTS); if (pending & 
(SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); set_bit(EVENT_DATA_COMPLETE, &host->pending_events); host->dma_ops->complete(host); } #endif return IRQ_HANDLED; } static void dw_mci_work_routine_card(struct work_struct *work) { struct dw_mci *host = container_of(work, struct dw_mci, card_work); int i; for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; struct mmc_host *mmc = slot->mmc; struct mmc_request *mrq; int present; u32 ctrl; present = dw_mci_get_cd(mmc); while (present != slot->last_detect_state) { dev_dbg(&slot->mmc->class_dev, "card %s\n", present ? "inserted" : "removed"); /* Power up slot (before spin_lock, may sleep) */ if (present != 0 && host->pdata->setpower) host->pdata->setpower(slot->id, mmc->ocr_avail); spin_lock_bh(&host->lock); /* Card change detected */ slot->last_detect_state = present; /* Mark card as present if applicable */ if (present != 0) set_bit(DW_MMC_CARD_PRESENT, &slot->flags); /* Clean up queue if present */ mrq = slot->mrq; if (mrq) { if (mrq == host->mrq) { host->data = NULL; host->cmd = NULL; switch (host->state) { case STATE_IDLE: break; case STATE_SENDING_CMD: mrq->cmd->error = -ENOMEDIUM; if (!mrq->data) break; /* fall through */ case STATE_SENDING_DATA: mrq->data->error = -ENOMEDIUM; dw_mci_stop_dma(host); break; case STATE_DATA_BUSY: case STATE_DATA_ERROR: if (mrq->data->error == -EINPROGRESS) mrq->data->error = -ENOMEDIUM; if (!mrq->stop) break; /* fall through */ case STATE_SENDING_STOP: mrq->stop->error = -ENOMEDIUM; break; } dw_mci_request_end(host, mrq); } else { list_del(&slot->queue_node); mrq->cmd->error = -ENOMEDIUM; if (mrq->data) mrq->data->error = -ENOMEDIUM; if (mrq->stop) mrq->stop->error = -ENOMEDIUM; spin_unlock(&host->lock); mmc_request_done(slot->mmc, mrq); spin_lock(&host->lock); } } /* Power down slot */ if (present == 0) { clear_bit(DW_MMC_CARD_PRESENT, 
&slot->flags); /* * Clear down the FIFO - doing so generates a * block interrupt, hence setting the * scatter-gather pointer to NULL. */ sg_miter_stop(&host->sg_miter); host->sg = NULL; ctrl = mci_readl(host, CTRL); ctrl |= SDMMC_CTRL_FIFO_RESET; mci_writel(host, CTRL, ctrl); #ifdef CONFIG_MMC_DW_IDMAC ctrl = mci_readl(host, BMOD); ctrl |= 0x01; /* Software reset of DMA */ mci_writel(host, BMOD, ctrl); #endif } spin_unlock_bh(&host->lock); /* Power down slot (after spin_unlock, may sleep) */ if (present == 0 && host->pdata->setpower) host->pdata->setpower(slot->id, 0); present = dw_mci_get_cd(mmc); } mmc_detect_change(slot->mmc, msecs_to_jiffies(host->pdata->detect_delay_ms)); } } static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) { struct mmc_host *mmc; struct dw_mci_slot *slot; mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); if (!mmc) return -ENOMEM; slot = mmc_priv(mmc); slot->id = id; slot->mmc = mmc; slot->host = host; mmc->ops = &dw_mci_ops; mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); mmc->f_max = host->bus_hz; if (host->pdata->get_ocr) mmc->ocr_avail = host->pdata->get_ocr(id); else mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; /* * Start with slot power disabled, it will be enabled when a card * is detected. 
*/ if (host->pdata->setpower) host->pdata->setpower(id, 0); if (host->pdata->caps) mmc->caps = host->pdata->caps; if (host->pdata->caps2) mmc->caps2 = host->pdata->caps2; if (host->pdata->get_bus_wd) if (host->pdata->get_bus_wd(slot->id) >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if (host->pdata->blk_settings) { mmc->max_segs = host->pdata->blk_settings->max_segs; mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; mmc->max_req_size = host->pdata->blk_settings->max_req_size; mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; } else { /* Useful defaults if platform data is unset. */ #ifdef CONFIG_MMC_DW_IDMAC mmc->max_segs = host->ring_size; mmc->max_blk_size = 65536; mmc->max_blk_count = host->ring_size; mmc->max_seg_size = 0x1000; mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; #else mmc->max_segs = 64; mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ mmc->max_blk_count = 512; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; #endif /* CONFIG_MMC_DW_IDMAC */ } host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); if (IS_ERR(host->vmmc)) { pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); host->vmmc = NULL; } else regulator_enable(host->vmmc); if (dw_mci_get_cd(mmc)) set_bit(DW_MMC_CARD_PRESENT, &slot->flags); else clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); host->slot[id] = slot; mmc_add_host(mmc); #if defined(CONFIG_DEBUG_FS) dw_mci_init_debugfs(slot); #endif /* Card initially undetected */ slot->last_detect_state = 0; /* * Card may have been plugged in prior to boot so we * need to run the detect tasklet */ queue_work(dw_mci_card_workqueue, &host->card_work); return 0; } static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) { /* Shutdown detect IRQ */ if (slot->host->pdata->exit) 
slot->host->pdata->exit(id); /* Debugfs stuff is cleaned up by mmc core */ mmc_remove_host(slot->mmc); slot->host->slot[id] = NULL; mmc_free_host(slot->mmc); } static void dw_mci_init_dma(struct dw_mci *host) { /* Alloc memory for sg translation */ host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); if (!host->sg_cpu) { dev_err(&host->dev, "%s: could not alloc DMA memory\n", __func__); goto no_dma; } /* Determine which DMA interface to use */ #ifdef CONFIG_MMC_DW_IDMAC host->dma_ops = &dw_mci_idmac_ops; dev_info(&host->dev, "Using internal DMA controller.\n"); #endif if (!host->dma_ops) goto no_dma; if (host->dma_ops->init && host->dma_ops->start && host->dma_ops->stop && host->dma_ops->cleanup) { if (host->dma_ops->init(host)) { dev_err(&host->dev, "%s: Unable to initialize " "DMA Controller.\n", __func__); goto no_dma; } } else { dev_err(&host->dev, "DMA initialization not found.\n"); goto no_dma; } host->use_dma = 1; return; no_dma: dev_info(&host->dev, "Using PIO mode.\n"); host->use_dma = 0; return; } static bool mci_wait_reset(struct device *dev, struct dw_mci *host) { unsigned long timeout = jiffies + msecs_to_jiffies(500); unsigned int ctrl; mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)); /* wait till resets clear */ do { ctrl = mci_readl(host, CTRL); if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET))) return true; } while (time_before(jiffies, timeout)); dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); return false; } int dw_mci_probe(struct dw_mci *host) { int width, i, ret = 0; u32 fifo_size; if (!host->pdata || !host->pdata->init) { dev_err(&host->dev, "Platform data must supply init function\n"); return -ENODEV; } if (!host->pdata->select_slot && host->pdata->num_slots > 1) { dev_err(&host->dev, "Platform data must supply select_slot function\n"); return -ENODEV; } if (!host->pdata->bus_hz) { dev_err(&host->dev, "Platform data must supply 
bus speed\n"); return -ENODEV; } host->bus_hz = host->pdata->bus_hz; host->quirks = host->pdata->quirks; spin_lock_init(&host->lock); INIT_LIST_HEAD(&host->queue); host->dma_ops = host->pdata->dma_ops; dw_mci_init_dma(host); /* * Get the host data width - this assumes that HCON has been set with * the correct values. */ i = (mci_readl(host, HCON) >> 7) & 0x7; if (!i) { host->push_data = dw_mci_push_data16; host->pull_data = dw_mci_pull_data16; width = 16; host->data_shift = 1; } else if (i == 2) { host->push_data = dw_mci_push_data64; host->pull_data = dw_mci_pull_data64; width = 64; host->data_shift = 3; } else { /* Check for a reserved value, and warn if it is */ WARN((i != 1), "HCON reports a reserved host data width!\n" "Defaulting to 32-bit access.\n"); host->push_data = dw_mci_push_data32; host->pull_data = dw_mci_pull_data32; width = 32; host->data_shift = 2; } /* Reset all blocks */ if (!mci_wait_reset(&host->dev, host)) { ret = -ENODEV; goto err_dmaunmap; } /* Clear the interrupts for the host controller */ mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ /* Put in max timeout */ mci_writel(host, TMOUT, 0xFFFFFFFF); /* * FIFO threshold settings RxMark = fifo_size / 2 - 1, * Tx Mark = fifo_size / 2 DMA Size = 8 */ if (!host->pdata->fifo_depth) { /* * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may * have been overwritten by the bootloader, just like we're * about to do, so if you know the value for your hardware, you * should put it in the platform data. 
*/ fifo_size = mci_readl(host, FIFOTH); fifo_size = 1 + ((fifo_size >> 16) & 0xfff); } else { fifo_size = host->pdata->fifo_depth; } host->fifo_depth = fifo_size; host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | ((fifo_size/2) << 0)); mci_writel(host, FIFOTH, host->fifoth_val); /* disable clock to CIU */ mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); if (!dw_mci_card_workqueue) goto err_dmaunmap; INIT_WORK(&host->card_work, dw_mci_work_routine_card); ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); if (ret) goto err_workqueue; if (host->pdata->num_slots) host->num_slots = host->pdata->num_slots; else host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; /* We need at least one slot to succeed */ for (i = 0; i < host->num_slots; i++) { ret = dw_mci_init_slot(host, i); if (ret) { ret = -ENODEV; goto err_init_slot; } } /* * In 2.40a spec, Data offset is changed. * Need to check the version-id and set data-offset for DATA register. 
*/ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); dev_info(&host->dev, "Version ID is %04x\n", host->verid); if (host->verid < DW_MMC_240A) host->data_offset = DATA_OFFSET; else host->data_offset = DATA_240A_OFFSET; /* * Enable interrupts for command done, data over, data empty, card det, * receive ready and error such as transmit, receive timeout, crc error */ mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ dev_info(&host->dev, "DW MMC controller at irq %d, " "%d bit host data width, " "%u deep fifo\n", host->irq, width, fifo_size); if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); return 0; err_init_slot: /* De-init any initialized slots */ while (i > 0) { if (host->slot[i]) dw_mci_cleanup_slot(host->slot[i], i); i--; } free_irq(host->irq, host); err_workqueue: destroy_workqueue(dw_mci_card_workqueue); err_dmaunmap: if (host->use_dma && host->dma_ops->exit) host->dma_ops->exit(host); dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); if (host->vmmc) { regulator_disable(host->vmmc); regulator_put(host->vmmc); } return ret; } EXPORT_SYMBOL(dw_mci_probe); void dw_mci_remove(struct dw_mci *host) { int i; mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ for (i = 0; i < host->num_slots; i++) { dev_dbg(&host->dev, "remove slot %d\n", i); if (host->slot[i]) dw_mci_cleanup_slot(host->slot[i], i); } /* disable clock to CIU */ mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); free_irq(host->irq, host); destroy_workqueue(dw_mci_card_workqueue); dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); if (host->use_dma && host->dma_ops->exit) host->dma_ops->exit(host); if (host->vmmc) { 
regulator_disable(host->vmmc); regulator_put(host->vmmc); } } EXPORT_SYMBOL(dw_mci_remove); #ifdef CONFIG_PM_SLEEP /* * TODO: we should probably disable the clock to the card in the suspend path. */ int dw_mci_suspend(struct dw_mci *host) { int i, ret = 0; for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) continue; ret = mmc_suspend_host(slot->mmc); if (ret < 0) { while (--i >= 0) { slot = host->slot[i]; if (slot) mmc_resume_host(host->slot[i]->mmc); } return ret; } } if (host->vmmc) regulator_disable(host->vmmc); return 0; } EXPORT_SYMBOL(dw_mci_suspend); int dw_mci_resume(struct dw_mci *host) { int i, ret; if (host->vmmc) regulator_enable(host->vmmc); if (host->dma_ops->init) host->dma_ops->init(host); if (!mci_wait_reset(&host->dev, host)) { ret = -ENODEV; return ret; } /* Restore the old value at FIFOTH register */ mci_writel(host, FIFOTH, host->fifoth_val); mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) continue; ret = mmc_resume_host(host->slot[i]->mmc); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL(dw_mci_resume); #endif /* CONFIG_PM_SLEEP */ static int __init dw_mci_init(void) { printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver"); return 0; } static void __exit dw_mci_exit(void) { } module_init(dw_mci_init); module_exit(dw_mci_exit); MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); MODULE_AUTHOR("NXP Semiconductor VietNam"); MODULE_AUTHOR("Imagination Technologies Ltd"); MODULE_LICENSE("GPL v2");
gpl-2.0
xDARKMATT3Rx/xDARKMATT3Rx_Kernel_BPH1
sound/soc/blackfin/bf5xx-i2s.c
4111
7345
/* * File: sound/soc/blackfin/bf5xx-i2s.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Tue June 06 2008 * Description: Blackfin I2S CPU DAI driver * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/irq.h> #include <asm/portmux.h> #include <linux/mutex.h> #include <linux/gpio.h> #include "bf5xx-sport.h" struct bf5xx_i2s_port { u16 tcr1; u16 rcr1; u16 tcr2; u16 rcr2; int configured; }; static int bf5xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai); struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data; int ret = 0; /* interface format:support I2S,slave mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: bf5xx_i2s->tcr1 |= TFSR | TCKFE; bf5xx_i2s->rcr1 |= RFSR | RCKFE; bf5xx_i2s->tcr2 |= TSFSE; bf5xx_i2s->rcr2 |= RSFSE; break; case SND_SOC_DAIFMT_DSP_A: bf5xx_i2s->tcr1 |= TFSR; bf5xx_i2s->rcr1 |= RFSR; break; case 
SND_SOC_DAIFMT_LEFT_J: ret = -EINVAL; break; default: printk(KERN_ERR "%s: Unknown DAI format type\n", __func__); ret = -EINVAL; break; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: break; case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBM_CFS: case SND_SOC_DAIFMT_CBS_CFM: ret = -EINVAL; break; default: printk(KERN_ERR "%s: Unknown DAI master type\n", __func__); ret = -EINVAL; break; } return ret; } static int bf5xx_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data; int ret = 0; bf5xx_i2s->tcr2 &= ~0x1f; bf5xx_i2s->rcr2 &= ~0x1f; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: bf5xx_i2s->tcr2 |= 7; bf5xx_i2s->rcr2 |= 7; sport_handle->wdsize = 1; case SNDRV_PCM_FORMAT_S16_LE: bf5xx_i2s->tcr2 |= 15; bf5xx_i2s->rcr2 |= 15; sport_handle->wdsize = 2; break; case SNDRV_PCM_FORMAT_S24_LE: bf5xx_i2s->tcr2 |= 23; bf5xx_i2s->rcr2 |= 23; sport_handle->wdsize = 3; break; case SNDRV_PCM_FORMAT_S32_LE: bf5xx_i2s->tcr2 |= 31; bf5xx_i2s->rcr2 |= 31; sport_handle->wdsize = 4; break; } if (!bf5xx_i2s->configured) { /* * TX and RX are not independent,they are enabled at the * same time, even if only one side is running. So, we * need to configure both of them at the time when the first * stream is opened. * * CPU DAI:slave mode. 
*/ bf5xx_i2s->configured = 1; ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1, bf5xx_i2s->rcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1, bf5xx_i2s->tcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } } return 0; } static void bf5xx_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data; pr_debug("%s enter\n", __func__); /* No active stream, SPORT is allowed to be configured again. */ if (!dai->active) bf5xx_i2s->configured = 0; } #ifdef CONFIG_PM static int bf5xx_i2s_suspend(struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); pr_debug("%s : sport %d\n", __func__, dai->id); if (dai->capture_active) sport_rx_stop(sport_handle); if (dai->playback_active) sport_tx_stop(sport_handle); return 0; } static int bf5xx_i2s_resume(struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_i2s_port *bf5xx_i2s = sport_handle->private_data; int ret; pr_debug("%s : sport %d\n", __func__, dai->id); ret = sport_config_rx(sport_handle, bf5xx_i2s->rcr1, bf5xx_i2s->rcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } ret = sport_config_tx(sport_handle, bf5xx_i2s->tcr1, bf5xx_i2s->tcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } return 0; } #else #define bf5xx_i2s_suspend NULL #define bf5xx_i2s_resume NULL #endif #define BF5XX_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \ SNDRV_PCM_RATE_96000) #define BF5XX_I2S_FORMATS \ (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops bf5xx_i2s_dai_ops = { .shutdown = bf5xx_i2s_shutdown, 
.hw_params = bf5xx_i2s_hw_params, .set_fmt = bf5xx_i2s_set_dai_fmt, }; static struct snd_soc_dai_driver bf5xx_i2s_dai = { .suspend = bf5xx_i2s_suspend, .resume = bf5xx_i2s_resume, .playback = { .channels_min = 1, .channels_max = 2, .rates = BF5XX_I2S_RATES, .formats = BF5XX_I2S_FORMATS,}, .capture = { .channels_min = 1, .channels_max = 2, .rates = BF5XX_I2S_RATES, .formats = BF5XX_I2S_FORMATS,}, .ops = &bf5xx_i2s_dai_ops, }; static int __devinit bf5xx_i2s_probe(struct platform_device *pdev) { struct sport_device *sport_handle; int ret; /* configure SPORT for I2S */ sport_handle = sport_init(pdev, 4, 2 * sizeof(u32), sizeof(struct bf5xx_i2s_port)); if (!sport_handle) return -ENODEV; /* register with the ASoC layers */ ret = snd_soc_register_dai(&pdev->dev, &bf5xx_i2s_dai); if (ret) { pr_err("Failed to register DAI: %d\n", ret); sport_done(sport_handle); return ret; } return 0; } static int __devexit bf5xx_i2s_remove(struct platform_device *pdev) { struct sport_device *sport_handle = platform_get_drvdata(pdev); pr_debug("%s enter\n", __func__); snd_soc_unregister_dai(&pdev->dev); sport_done(sport_handle); return 0; } static struct platform_driver bfin_i2s_driver = { .probe = bf5xx_i2s_probe, .remove = __devexit_p(bf5xx_i2s_remove), .driver = { .name = "bfin-i2s", .owner = THIS_MODULE, }, }; module_platform_driver(bfin_i2s_driver); /* Module information */ MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("I2S driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
Sinsoftomorrow/android_kernel_lge_g3
drivers/hwmon/vt8231.c
4111
31964
/* * vt8231.c - Part of lm_sensors, Linux kernel modules * for hardware monitoring * * Copyright (c) 2005 Roger Lucas <vt8231@hiddenengine.co.uk> * Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> * Aaron M. Marsh <amarsh@sdf.lonestar.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports VIA VT8231 South Bridge embedded sensors */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/acpi.h> #include <linux/io.h> static int force_addr; module_param(force_addr, int, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors"); static struct platform_device *pdev; #define VT8231_EXTENT 0x80 #define VT8231_BASE_REG 0x70 #define VT8231_ENABLE_REG 0x74 /* * The VT8231 registers * * The reset value for the input channel configuration is used (Reg 0x4A=0x07) * which sets the selected inputs marked with '*' below if multiple options are * possible: * * Voltage Mode Temperature Mode * Sensor Linux Id Linux Id VIA Id * -------- -------- -------- ------ * CPU Diode N/A temp1 0 * UIC1 in0 temp2 * 1 * UIC2 in1 * 
temp3 2 * UIC3 in2 * temp4 3 * UIC4 in3 * temp5 4 * UIC5 in4 * temp6 5 * 3.3V in5 N/A * * Note that the BIOS may set the configuration register to a different value * to match the motherboard configuration. */ /* fans numbered 0-1 */ #define VT8231_REG_FAN_MIN(nr) (0x3b + (nr)) #define VT8231_REG_FAN(nr) (0x29 + (nr)) /* Voltage inputs numbered 0-5 */ static const u8 regvolt[] = { 0x21, 0x22, 0x23, 0x24, 0x25, 0x26 }; static const u8 regvoltmax[] = { 0x3d, 0x2b, 0x2d, 0x2f, 0x31, 0x33 }; static const u8 regvoltmin[] = { 0x3e, 0x2c, 0x2e, 0x30, 0x32, 0x34 }; /* * Temperatures are numbered 1-6 according to the Linux kernel specification. * * In the VIA datasheet, however, the temperatures are numbered from zero. * Since it is important that this driver can easily be compared to the VIA * datasheet, we will use the VIA numbering within this driver and map the * kernel sysfs device name to the VIA number in the sysfs callback. */ #define VT8231_REG_TEMP_LOW01 0x49 #define VT8231_REG_TEMP_LOW25 0x4d static const u8 regtemp[] = { 0x1f, 0x21, 0x22, 0x23, 0x24, 0x25 }; static const u8 regtempmax[] = { 0x39, 0x3d, 0x2b, 0x2d, 0x2f, 0x31 }; static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; #define TEMP_FROM_REG(reg) (((253 * 4 - (reg)) * 550 + 105) / 210) #define TEMP_MAXMIN_FROM_REG(reg) (((253 - (reg)) * 2200 + 105) / 210) #define TEMP_MAXMIN_TO_REG(val) (253 - ((val) * 210 + 1100) / 2200) #define VT8231_REG_CONFIG 0x40 #define VT8231_REG_ALARM1 0x41 #define VT8231_REG_ALARM2 0x42 #define VT8231_REG_FANDIV 0x47 #define VT8231_REG_UCH_CONFIG 0x4a #define VT8231_REG_TEMP1_CONFIG 0x4b #define VT8231_REG_TEMP2_CONFIG 0x4c /* * temps 0-5 as numbered in VIA datasheet - see later for mapping to Linux * numbering */ #define ISTEMP(i, ch_config) ((i) == 0 ? 1 : \ ((ch_config) >> ((i)+1)) & 0x01) /* voltages 0-5 */ #define ISVOLT(i, ch_config) ((i) == 5 ? 
1 : \ !(((ch_config) >> ((i)+2)) & 0x01)) #define DIV_FROM_REG(val) (1 << (val)) /* * NB The values returned here are NOT temperatures. The calibration curves * for the thermistor curves are board-specific and must go in the * sensors.conf file. Temperature sensors are actually ten bits, but the * VIA datasheet only considers the 8 MSBs obtained from the regtemp[] * register. The temperature value returned should have a magnitude of 3, * so we use the VIA scaling as the "true" scaling and use the remaining 2 * LSBs as fractional precision. * * All the on-chip hardware temperature comparisons for the alarms are only * 8-bits wide, and compare against the 8 MSBs of the temperature. The bits * in the registers VT8231_REG_TEMP_LOW01 and VT8231_REG_TEMP_LOW25 are * ignored. */ /* ****** FAN RPM CONVERSIONS ******** * This chip saturates back at 0, not at 255 like many the other chips. * So, 0 means 0 RPM */ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm == 0) return 0; return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255); } #define FAN_FROM_REG(val, div) ((val) == 0 ? 
0 : 1310720 / ((val) * (div))) struct vt8231_data { unsigned short addr; const char *name; struct mutex update_lock; struct device *hwmon_dev; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[6]; /* Register value */ u8 in_max[6]; /* Register value */ u8 in_min[6]; /* Register value */ u16 temp[6]; /* Register value 10 bit, right aligned */ u8 temp_max[6]; /* Register value */ u8 temp_min[6]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 alarms; /* Register encoding */ u8 uch_config; }; static struct pci_dev *s_bridge; static int vt8231_probe(struct platform_device *pdev); static int __devexit vt8231_remove(struct platform_device *pdev); static struct vt8231_data *vt8231_update_device(struct device *dev); static void vt8231_init_device(struct vt8231_data *data); static inline int vt8231_read_value(struct vt8231_data *data, u8 reg) { return inb_p(data->addr + reg); } static inline void vt8231_write_value(struct vt8231_data *data, u8 reg, u8 value) { outb_p(value, data->addr + reg); } /* following are the sysfs callback functions */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = 
sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } /* Special case for input 5 as this has 3.3V scaling built into the chip */ static ssize_t show_in5(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_min[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[5] - 3) * 10000 * 54) / (958 * 
34))); } static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmin[5], data->in_min[5]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmax[5], data->in_max[5]); mutex_unlock(&data->update_lock); return count; } #define define_voltage_sysfs(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset) define_voltage_sysfs(0); define_voltage_sysfs(1); define_voltage_sysfs(2); define_voltage_sysfs(3); define_voltage_sysfs(4); static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL); static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min); static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max); /* Temperatures */ static ssize_t show_temp0(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp[0] * 250); } static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_max[0] * 1000); 
} static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_min[0] * 1000); } static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmax[0], data->temp_max[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmin[0], data->temp_min[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", 
TEMP_MAXMIN_FROM_REG(data->temp_min[nr])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } /* * Note that these map the Linux temperature sensor numbering (1-6) to the VIA * temperature sensor numbering (0-5) */ #define define_temperature_sysfs(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1) static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL); static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min); define_temperature_sysfs(2); define_temperature_sysfs(3); define_temperature_sysfs(4); define_temperature_sysfs(5); define_temperature_sysfs(6); /* Fans */ 
static ssize_t show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr])); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); unsigned long val; int nr = sensor_attr->index; int old = vt8231_read_value(data, VT8231_REG_FANDIV); long min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); int err; err = kstrtoul(buf, 10, 
&val); if (err) return err; mutex_lock(&data->update_lock); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: dev_err(dev, "fan_div value %ld not supported. " "Choose one of 1, 2, 4 or 8!\n", val); mutex_unlock(&data->update_lock); return -EINVAL; } /* Correct the fan minimum speed */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); vt8231_write_value(data, VT8231_REG_FANDIV, old); mutex_unlock(&data->update_lock); return count; } #define define_fan_sysfs(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ show_fan, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ show_fan_div, set_fan_div, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, offset - 1) define_fan_sysfs(1); define_fan_sysfs(2); /* Alarms */ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp6_alarm, 
S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct vt8231_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *vt8231_attributes_temps[6][5] = { { &dev_attr_temp1_input.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_temp1_max.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp6_input.dev_attr.attr, &sensor_dev_attr_temp6_max_hyst.dev_attr.attr, &sensor_dev_attr_temp6_max.dev_attr.attr, &sensor_dev_attr_temp6_alarm.dev_attr.attr, NULL } }; static const struct attribute_group 
vt8231_group_temps[6] = { { .attrs = vt8231_attributes_temps[0] }, { .attrs = vt8231_attributes_temps[1] }, { .attrs = vt8231_attributes_temps[2] }, { .attrs = vt8231_attributes_temps[3] }, { .attrs = vt8231_attributes_temps[4] }, { .attrs = vt8231_attributes_temps[5] }, }; static struct attribute *vt8231_attributes_volts[6][5] = { { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }, { &dev_attr_in5_input.attr, &dev_attr_in5_min.attr, &dev_attr_in5_max.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, NULL } }; static const struct attribute_group vt8231_group_volts[6] = { { .attrs = vt8231_attributes_volts[0] }, { .attrs = vt8231_attributes_volts[1] }, { .attrs = vt8231_attributes_volts[2] }, { .attrs = vt8231_attributes_volts[3] }, { .attrs = vt8231_attributes_volts[4] }, { .attrs = vt8231_attributes_volts[5] }, }; static struct attribute *vt8231_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, 
&sensor_dev_attr_fan2_alarm.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group vt8231_group = { .attrs = vt8231_attributes, }; static struct platform_driver vt8231_driver = { .driver = { .owner = THIS_MODULE, .name = "vt8231", }, .probe = vt8231_probe, .remove = __devexit_p(vt8231_remove), }; static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, { 0, } }; MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id); static struct pci_driver vt8231_pci_driver = { .name = "vt8231", .id_table = vt8231_pci_ids, .probe = vt8231_pci_probe, }; static int vt8231_probe(struct platform_device *pdev) { struct resource *res; struct vt8231_data *data; int err = 0, i; /* Reserve the ISA region */ res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, VT8231_EXTENT, vt8231_driver.driver.name)) { dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n", (unsigned long)res->start, (unsigned long)res->end); return -ENODEV; } data = kzalloc(sizeof(struct vt8231_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit_release; } platform_set_drvdata(pdev, data); data->addr = res->start; data->name = "vt8231"; mutex_init(&data->update_lock); vt8231_init_device(data); /* Register sysfs hooks */ err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group); if (err) goto exit_free; /* Must update device information to find out the config field */ data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) { if (ISTEMP(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_temps[i]); if (err) goto exit_remove_files; } } for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) { if (ISVOLT(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_volts[i]); if (err) goto 
exit_remove_files; } } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); exit_free: platform_set_drvdata(pdev, NULL); kfree(data); exit_release: release_region(res->start, VT8231_EXTENT); return err; } static int __devexit vt8231_remove(struct platform_device *pdev) { struct vt8231_data *data = platform_get_drvdata(pdev); int i; hwmon_device_unregister(data->hwmon_dev); for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); release_region(data->addr, VT8231_EXTENT); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static void vt8231_init_device(struct vt8231_data *data) { vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0); vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0); } static struct vt8231_data *vt8231_update_device(struct device *dev) { struct vt8231_data *data = dev_get_drvdata(dev); int i; u16 low; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i < 6; i++) { if (ISVOLT(i, data->uch_config)) { data->in[i] = vt8231_read_value(data, regvolt[i]); data->in_min[i] = vt8231_read_value(data, regvoltmin[i]); data->in_max[i] = vt8231_read_value(data, regvoltmax[i]); } } for (i = 0; i < 2; i++) { data->fan[i] = vt8231_read_value(data, VT8231_REG_FAN(i)); data->fan_min[i] = vt8231_read_value(data, VT8231_REG_FAN_MIN(i)); } low = vt8231_read_value(data, 
VT8231_REG_TEMP_LOW01); low = (low >> 6) | ((low & 0x30) >> 2) | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4); for (i = 0; i < 6; i++) { if (ISTEMP(i, data->uch_config)) { data->temp[i] = (vt8231_read_value(data, regtemp[i]) << 2) | ((low >> (2 * i)) & 0x03); data->temp_max[i] = vt8231_read_value(data, regtempmax[i]); data->temp_min[i] = vt8231_read_value(data, regtempmin[i]); } } i = vt8231_read_value(data, VT8231_REG_FANDIV); data->fan_div[0] = (i >> 4) & 0x03; data->fan_div[1] = i >> 6; data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) | (vt8231_read_value(data, VT8231_REG_ALARM2) << 8); /* Set alarm flags correctly */ if (!data->fan[0] && data->fan_min[0]) data->alarms |= 0x40; else if (data->fan[0] && !data->fan_min[0]) data->alarms &= ~0x40; if (!data->fan[1] && data->fan_min[1]) data->alarms |= 0x80; else if (data->fan[1] && !data->fan_min[1]) data->alarms &= ~0x80; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __devinit vt8231_device_add(unsigned short address) { struct resource res = { .start = address, .end = address + VT8231_EXTENT - 1, .name = "vt8231", .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc("vt8231", address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 address, val; if (force_addr) { address = force_addr & 0xff00; dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, 
VT8231_BASE_REG, address | 1)) return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG, &val)) return -ENODEV; address = val & ~(VT8231_EXTENT - 1); if (address == 0) { dev_err(&dev->dev, "base address not set - upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG, &val)) return -ENODEV; if (!(val & 0x0001)) { dev_warn(&dev->dev, "enabling sensors\n"); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, VT8231_ENABLE_REG, val | 0x0001)) return -ENODEV; } if (platform_driver_register(&vt8231_driver)) goto exit; /* Sets global pdev as a side effect */ if (vt8231_device_add(address)) goto exit_unregister; /* * Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ /* * We do, however, mark ourselves as using the PCI device to stop it * getting unloaded. */ s_bridge = pci_dev_get(dev); return -ENODEV; exit_unregister: platform_driver_unregister(&vt8231_driver); exit: return -ENODEV; } static int __init sm_vt8231_init(void) { return pci_register_driver(&vt8231_pci_driver); } static void __exit sm_vt8231_exit(void) { pci_unregister_driver(&vt8231_pci_driver); if (s_bridge != NULL) { platform_device_unregister(pdev); platform_driver_unregister(&vt8231_driver); pci_dev_put(s_bridge); s_bridge = NULL; } } MODULE_AUTHOR("Roger Lucas <vt8231@hiddenengine.co.uk>"); MODULE_DESCRIPTION("VT8231 sensors"); MODULE_LICENSE("GPL"); module_init(sm_vt8231_init); module_exit(sm_vt8231_exit);
gpl-2.0
MattCrystal/Nine
drivers/hwmon/vt8231.c
4111
31964
/* * vt8231.c - Part of lm_sensors, Linux kernel modules * for hardware monitoring * * Copyright (c) 2005 Roger Lucas <vt8231@hiddenengine.co.uk> * Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> * Aaron M. Marsh <amarsh@sdf.lonestar.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports VIA VT8231 South Bridge embedded sensors */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/acpi.h> #include <linux/io.h> static int force_addr; module_param(force_addr, int, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors"); static struct platform_device *pdev; #define VT8231_EXTENT 0x80 #define VT8231_BASE_REG 0x70 #define VT8231_ENABLE_REG 0x74 /* * The VT8231 registers * * The reset value for the input channel configuration is used (Reg 0x4A=0x07) * which sets the selected inputs marked with '*' below if multiple options are * possible: * * Voltage Mode Temperature Mode * Sensor Linux Id Linux Id VIA Id * -------- -------- -------- ------ * CPU Diode N/A temp1 0 * UIC1 in0 temp2 * 1 * UIC2 in1 * 
temp3 2 * UIC3 in2 * temp4 3 * UIC4 in3 * temp5 4 * UIC5 in4 * temp6 5 * 3.3V in5 N/A * * Note that the BIOS may set the configuration register to a different value * to match the motherboard configuration. */ /* fans numbered 0-1 */ #define VT8231_REG_FAN_MIN(nr) (0x3b + (nr)) #define VT8231_REG_FAN(nr) (0x29 + (nr)) /* Voltage inputs numbered 0-5 */ static const u8 regvolt[] = { 0x21, 0x22, 0x23, 0x24, 0x25, 0x26 }; static const u8 regvoltmax[] = { 0x3d, 0x2b, 0x2d, 0x2f, 0x31, 0x33 }; static const u8 regvoltmin[] = { 0x3e, 0x2c, 0x2e, 0x30, 0x32, 0x34 }; /* * Temperatures are numbered 1-6 according to the Linux kernel specification. * * In the VIA datasheet, however, the temperatures are numbered from zero. * Since it is important that this driver can easily be compared to the VIA * datasheet, we will use the VIA numbering within this driver and map the * kernel sysfs device name to the VIA number in the sysfs callback. */ #define VT8231_REG_TEMP_LOW01 0x49 #define VT8231_REG_TEMP_LOW25 0x4d static const u8 regtemp[] = { 0x1f, 0x21, 0x22, 0x23, 0x24, 0x25 }; static const u8 regtempmax[] = { 0x39, 0x3d, 0x2b, 0x2d, 0x2f, 0x31 }; static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; #define TEMP_FROM_REG(reg) (((253 * 4 - (reg)) * 550 + 105) / 210) #define TEMP_MAXMIN_FROM_REG(reg) (((253 - (reg)) * 2200 + 105) / 210) #define TEMP_MAXMIN_TO_REG(val) (253 - ((val) * 210 + 1100) / 2200) #define VT8231_REG_CONFIG 0x40 #define VT8231_REG_ALARM1 0x41 #define VT8231_REG_ALARM2 0x42 #define VT8231_REG_FANDIV 0x47 #define VT8231_REG_UCH_CONFIG 0x4a #define VT8231_REG_TEMP1_CONFIG 0x4b #define VT8231_REG_TEMP2_CONFIG 0x4c /* * temps 0-5 as numbered in VIA datasheet - see later for mapping to Linux * numbering */ #define ISTEMP(i, ch_config) ((i) == 0 ? 1 : \ ((ch_config) >> ((i)+1)) & 0x01) /* voltages 0-5 */ #define ISVOLT(i, ch_config) ((i) == 5 ? 
1 : \ !(((ch_config) >> ((i)+2)) & 0x01)) #define DIV_FROM_REG(val) (1 << (val)) /* * NB The values returned here are NOT temperatures. The calibration curves * for the thermistor curves are board-specific and must go in the * sensors.conf file. Temperature sensors are actually ten bits, but the * VIA datasheet only considers the 8 MSBs obtained from the regtemp[] * register. The temperature value returned should have a magnitude of 3, * so we use the VIA scaling as the "true" scaling and use the remaining 2 * LSBs as fractional precision. * * All the on-chip hardware temperature comparisons for the alarms are only * 8-bits wide, and compare against the 8 MSBs of the temperature. The bits * in the registers VT8231_REG_TEMP_LOW01 and VT8231_REG_TEMP_LOW25 are * ignored. */ /* ****** FAN RPM CONVERSIONS ******** * This chip saturates back at 0, not at 255 like many the other chips. * So, 0 means 0 RPM */ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm == 0) return 0; return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255); } #define FAN_FROM_REG(val, div) ((val) == 0 ? 
0 : 1310720 / ((val) * (div))) struct vt8231_data { unsigned short addr; const char *name; struct mutex update_lock; struct device *hwmon_dev; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[6]; /* Register value */ u8 in_max[6]; /* Register value */ u8 in_min[6]; /* Register value */ u16 temp[6]; /* Register value 10 bit, right aligned */ u8 temp_max[6]; /* Register value */ u8 temp_min[6]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 alarms; /* Register encoding */ u8 uch_config; }; static struct pci_dev *s_bridge; static int vt8231_probe(struct platform_device *pdev); static int __devexit vt8231_remove(struct platform_device *pdev); static struct vt8231_data *vt8231_update_device(struct device *dev); static void vt8231_init_device(struct vt8231_data *data); static inline int vt8231_read_value(struct vt8231_data *data, u8 reg) { return inb_p(data->addr + reg); } static inline void vt8231_write_value(struct vt8231_data *data, u8 reg, u8 value) { outb_p(value, data->addr + reg); } /* following are the sysfs callback functions */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = 
sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } /* Special case for input 5 as this has 3.3V scaling built into the chip */ static ssize_t show_in5(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_min[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[5] - 3) * 10000 * 54) / (958 * 
34))); } static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmin[5], data->in_min[5]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmax[5], data->in_max[5]); mutex_unlock(&data->update_lock); return count; } #define define_voltage_sysfs(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset) define_voltage_sysfs(0); define_voltage_sysfs(1); define_voltage_sysfs(2); define_voltage_sysfs(3); define_voltage_sysfs(4); static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL); static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min); static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max); /* Temperatures */ static ssize_t show_temp0(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp[0] * 250); } static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_max[0] * 1000); 
} static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_min[0] * 1000); } static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmax[0], data->temp_max[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmin[0], data->temp_min[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", 
TEMP_MAXMIN_FROM_REG(data->temp_min[nr])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } /* * Note that these map the Linux temperature sensor numbering (1-6) to the VIA * temperature sensor numbering (0-5) */ #define define_temperature_sysfs(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1) static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL); static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min); define_temperature_sysfs(2); define_temperature_sysfs(3); define_temperature_sysfs(4); define_temperature_sysfs(5); define_temperature_sysfs(6); /* Fans */ 
static ssize_t show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr])); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); unsigned long val; int nr = sensor_attr->index; int old = vt8231_read_value(data, VT8231_REG_FANDIV); long min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); int err; err = kstrtoul(buf, 10, 
&val); if (err) return err; mutex_lock(&data->update_lock); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: dev_err(dev, "fan_div value %ld not supported. " "Choose one of 1, 2, 4 or 8!\n", val); mutex_unlock(&data->update_lock); return -EINVAL; } /* Correct the fan minimum speed */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); vt8231_write_value(data, VT8231_REG_FANDIV, old); mutex_unlock(&data->update_lock); return count; } #define define_fan_sysfs(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ show_fan, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ show_fan_div, set_fan_div, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, offset - 1) define_fan_sysfs(1); define_fan_sysfs(2); /* Alarms */ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp6_alarm, 
S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct vt8231_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *vt8231_attributes_temps[6][5] = { { &dev_attr_temp1_input.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_temp1_max.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp6_input.dev_attr.attr, &sensor_dev_attr_temp6_max_hyst.dev_attr.attr, &sensor_dev_attr_temp6_max.dev_attr.attr, &sensor_dev_attr_temp6_alarm.dev_attr.attr, NULL } }; static const struct attribute_group 
vt8231_group_temps[6] = { { .attrs = vt8231_attributes_temps[0] }, { .attrs = vt8231_attributes_temps[1] }, { .attrs = vt8231_attributes_temps[2] }, { .attrs = vt8231_attributes_temps[3] }, { .attrs = vt8231_attributes_temps[4] }, { .attrs = vt8231_attributes_temps[5] }, }; static struct attribute *vt8231_attributes_volts[6][5] = { { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }, { &dev_attr_in5_input.attr, &dev_attr_in5_min.attr, &dev_attr_in5_max.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, NULL } }; static const struct attribute_group vt8231_group_volts[6] = { { .attrs = vt8231_attributes_volts[0] }, { .attrs = vt8231_attributes_volts[1] }, { .attrs = vt8231_attributes_volts[2] }, { .attrs = vt8231_attributes_volts[3] }, { .attrs = vt8231_attributes_volts[4] }, { .attrs = vt8231_attributes_volts[5] }, }; static struct attribute *vt8231_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, 
&sensor_dev_attr_fan2_alarm.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group vt8231_group = { .attrs = vt8231_attributes, }; static struct platform_driver vt8231_driver = { .driver = { .owner = THIS_MODULE, .name = "vt8231", }, .probe = vt8231_probe, .remove = __devexit_p(vt8231_remove), }; static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, { 0, } }; MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id); static struct pci_driver vt8231_pci_driver = { .name = "vt8231", .id_table = vt8231_pci_ids, .probe = vt8231_pci_probe, }; static int vt8231_probe(struct platform_device *pdev) { struct resource *res; struct vt8231_data *data; int err = 0, i; /* Reserve the ISA region */ res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, VT8231_EXTENT, vt8231_driver.driver.name)) { dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n", (unsigned long)res->start, (unsigned long)res->end); return -ENODEV; } data = kzalloc(sizeof(struct vt8231_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit_release; } platform_set_drvdata(pdev, data); data->addr = res->start; data->name = "vt8231"; mutex_init(&data->update_lock); vt8231_init_device(data); /* Register sysfs hooks */ err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group); if (err) goto exit_free; /* Must update device information to find out the config field */ data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) { if (ISTEMP(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_temps[i]); if (err) goto exit_remove_files; } } for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) { if (ISVOLT(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_volts[i]); if (err) goto 
exit_remove_files; } } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); exit_free: platform_set_drvdata(pdev, NULL); kfree(data); exit_release: release_region(res->start, VT8231_EXTENT); return err; } static int __devexit vt8231_remove(struct platform_device *pdev) { struct vt8231_data *data = platform_get_drvdata(pdev); int i; hwmon_device_unregister(data->hwmon_dev); for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); release_region(data->addr, VT8231_EXTENT); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static void vt8231_init_device(struct vt8231_data *data) { vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0); vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0); } static struct vt8231_data *vt8231_update_device(struct device *dev) { struct vt8231_data *data = dev_get_drvdata(dev); int i; u16 low; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i < 6; i++) { if (ISVOLT(i, data->uch_config)) { data->in[i] = vt8231_read_value(data, regvolt[i]); data->in_min[i] = vt8231_read_value(data, regvoltmin[i]); data->in_max[i] = vt8231_read_value(data, regvoltmax[i]); } } for (i = 0; i < 2; i++) { data->fan[i] = vt8231_read_value(data, VT8231_REG_FAN(i)); data->fan_min[i] = vt8231_read_value(data, VT8231_REG_FAN_MIN(i)); } low = vt8231_read_value(data, 
VT8231_REG_TEMP_LOW01); low = (low >> 6) | ((low & 0x30) >> 2) | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4); for (i = 0; i < 6; i++) { if (ISTEMP(i, data->uch_config)) { data->temp[i] = (vt8231_read_value(data, regtemp[i]) << 2) | ((low >> (2 * i)) & 0x03); data->temp_max[i] = vt8231_read_value(data, regtempmax[i]); data->temp_min[i] = vt8231_read_value(data, regtempmin[i]); } } i = vt8231_read_value(data, VT8231_REG_FANDIV); data->fan_div[0] = (i >> 4) & 0x03; data->fan_div[1] = i >> 6; data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) | (vt8231_read_value(data, VT8231_REG_ALARM2) << 8); /* Set alarm flags correctly */ if (!data->fan[0] && data->fan_min[0]) data->alarms |= 0x40; else if (data->fan[0] && !data->fan_min[0]) data->alarms &= ~0x40; if (!data->fan[1] && data->fan_min[1]) data->alarms |= 0x80; else if (data->fan[1] && !data->fan_min[1]) data->alarms &= ~0x80; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __devinit vt8231_device_add(unsigned short address) { struct resource res = { .start = address, .end = address + VT8231_EXTENT - 1, .name = "vt8231", .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc("vt8231", address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 address, val; if (force_addr) { address = force_addr & 0xff00; dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, 
VT8231_BASE_REG, address | 1)) return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG, &val)) return -ENODEV; address = val & ~(VT8231_EXTENT - 1); if (address == 0) { dev_err(&dev->dev, "base address not set - upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG, &val)) return -ENODEV; if (!(val & 0x0001)) { dev_warn(&dev->dev, "enabling sensors\n"); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, VT8231_ENABLE_REG, val | 0x0001)) return -ENODEV; } if (platform_driver_register(&vt8231_driver)) goto exit; /* Sets global pdev as a side effect */ if (vt8231_device_add(address)) goto exit_unregister; /* * Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ /* * We do, however, mark ourselves as using the PCI device to stop it * getting unloaded. */ s_bridge = pci_dev_get(dev); return -ENODEV; exit_unregister: platform_driver_unregister(&vt8231_driver); exit: return -ENODEV; } static int __init sm_vt8231_init(void) { return pci_register_driver(&vt8231_pci_driver); } static void __exit sm_vt8231_exit(void) { pci_unregister_driver(&vt8231_pci_driver); if (s_bridge != NULL) { platform_device_unregister(pdev); platform_driver_unregister(&vt8231_driver); pci_dev_put(s_bridge); s_bridge = NULL; } } MODULE_AUTHOR("Roger Lucas <vt8231@hiddenengine.co.uk>"); MODULE_DESCRIPTION("VT8231 sensors"); MODULE_LICENSE("GPL"); module_init(sm_vt8231_init); module_exit(sm_vt8231_exit);
gpl-2.0
Cl3Kener/UBER-N5
drivers/staging/iio/accel/adis16204_ring.c
4879
3567
#include <linux/export.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include "../iio.h" #include "../ring_sw.h" #include "../trigger_consumer.h" #include "adis16204.h" /** * adis16204_read_ring_data() read data registers which will be placed into ring * @dev: device associated with child of actual device (iio_dev or iio_trig) * @rx: somewhere to pass back the value read **/ static int adis16204_read_ring_data(struct device *dev, u8 *rx) { struct spi_message msg; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct adis16204_state *st = iio_priv(indio_dev); struct spi_transfer xfers[ADIS16204_OUTPUTS + 1]; int ret; int i; mutex_lock(&st->buf_lock); spi_message_init(&msg); memset(xfers, 0, sizeof(xfers)); for (i = 0; i <= ADIS16204_OUTPUTS; i++) { xfers[i].bits_per_word = 8; xfers[i].cs_change = 1; xfers[i].len = 2; xfers[i].delay_usecs = 20; xfers[i].tx_buf = st->tx + 2 * i; st->tx[2 * i] = ADIS16204_READ_REG(ADIS16204_SUPPLY_OUT + 2 * i); st->tx[2 * i + 1] = 0; if (i >= 1) xfers[i].rx_buf = rx + 2 * (i - 1); spi_message_add_tail(&xfers[i], &msg); } ret = spi_sync(st->us, &msg); if (ret) dev_err(&st->us->dev, "problem when burst reading"); mutex_unlock(&st->buf_lock); return ret; } /* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device * specific to be rolled into the core. 
*/ static irqreturn_t adis16204_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis16204_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; int i = 0; s16 *data; size_t datasize = ring->access->get_bytes_per_datum(ring); data = kmalloc(datasize, GFP_KERNEL); if (data == NULL) { dev_err(&st->us->dev, "memory alloc failed in ring bh"); return -ENOMEM; } if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) && adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0) for (; i < bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); i++) data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2])); /* Guaranteed to be aligned with 8 byte boundary */ if (ring->scan_timestamp) *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; ring->access->store_to(ring, (u8 *)data, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); kfree(data); return IRQ_HANDLED; } void adis16204_unconfigure_ring(struct iio_dev *indio_dev) { iio_dealloc_pollfunc(indio_dev->pollfunc); iio_sw_rb_free(indio_dev->buffer); } static const struct iio_buffer_setup_ops adis16204_ring_setup_ops = { .preenable = &iio_sw_buffer_preenable, .postenable = &iio_triggered_buffer_postenable, .predisable = &iio_triggered_buffer_predisable, }; int adis16204_configure_ring(struct iio_dev *indio_dev) { int ret = 0; struct iio_buffer *ring; ring = iio_sw_rb_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->buffer = ring; ring->scan_timestamp = true; indio_dev->setup_ops = &adis16204_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &adis16204_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_iio_sw_rb_free: iio_sw_rb_free(indio_dev->buffer); return ret; }
gpl-2.0
mifl/android_kernel_pantech_ef50l
fs/fuse/cuse.c
5135
15109
/* * CUSE: Character device in Userspace * * Copyright (C) 2008-2009 SUSE Linux Products GmbH * Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * CUSE enables character devices to be implemented from userland much * like FUSE allows filesystems. On initialization /dev/cuse is * created. By opening the file and replying to the CUSE_INIT request * userland CUSE server can create a character device. After that the * operation is very similar to FUSE. * * A CUSE instance involves the following objects. * * cuse_conn : contains fuse_conn and serves as bonding structure * channel : file handle connected to the userland CUSE server * cdev : the implemented character device * dev : generic device for cdev * * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with * devices, it's called 'channel' to reduce confusion. * * channel determines when the character device dies. When channel is * closed, everything begins to destruct. The cuse_conn is taken off * the lookup table preventing further access from cdev, cdev and * generic device are removed and the base reference of cuse_conn is * put. * * On each open, the matching cuse_conn is looked up and if found an * additional reference is taken which is released when the file is * closed. 
*/ #include <linux/fuse.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kdev_t.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/magic.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/module.h> #include "fuse_i.h" #define CUSE_CONNTBL_LEN 64 struct cuse_conn { struct list_head list; /* linked on cuse_conntbl */ struct fuse_conn fc; /* fuse connection */ struct cdev *cdev; /* associated character device */ struct device *dev; /* device representing @cdev */ /* init parameters, set once during initialization */ bool unrestricted_ioctl; }; static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; static struct class *cuse_class; static struct cuse_conn *fc_to_cc(struct fuse_conn *fc) { return container_of(fc, struct cuse_conn, fc); } static struct list_head *cuse_conntbl_head(dev_t devt) { return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN]; } /************************************************************************** * CUSE frontend operations * * These are file operations for the character device. * * On open, CUSE opens a file from the FUSE mnt and stores it to * private_data of the open file. All other ops call FUSE ops on the * FUSE file. */ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = 0; return fuse_direct_io(file, buf, count, &pos, 0); } static ssize_t cuse_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { loff_t pos = 0; /* * No locking or generic_write_checks(), the server is * responsible for locking and sanity checks. 
*/ return fuse_direct_io(file, buf, count, &pos, 1); } static int cuse_open(struct inode *inode, struct file *file) { dev_t devt = inode->i_cdev->dev; struct cuse_conn *cc = NULL, *pos; int rc; /* look up and get the connection */ spin_lock(&cuse_lock); list_for_each_entry(pos, cuse_conntbl_head(devt), list) if (pos->dev->devt == devt) { fuse_conn_get(&pos->fc); cc = pos; break; } spin_unlock(&cuse_lock); /* dead? */ if (!cc) return -ENODEV; /* * Generic permission check is already done against the chrdev * file, proceed to open. */ rc = fuse_do_open(&cc->fc, 0, file, 0); if (rc) fuse_conn_put(&cc->fc); return rc; } static int cuse_release(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; fuse_sync_release(ff, file->f_flags); fuse_conn_put(fc); return 0; } static long cuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = 0; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = FUSE_IOCTL_COMPAT; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static const struct file_operations cuse_frontend_fops = { .owner = THIS_MODULE, .read = cuse_read, .write = cuse_write, .open = cuse_open, .release = cuse_release, .unlocked_ioctl = cuse_file_ioctl, .compat_ioctl = cuse_file_compat_ioctl, .poll = fuse_file_poll, .llseek = noop_llseek, }; /************************************************************************** * CUSE channel initialization and destruction */ struct cuse_devinfo { const char *name; }; /** * cuse_parse_one - parse one key=value pair * @pp: i/o parameter for the 
current position * @end: points to one past the end of the packed string * @keyp: out parameter for key * @valp: out parameter for value * * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends * at @end - 1. This function parses one pair and set *@keyp to the * start of the key and *@valp to the start of the value. Note that * the original string is modified such that the key string is * terminated with '\0'. *@pp is updated to point to the next string. * * RETURNS: * 1 on successful parse, 0 on EOF, -errno on failure. */ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) { char *p = *pp; char *key, *val; while (p < end && *p == '\0') p++; if (p == end) return 0; if (end[-1] != '\0') { printk(KERN_ERR "CUSE: info not properly terminated\n"); return -EINVAL; } key = val = p; p += strlen(p); if (valp) { strsep(&val, "="); if (!val) val = key + strlen(key); key = strstrip(key); val = strstrip(val); } else key = strstrip(key); if (!strlen(key)) { printk(KERN_ERR "CUSE: zero length info key specified\n"); return -EINVAL; } *pp = p; *keyp = key; if (valp) *valp = val; return 1; } /** * cuse_parse_dev_info - parse device info * @p: device info string * @len: length of device info string * @devinfo: out parameter for parsed device info * * Parse @p to extract device info and store it into @devinfo. String * pointed to by @p is modified by parsing and @devinfo points into * them, so @p shouldn't be freed while @devinfo is in use. * * RETURNS: * 0 on success, -errno on failure. 
*/ static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; char *key, *val; int rc; while (true) { rc = cuse_parse_one(&p, end, &key, &val); if (rc < 0) return rc; if (!rc) break; if (strcmp(key, "DEVNAME") == 0) devinfo->name = val; else printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n", key); } if (!devinfo->name || !strlen(devinfo->name)) { printk(KERN_ERR "CUSE: DEVNAME unspecified\n"); return -EINVAL; } return 0; } static void cuse_gendev_release(struct device *dev) { kfree(dev); } /** * cuse_process_init_reply - finish initializing CUSE channel * * This function creates the character device and sets up all the * required data structures for it. Please read the comment at the * top of this file for high level overview. */ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct cuse_conn *cc = fc_to_cc(fc); struct cuse_init_out *arg = req->out.args[0].value; struct page *page = req->pages[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; dev_t devt; int rc; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { goto err; } fc->minor = arg->minor; fc->max_read = max_t(unsigned, arg->max_read, 4096); fc->max_write = max_t(unsigned, arg->max_write, 4096); /* parse init reply */ cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, &devinfo); if (rc) goto err; /* determine and reserve devt */ devt = MKDEV(arg->dev_major, arg->dev_minor); if (!MAJOR(devt)) rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name); else rc = register_chrdev_region(devt, 1, devinfo.name); if (rc) { printk(KERN_ERR "CUSE: failed to register chrdev region\n"); goto err; } /* devt determined, create device */ rc = -ENOMEM; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto err_region; device_initialize(dev); dev_set_uevent_suppress(dev, 1); dev->class = cuse_class; 
dev->devt = devt; dev->release = cuse_gendev_release; dev_set_drvdata(dev, cc); dev_set_name(dev, "%s", devinfo.name); rc = device_add(dev); if (rc) goto err_device; /* register cdev */ rc = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto err_device; cdev->owner = THIS_MODULE; cdev->ops = &cuse_frontend_fops; rc = cdev_add(cdev, devt, 1); if (rc) goto err_cdev; cc->dev = dev; cc->cdev = cdev; /* make the device available */ spin_lock(&cuse_lock); list_add(&cc->list, cuse_conntbl_head(devt)); spin_unlock(&cuse_lock); /* announce device availability */ dev_set_uevent_suppress(dev, 0); kobject_uevent(&dev->kobj, KOBJ_ADD); out: kfree(arg); __free_page(page); return; err_cdev: cdev_del(cdev); err_device: put_device(dev); err_region: unregister_chrdev_region(devt, 1); err: fc->conn_error = 1; goto out; } static int cuse_send_init(struct cuse_conn *cc) { int rc; struct fuse_req *req; struct page *page; struct fuse_conn *fc = &cc->fc; struct cuse_init_in *arg; void *outarg; BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); req = fuse_get_req(fc); if (IS_ERR(req)) { rc = PTR_ERR(req); goto err; } rc = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto err_put_req; outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL); if (!outarg) goto err_free_page; arg = &req->misc.cuse_init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->flags |= CUSE_UNRESTRICTED_IOCTL; req->in.h.opcode = CUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(struct cuse_init_in); req->in.args[0].value = arg; req->out.numargs = 2; req->out.args[0].size = sizeof(struct cuse_init_out); req->out.args[0].value = outarg; req->out.args[1].size = CUSE_INIT_INFO_MAX; req->out.argvar = 1; req->out.argpages = 1; req->pages[0] = page; req->num_pages = 1; req->end = cuse_process_init_reply; fuse_request_send_background(fc, req); return 0; err_free_page: __free_page(page); err_put_req: fuse_put_request(fc, req); err: return rc; } static void 
cuse_fc_release(struct fuse_conn *fc) { struct cuse_conn *cc = fc_to_cc(fc); kfree(cc); } /** * cuse_channel_open - open method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being opened * * Userland CUSE server can create a CUSE device by opening /dev/cuse * and replying to the initialization request kernel sends. This * function is responsible for handling CUSE device initialization. * Because the fd opened by this function is used during * initialization, this function only creates cuse_conn and sends * init. The rest is delegated to a kthread. * * RETURNS: * 0 on success, -errno on failure. */ static int cuse_channel_open(struct inode *inode, struct file *file) { struct cuse_conn *cc; int rc; /* set up cuse_conn */ cc = kzalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; fuse_conn_init(&cc->fc); INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; cc->fc.connected = 1; cc->fc.blocked = 0; rc = cuse_send_init(cc); if (rc) { fuse_conn_put(&cc->fc); return rc; } file->private_data = &cc->fc; /* channel owns base reference to cc */ return 0; } /** * cuse_channel_release - release method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being closed * * Disconnect the channel, deregister CUSE device and initiate * destruction by putting the default reference. * * RETURNS: * 0 on success, -errno on failure. 
*/ static int cuse_channel_release(struct inode *inode, struct file *file) { struct cuse_conn *cc = fc_to_cc(file->private_data); int rc; /* remove from the conntbl, no more access from this point on */ spin_lock(&cuse_lock); list_del_init(&cc->list); spin_unlock(&cuse_lock); /* remove device */ if (cc->dev) device_unregister(cc->dev); if (cc->cdev) { unregister_chrdev_region(cc->cdev->dev, 1); cdev_del(cc->cdev); } /* kill connection and shutdown channel */ fuse_conn_kill(&cc->fc); rc = fuse_dev_release(inode, file); /* puts the base reference */ return rc; } static struct file_operations cuse_channel_fops; /* initialized during init */ /************************************************************************** * Misc stuff and module initializatiion * * CUSE exports the same set of attributes to sysfs as fusectl. */ static ssize_t cuse_class_waiting_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cuse_conn *cc = dev_get_drvdata(dev); return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting)); } static ssize_t cuse_class_abort_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cuse_conn *cc = dev_get_drvdata(dev); fuse_abort_conn(&cc->fc); return count; } static struct device_attribute cuse_class_dev_attrs[] = { __ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL), __ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store), { } }; static struct miscdevice cuse_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "cuse", .fops = &cuse_channel_fops, }; static int __init cuse_init(void) { int i, rc; /* init conntbl */ for (i = 0; i < CUSE_CONNTBL_LEN; i++) INIT_LIST_HEAD(&cuse_conntbl[i]); /* inherit and extend fuse_dev_operations */ cuse_channel_fops = fuse_dev_operations; cuse_channel_fops.owner = THIS_MODULE; cuse_channel_fops.open = cuse_channel_open; cuse_channel_fops.release = cuse_channel_release; cuse_class = class_create(THIS_MODULE, "cuse"); if (IS_ERR(cuse_class)) return 
PTR_ERR(cuse_class); cuse_class->dev_attrs = cuse_class_dev_attrs; rc = misc_register(&cuse_miscdev); if (rc) { class_destroy(cuse_class); return rc; } return 0; } static void __exit cuse_exit(void) { misc_deregister(&cuse_miscdev); class_destroy(cuse_class); } module_init(cuse_init); module_exit(cuse_exit); MODULE_AUTHOR("Tejun Heo <tj@kernel.org>"); MODULE_DESCRIPTION("Character device in Userspace"); MODULE_LICENSE("GPL");
gpl-2.0
justin0406/HD-GEE
drivers/scsi/mac_scsi.c
5135
14648
/* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov> * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King * * ALPHA RELEASE 1. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * $Log: mac_NCR5380.c,v $ */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "mac_scsi.h" /* These control the behaviour of the generic 5380 core */ #define AUTOSENSE #define PSEUDO_DMA #include "NCR5380.h" #if 0 #define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) #else #define NDEBUG (NDEBUG_ABORT) #endif #define RESET_BOOT #define DRIVER_SETUP extern void via_scsi_clear(void); #ifdef RESET_BOOT static void mac_scsi_reset_boot(struct Scsi_Host *instance); #endif static int setup_called = 0; static int setup_can_queue = -1; static int setup_cmd_per_lun = -1; static int setup_sg_tablesize = -1; static int setup_use_pdma = -1; #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; #endif static int setup_hostid = -1; /* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more * need ten times the standard value... 
*/ #define TOSHIBA_DELAY #ifdef TOSHIBA_DELAY #define AFTER_RESET_DELAY (5*HZ/2) #else #define AFTER_RESET_DELAY (HZ/2) #endif static volatile unsigned char *mac_scsi_regp = NULL; static volatile unsigned char *mac_scsi_drq = NULL; static volatile unsigned char *mac_scsi_nodrq = NULL; /* * NCR 5380 register access functions */ #if 0 /* Debug versions */ #define CTRL(p,v) (*ctrl = (v)) static char macscsi_read(struct Scsi_Host *instance, int reg) { int iobase = instance->io_port; int i; int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; CTRL(iobase, 0); i = in_8(iobase + (reg<<4)); CTRL(iobase, 0x40); return i; } static void macscsi_write(struct Scsi_Host *instance, int reg, int value) { int iobase = instance->io_port; int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; CTRL(iobase, 0); out_8(iobase + (reg<<4), value); CTRL(iobase, 0x40); } #else /* Fast versions */ static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg) { return in_8(instance->io_port + (reg<<4)); } static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value) { out_8(instance->io_port + (reg<<4), value); } #endif /* * Function : mac_scsi_setup(char *str) * * Purpose : booter command line initialization of the overrides array, * * Inputs : str - comma delimited list of options * */ static int __init mac_scsi_setup(char *str) { #ifdef DRIVER_SETUP int ints[7]; (void)get_options( str, ARRAY_SIZE(ints), ints); if (setup_called++ || ints[0] < 1 || ints[0] > 6) { printk(KERN_WARNING "scsi: <mac5380>" " Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n"); printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n"); return 0; } if (ints[0] >= 1) { if (ints[1] > 0) /* no limits on this, just > 0 */ setup_can_queue = ints[1]; } if (ints[0] >= 2) { if (ints[2] > 0) setup_cmd_per_lun = ints[2]; } if (ints[0] >= 3) { if (ints[3] >= 0) { setup_sg_tablesize = ints[3]; /* Must be <= SG_ALL (255) */ 
if (setup_sg_tablesize > SG_ALL) setup_sg_tablesize = SG_ALL; } } if (ints[0] >= 4) { /* Must be between 0 and 7 */ if (ints[4] >= 0 && ints[4] <= 7) setup_hostid = ints[4]; else if (ints[4] > 7) printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] ); } #ifdef SUPPORT_TAGS if (ints[0] >= 5) { if (ints[5] >= 0) setup_use_tagged_queuing = !!ints[5]; } if (ints[0] == 6) { if (ints[6] >= 0) setup_use_pdma = ints[6]; } #else if (ints[0] == 5) { if (ints[5] >= 0) setup_use_pdma = ints[5]; } #endif /* SUPPORT_TAGS */ #endif /* DRIVER_SETUP */ return 1; } __setup("mac5380=", mac_scsi_setup); /* * Function : int macscsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the * command line / compile time port and irq definitions. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init macscsi_detect(struct scsi_host_template * tpnt) { static int called = 0; int flags = 0; struct Scsi_Host *instance; if (!MACH_IS_MAC || called) return( 0 ); if (macintosh_config->scsi_type != MAC_SCSI_OLD) return( 0 ); /* setup variables */ tpnt->can_queue = (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; tpnt->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; tpnt->sg_tablesize = (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; if (setup_hostid >= 0) tpnt->this_id = setup_hostid; else { /* use 7 as default */ tpnt->this_id = 7; } #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = USE_TAGGED_QUEUING; #endif /* Once we support multiple 5380s (e.g. 
DuoDock) we'll do something different here */ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if (macintosh_config->ident == MAC_MODEL_IIFX) { mac_scsi_regp = via1+0x8000; mac_scsi_drq = via1+0xE000; mac_scsi_nodrq = via1+0xC000; /* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */ flags = FLAG_NO_PSEUDO_DMA; } else { mac_scsi_regp = via1+0x10000; mac_scsi_drq = via1+0x6000; mac_scsi_nodrq = via1+0x12000; } if (! setup_use_pdma) flags = FLAG_NO_PSEUDO_DMA; instance->io_port = (unsigned long) mac_scsi_regp; instance->irq = IRQ_MAC_SCSI; #ifdef RESET_BOOT mac_scsi_reset_boot(instance); #endif NCR5380_init(instance, flags); instance->n_io_port = 255; ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) { printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk (KERN_INFO "s disabled"); else printk (KERN_INFO " %d", instance->irq); printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE); printk(KERN_INFO "\nscsi%d:", instance->host_no); NCR5380_print_options(instance); printk("\n"); called = 1; return 1; } int macscsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) free_irq(shpnt->irq, shpnt); NCR5380_exit(shpnt); return 0; } #ifdef RESET_BOOT /* * Our 'bus reset on boot' function */ static void mac_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; NCR5380_local_declare(); NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus during initialization. No messing * with the queues, interrupts, or locks necessary here. */ printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." 
); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); /* The min. reset hold time is 25us, so 40us should be enough */ udelay( 50 ); /* reset RST and interrupt */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_read( RESET_PARITY_INTERRUPT_REG ); for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); printk(KERN_INFO " done\n" ); } #endif const char * macscsi_info (struct Scsi_Host *spnt) { return ""; } /* Pseudo-DMA: (Ove Edlund) The code attempts to catch bus errors that occur if one for example "trips over the cable". XXX: Since bus errors in the PDMA routines never happen on my computer, the bus error code is untested. If the code works as intended, a bus error results in Pseudo-DMA beeing disabled, meaning that the driver switches to slow handshake. If bus errors are NOT extremely rare, this has to be changed. */ #define CP_IO_TO_MEM(s,d,len) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ " move.w %1,%%d0\n" \ " neg.b %%d0\n" \ " and.w #3,%%d0\n" \ " sub.w %%d0,%2\n" \ " bra 2f\n" \ " 1: move.b (%0),(%1)+\n" \ " 2: dbf %%d0,1b\n" \ " move.w %2,%%d0\n" \ " lsr.w #5,%%d0\n" \ " bra 4f\n" \ " 3: move.l (%0),(%1)+\n" \ "31: move.l (%0),(%1)+\n" \ "32: move.l (%0),(%1)+\n" \ "33: move.l (%0),(%1)+\n" \ "34: move.l (%0),(%1)+\n" \ "35: move.l (%0),(%1)+\n" \ "36: move.l (%0),(%1)+\n" \ "37: move.l (%0),(%1)+\n" \ " 4: dbf %%d0,3b\n" \ " move.w %2,%%d0\n" \ " lsr.w #2,%%d0\n" \ " and.w #7,%%d0\n" \ " bra 6f\n" \ " 5: move.l (%0),(%1)+\n" \ " 6: dbf %%d0,5b\n" \ " and.w #3,%2\n" \ " bra 8f\n" \ " 7: move.b (%0),(%1)+\n" \ " 8: dbf %2,7b\n" \ " moveq.l #0, %2\n" \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ "90: moveq.l #1, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,90b\n" \ " .long 3b,90b\n" \ " .long 31b,90b\n" \ " .long 
32b,90b\n" \ " .long 33b,90b\n" \ " .long 34b,90b\n" \ " .long 35b,90b\n" \ " .long 36b,90b\n" \ " .long 37b,90b\n" \ " .long 5b,90b\n" \ " .long 7b,90b\n" \ ".previous" \ : "=a"(s), "=a"(d), "=d"(len) \ : "0"(s), "1"(d), "2"(len) \ : "d0") static int macscsi_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { unsigned char *d; volatile unsigned char *s; NCR5380_local_declare(); NCR5380_setup(instance); s = mac_scsi_drq+0x60; d = dst; /* These conditions are derived from MacOS */ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && !(NCR5380_read(STATUS_REG) & SR_REQ)) ; if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { printk(KERN_ERR "Error in macscsi_pread\n"); return -1; } CP_IO_TO_MEM(s, d, len); if (len != 0) { printk(KERN_NOTICE "Bus error in macscsi_pread\n"); return -1; } return 0; } #define CP_MEM_TO_IO(s,d,len) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ " move.w %0,%%d0\n" \ " neg.b %%d0\n" \ " and.w #3,%%d0\n" \ " sub.w %%d0,%2\n" \ " bra 2f\n" \ " 1: move.b (%0)+,(%1)\n" \ " 2: dbf %%d0,1b\n" \ " move.w %2,%%d0\n" \ " lsr.w #5,%%d0\n" \ " bra 4f\n" \ " 3: move.l (%0)+,(%1)\n" \ "31: move.l (%0)+,(%1)\n" \ "32: move.l (%0)+,(%1)\n" \ "33: move.l (%0)+,(%1)\n" \ "34: move.l (%0)+,(%1)\n" \ "35: move.l (%0)+,(%1)\n" \ "36: move.l (%0)+,(%1)\n" \ "37: move.l (%0)+,(%1)\n" \ " 4: dbf %%d0,3b\n" \ " move.w %2,%%d0\n" \ " lsr.w #2,%%d0\n" \ " and.w #7,%%d0\n" \ " bra 6f\n" \ " 5: move.l (%0)+,(%1)\n" \ " 6: dbf %%d0,5b\n" \ " and.w #3,%2\n" \ " bra 8f\n" \ " 7: move.b (%0)+,(%1)\n" \ " 8: dbf %2,7b\n" \ " moveq.l #0, %2\n" \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ "90: moveq.l #1, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,90b\n" \ " .long 3b,90b\n" \ " .long 31b,90b\n" \ " .long 32b,90b\n" \ " .long 33b,90b\n" \ " .long 34b,90b\n" \ " .long 35b,90b\n" \ " .long 36b,90b\n" \ " .long 37b,90b\n" \ " 
.long 5b,90b\n" \ " .long 7b,90b\n" \ ".previous" \ : "=a"(s), "=a"(d), "=d"(len) \ : "0"(s), "1"(d), "2"(len) \ : "d0") static int macscsi_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { unsigned char *s; volatile unsigned char *d; NCR5380_local_declare(); NCR5380_setup(instance); s = src; d = mac_scsi_drq; /* These conditions are derived from MacOS */ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (!(NCR5380_read(STATUS_REG) & SR_REQ) || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) ; if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { printk(KERN_ERR "Error in macscsi_pwrite\n"); return -1; } CP_MEM_TO_IO(s, d, len); if (len != 0) { printk(KERN_NOTICE "Bus error in macscsi_pwrite\n"); return -1; } return 0; } #include "NCR5380.c" static struct scsi_host_template driver_template = { .proc_name = "Mac5380", .proc_info = macscsi_proc_info, .name = "Macintosh NCR5380 SCSI", .detect = macscsi_detect, .release = macscsi_release, .info = macscsi_info, .queuecommand = macscsi_queue_command, .eh_abort_handler = macscsi_abort, .eh_bus_reset_handler = macscsi_bus_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c"
gpl-2.0
tkymgr/KTG_kernel-3.0_msm7x30
arch/arm/mach-omap2/clkt2xxx_apll.c
7951
3286
/* * OMAP2xxx APLL clock control functions * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include <plat/prcm.h> #include "clock.h" #include "clock2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */ #define EN_APLL_STOPPED 0 #define EN_APLL_LOCKED 3 /* CM_CLKSEL1_PLL.APLLS_CLKIN options (24XX) */ #define APLLS_CLKIN_19_2MHZ 0 #define APLLS_CLKIN_13MHZ 2 #define APLLS_CLKIN_12MHZ 3 void __iomem *cm_idlest_pll; /* Private functions */ /* Enable an APLL if off */ static int omap2_clk_apll_enable(struct clk *clk, u32 status_mask) { u32 cval, apll_mask; apll_mask = EN_APLL_LOCKED << clk->enable_bit; cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); if ((cval & apll_mask) == apll_mask) return 0; /* apll already enabled */ cval &= ~apll_mask; cval |= apll_mask; omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN); omap2_cm_wait_idlest(cm_idlest_pll, status_mask, OMAP24XX_CM_IDLEST_VAL, clk->name); /* * REVISIT: Should we return an error code if omap2_wait_clock_ready() * fails? 
*/ return 0; } static int omap2_clk_apll96_enable(struct clk *clk) { return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL_MASK); } static int omap2_clk_apll54_enable(struct clk *clk) { return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL_MASK); } static void _apll96_allow_idle(struct clk *clk) { omap2xxx_cm_set_apll96_auto_low_power_stop(); } static void _apll96_deny_idle(struct clk *clk) { omap2xxx_cm_set_apll96_disable_autoidle(); } static void _apll54_allow_idle(struct clk *clk) { omap2xxx_cm_set_apll54_auto_low_power_stop(); } static void _apll54_deny_idle(struct clk *clk) { omap2xxx_cm_set_apll54_disable_autoidle(); } /* Stop APLL */ static void omap2_clk_apll_disable(struct clk *clk) { u32 cval; cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); cval &= ~(EN_APLL_LOCKED << clk->enable_bit); omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN); } /* Public data */ const struct clkops clkops_apll96 = { .enable = omap2_clk_apll96_enable, .disable = omap2_clk_apll_disable, .allow_idle = _apll96_allow_idle, .deny_idle = _apll96_deny_idle, }; const struct clkops clkops_apll54 = { .enable = omap2_clk_apll54_enable, .disable = omap2_clk_apll_disable, .allow_idle = _apll54_allow_idle, .deny_idle = _apll54_deny_idle, }; /* Public functions */ u32 omap2xxx_get_apll_clkin(void) { u32 aplls, srate = 0; aplls = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1); aplls &= OMAP24XX_APLLS_CLKIN_MASK; aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT; if (aplls == APLLS_CLKIN_19_2MHZ) srate = 19200000; else if (aplls == APLLS_CLKIN_13MHZ) srate = 13000000; else if (aplls == APLLS_CLKIN_12MHZ) srate = 12000000; return srate; }
gpl-2.0
lirokoa/android_kernel_samsung_smdk4412
drivers/misc/iwmc3200top/debugfs.c
8207
3614
/*
 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
 * drivers/misc/iwmc3200top/debufs.c
 *
 * Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
 *  -
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio.h>
#include <linux/debugfs.h>

#include "iwmc3200top.h"
#include "fw-msg.h"
#include "log.h"
#include "debugfs.h"

/* Constants definition */
#define HEXADECIMAL_RADIX	16

/* Functions definition */

/*
 * Create a debugfs file named #name under dir_<parent>, storing the
 * resulting dentry in dbgfs->dbgfs_<parent>_files.file_<name>.
 * Expects `dbgfs` and `priv` in scope at the expansion site.
 */
#define DEBUGFS_ADD(name, parent) do {				\
	dbgfs->dbgfs_##parent##_files.file_##name =		\
	debugfs_create_file(#name, 0644, dbgfs->dir_##parent,	\
			    priv, &iwmct_dbgfs_##name##_ops);	\
} while (0)

/* Remove a debugfs entry and clear the stale dentry pointer. */
#define DEBUGFS_RM(name)  do {		\
	debugfs_remove(name);		\
	name = NULL;			\
} while (0)

/* Forward-declare the per-file read handler. */
#define DEBUGFS_READ_FUNC(name)						\
ssize_t iwmct_dbgfs_##name##_read(struct file *file,			\
				  char __user *user_buf,		\
				  size_t count, loff_t *ppos);

/* Forward-declare the per-file write handler. */
#define DEBUGFS_WRITE_FUNC(name)					\
ssize_t iwmct_dbgfs_##name##_write(struct file *file,			\
				   const char __user *user_buf,		\
				   size_t count, loff_t *ppos);

/* Declare read handler + read-only file_operations for a debugfs file. */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name)						\
	static const struct file_operations iwmct_dbgfs_##name##_ops = { \
		.read = iwmct_dbgfs_##name##_read,			\
		.open = iwmct_dbgfs_open_file_generic,			\
		.llseek = generic_file_llseek,				\
	};

/* Declare write handler + write-only file_operations for a debugfs file. */
#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name)					\
	static const struct file_operations iwmct_dbgfs_##name##_ops = { \
		.write = iwmct_dbgfs_##name##_write,			\
		.open = iwmct_dbgfs_open_file_generic,			\
		.llseek = generic_file_llseek,				\
	};

/* Declare both handlers + read/write file_operations for a debugfs file. */
#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name)						\
	DEBUGFS_WRITE_FUNC(name)					\
	static const struct file_operations iwmct_dbgfs_##name##_ops = {\
		.write = iwmct_dbgfs_##name##_write,			\
		.read = iwmct_dbgfs_##name##_read,			\
		.open = iwmct_dbgfs_open_file_generic,			\
		.llseek = generic_file_llseek,				\
	};

/* Debugfs file ops definitions */

/*
 * Create the debugfs files and directories
 *
 * Allocates the iwmct_debugfs state, stores it in priv->dbgfs (so
 * iwmct_dbgfs_unregister() can free it even on partial failure), and
 * creates the top-level driver directory.  Failures are logged and
 * swallowed — debugfs is best-effort.
 */
void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
{
	struct iwmct_debugfs *dbgfs;

	dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
	if (!dbgfs) {
		LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
			  sizeof(struct iwmct_debugfs));
		return;
	}

	priv->dbgfs = dbgfs;
	dbgfs->name = name;
	dbgfs->dir_drv = debugfs_create_dir(name, NULL);
	if (!dbgfs->dir_drv) {
		LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
		return;
	}

	return;
}

/**
 * Remove the debugfs files and directories
 *
 * Tears down the directory created by iwmct_dbgfs_register() and frees
 * the state.  Safe to call with a NULL dbgfs pointer.
 */
void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
{
	if (!dbgfs)
		return;

	DEBUGFS_RM(dbgfs->dir_drv);
	kfree(dbgfs);
	dbgfs = NULL;
}
gpl-2.0
xboxfanj/android_kernel_oneplus_msm8974
arch/mips/pnx8550/common/platform.c
9231
3202
/*
 * Platform device support for NXP PNX8550 SoCs
 *
 * Copyright 2005, Embedded Alley Solutions, Inc
 *
 * Based on arch/mips/au1000/common/platform.c
 * Platform device support for Au1x00 SoCs.
 *
 * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/serial.h>
#include <linux/serial_pnx8xxx.h>
#include <linux/platform_device.h>

#include <int.h>
#include <usb.h>
#include <uart.h>

/* MMIO window and interrupt line for the on-chip OHCI USB controller */
static struct resource pnx8550_usb_ohci_resources[] = {
	[0] = {
		.start		= PNX8550_USB_OHCI_OP_BASE,
		.end		= PNX8550_USB_OHCI_OP_BASE +
				  PNX8550_USB_OHCI_OP_LEN,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= PNX8550_INT_USB,
		.end		= PNX8550_INT_USB,
		.flags		= IORESOURCE_IRQ,
	},
};

/* MMIO windows (4 KiB each) and IRQ lines for the two on-chip UARTs */
static struct resource pnx8550_uart_resources[] = {
	[0] = {
		.start		= PNX8550_UART_PORT0,
		.end		= PNX8550_UART_PORT0 + 0xfff,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= PNX8550_UART_INT(0),
		.end		= PNX8550_UART_INT(0),
		.flags		= IORESOURCE_IRQ,
	},
	[2] = {
		.start		= PNX8550_UART_PORT1,
		.end		= PNX8550_UART_PORT1 + 0xfff,
		.flags		= IORESOURCE_MEM,
	},
	[3] = {
		.start		= PNX8550_UART_INT(1),
		.end		= PNX8550_UART_INT(1),
		.flags		= IORESOURCE_IRQ,
	},
};

/*
 * Per-port configuration handed to the pnx8xxx-uart driver via
 * platform_data.  Not static: NOTE(review): presumably referenced by
 * other PNX8550 board code — confirm before narrowing linkage.
 */
struct pnx8xxx_port pnx8xxx_ports[] = {
	[0] = {
		.port   = {
			.type		= PORT_PNX8XXX,
			.iotype		= UPIO_MEM,
			.membase	= (void __iomem *)PNX8550_UART_PORT0,
			.mapbase	= PNX8550_UART_PORT0,
			.irq		= PNX8550_UART_INT(0),
			.uartclk	= 3692300,
			.fifosize	= 16,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 0,
		},
	},
	[1] = {
		.port   = {
			.type		= PORT_PNX8XXX,
			.iotype		= UPIO_MEM,
			.membase	= (void __iomem *)PNX8550_UART_PORT1,
			.mapbase	= PNX8550_UART_PORT1,
			.irq		= PNX8550_UART_INT(1),
			.uartclk	= 3692300,
			.fifosize	= 16,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 1,
		},
	},
};

/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static u64 uart_dmamask = DMA_BIT_MASK(32);

static struct platform_device pnx8550_usb_ohci_device = {
	.name		= "pnx8550-ohci",
	.id		= -1,
	.dev = {
		.dma_mask		= &ohci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(pnx8550_usb_ohci_resources),
	.resource	= pnx8550_usb_ohci_resources,
};

static struct platform_device pnx8550_uart_device = {
	.name		= "pnx8xxx-uart",
	.id		= -1,
	.dev = {
		.dma_mask		= &uart_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= pnx8xxx_ports,
	},
	.num_resources	= ARRAY_SIZE(pnx8550_uart_resources),
	.resource	= pnx8550_uart_resources,
};

/* All devices registered in one shot by pnx8550_platform_init() */
static struct platform_device *pnx8550_platform_devices[] __initdata = {
	&pnx8550_usb_ohci_device,
	&pnx8550_uart_device,
};

/* Register the SoC's fixed platform devices at arch_initcall time. */
static int __init pnx8550_platform_init(void)
{
	return platform_add_devices(pnx8550_platform_devices,
				    ARRAY_SIZE(pnx8550_platform_devices));
}

arch_initcall(pnx8550_platform_init);
gpl-2.0
ZdrowyGosciu/kernel_g900f
drivers/staging/rtl8192u/ieee80211/proc.c
9487
2766
/*
 * Scatterlist Cryptographic API.
 *
 * Procfs information.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/init.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"

/* Global algorithm list and its guard, defined in the crypto core. */
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;

/*
 * seq_file .start: take the algorithm list lock (released in c_stop())
 * and walk forward to the *pos'th entry, or NULL past the end.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	struct list_head *v;
	loff_t n = *pos;

	down_read(&crypto_alg_sem);
	list_for_each(v, &crypto_alg_list)
		if (!n--)
			return list_entry(v, struct crypto_alg, cra_list);
	return NULL;
}

/*
 * seq_file .next: advance to the following algorithm.
 * NOTE(review): p is the struct crypto_alg * returned by c_start()/c_next(),
 * yet it is used directly as a struct list_head *.  This only works if
 * cra_list is the FIRST member of struct crypto_alg — confirm against
 * rtl_crypto.h before touching either type.
 */
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct list_head *v = p;
	(*pos)++;
	v = v->next;
	return (v == &crypto_alg_list) ?
		NULL : list_entry(v, struct crypto_alg, cra_list);
}

/* seq_file .stop: drop the lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *p)
{
	up_read(&crypto_alg_sem);
}

/* seq_file .show: emit one algorithm record (name, module, type details). */
static int c_show(struct seq_file *m, void *p)
{
	struct crypto_alg *alg = (struct crypto_alg *)p;

	seq_printf(m, "name : %s\n", alg->cra_name);
	seq_printf(m, "module : %s\n",
		   (alg->cra_module ? alg->cra_module->name : "kernel"));

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_CIPHER:
		seq_printf(m, "type : cipher\n");
		seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
		seq_printf(m, "min keysize : %u\n",
			   alg->cra_cipher.cia_min_keysize);
		seq_printf(m, "max keysize : %u\n",
			   alg->cra_cipher.cia_max_keysize);
		break;

	case CRYPTO_ALG_TYPE_DIGEST:
		seq_printf(m, "type : digest\n");
		seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
		seq_printf(m, "digestsize : %u\n",
			   alg->cra_digest.dia_digestsize);
		break;

	case CRYPTO_ALG_TYPE_COMPRESS:
		seq_printf(m, "type : compression\n");
		break;
	default:
		seq_printf(m, "type : unknown\n");
		break;
	}

	seq_putc(m, '\n');
	return 0;
}

static struct seq_operations crypto_seq_ops = {
	.start		= c_start,
	.next		= c_next,
	.stop		= c_stop,
	.show		= c_show
};

/* proc .open: bind the seq_operations above to the opened file. */
static int crypto_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &crypto_seq_ops);
}

static struct file_operations proc_crypto_ops = {
	.open		= crypto_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release
};

/* Create /proc/crypto at boot; silently does nothing on failure. */
void __init crypto_init_proc(void)
{
	struct proc_dir_entry *proc;

	proc = create_proc_entry("crypto", 0, NULL);
	if (proc)
		proc->proc_fops = &proc_crypto_ops;
}
gpl-2.0
Elite-Kernels/elite_bullhead
drivers/mtd/chips/cfi_probe.c
11023
11986
/* Common Flash Interface probe code. (C) 2000 Red Hat. GPL'd. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mtd/xip.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/gen_probe.h> //#define DEBUG_CFI #ifdef DEBUG_CFI static void print_cfi_ident(struct cfi_ident *); #endif static int cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi); static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi); struct mtd_info *cfi_probe(struct map_info *map); #ifdef CONFIG_MTD_XIP /* only needed for short periods, so this is rather simple */ #define xip_disable() local_irq_disable() #define xip_allowed(base, map) \ do { \ (void) map_read(map, base); \ xip_iprefetch(); \ local_irq_enable(); \ } while (0) #define xip_enable(base, map, cfi) \ do { \ cfi_qry_mode_off(base, map, cfi); \ xip_allowed(base, map); \ } while (0) #define xip_disable_qry(base, map, cfi) \ do { \ xip_disable(); \ cfi_qry_mode_on(base, map, cfi); \ } while (0) #else #define xip_disable() do { } while (0) #define xip_allowed(base, map) do { } while (0) #define xip_enable(base, map, cfi) do { } while (0) #define xip_disable_qry(base, map, cfi) do { } while (0) #endif /* check for QRY. 
in: interleave,type,mode ret: table index, <0 for error */ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi) { int i; if ((base + 0) >= map->size) { printk(KERN_NOTICE "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n", (unsigned long)base, map->size -1); return 0; } if ((base + 0xff) >= map->size) { printk(KERN_NOTICE "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n", (unsigned long)base + 0x55, map->size -1); return 0; } xip_disable(); if (!cfi_qry_mode_on(base, map, cfi)) { xip_enable(base, map, cfi); return 0; } if (!cfi->numchips) { /* This is the first time we're called. Set up the CFI stuff accordingly and return */ return cfi_chip_setup(map, cfi); } /* Check each previous chip to see if it's an alias */ for (i=0; i < (base >> cfi->chipshift); i++) { unsigned long start; if(!test_bit(i, chip_map)) { /* Skip location; no valid chip at this address */ continue; } start = i << cfi->chipshift; /* This chip should be in read mode if it's one we've already touched. */ if (cfi_qry_present(map, start, cfi)) { /* Eep. This chip also had the QRY marker. * Is it an alias for the new one? */ cfi_qry_mode_off(start, map, cfi); /* If the QRY marker goes away, it's an alias */ if (!cfi_qry_present(map, start, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); return 0; } /* Yes, it's actually got QRY for data. Most * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. */ /* FIXME: Use other modes to do a proper check */ cfi_qry_mode_off(base, map, cfi); if (cfi_qry_present(map, base, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); return 0; } } } /* OK, if we got to here, then none of the previous chips appear to be aliases for the current one. 
*/ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ cfi->numchips++; /* Put it back into Read Mode */ cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", map->name, cfi->interleave, cfi->device_type*8, base, map->bankwidth*8); return 1; } static int __xipram cfi_chip_setup(struct map_info *map, struct cfi_private *cfi) { int ofs_factor = cfi->interleave*cfi->device_type; __u32 base = 0; int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); int i; int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA; xip_enable(base, map, cfi); #ifdef DEBUG_CFI printk("Number of erase regions: %d\n", num_erase_regions); #endif if (!num_erase_regions) return 0; cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); if (!cfi->cfiq) { printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); return 0; } memset(cfi->cfiq,0,sizeof(struct cfi_ident)); cfi->cfi_mode = CFI_MODE_CFI; cfi->sector_erase_cmd = CMD(0x30); /* Read the CFI info structure */ xip_disable_qry(base, map, cfi); for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); /* Do any necessary byteswapping */ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR); cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID); cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR); cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc); cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize); #ifdef DEBUG_CFI /* Dump the information therein */ print_cfi_ident(cfi->cfiq); #endif for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]); #ifdef DEBUG_CFI printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n", i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, 
(cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); #endif } if (cfi->cfiq->P_ID == P_ID_SST_OLD) { addr_unlock1 = 0x5555; addr_unlock2 = 0x2AAA; } /* * Note we put the device back into Read Mode BEFORE going into Auto * Select Mode, as some devices support nesting of modes, others * don't. This way should always work. * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and * so should be treated as nops or illegal (and so put the device * back into Read Mode, which is a nop in this case). */ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL); cfi->mfr = cfi_read_query16(map, base); cfi->id = cfi_read_query16(map, base + ofs_factor); /* Get AMD/Spansion extended JEDEC ID */ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | cfi_read_query(map, base + 0xf * ofs_factor); /* Put it back into Read Mode */ cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. 
Manufacturer ID %#08x Chip ID %#08x\n", map->name, cfi->interleave, cfi->device_type*8, base, map->bankwidth*8, cfi->mfr, cfi->id); return 1; } #ifdef DEBUG_CFI static char *vendorname(__u16 vendor) { switch (vendor) { case P_ID_NONE: return "None"; case P_ID_INTEL_EXT: return "Intel/Sharp Extended"; case P_ID_AMD_STD: return "AMD/Fujitsu Standard"; case P_ID_INTEL_STD: return "Intel/Sharp Standard"; case P_ID_AMD_EXT: return "AMD/Fujitsu Extended"; case P_ID_WINBOND: return "Winbond Standard"; case P_ID_ST_ADV: return "ST Advanced"; case P_ID_MITSUBISHI_STD: return "Mitsubishi Standard"; case P_ID_MITSUBISHI_EXT: return "Mitsubishi Extended"; case P_ID_SST_PAGE: return "SST Page Write"; case P_ID_SST_OLD: return "SST 39VF160x/39VF320x"; case P_ID_INTEL_PERFORMANCE: return "Intel Performance Code"; case P_ID_INTEL_DATA: return "Intel Data"; case P_ID_RESERVED: return "Not Allowed / Reserved for Future Use"; default: return "Unknown"; } } static void print_cfi_ident(struct cfi_ident *cfip) { #if 0 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') { printk("Invalid CFI ident structure.\n"); return; } #endif printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID)); if (cfip->P_ADR) printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR); else printk("No Primary Algorithm Table\n"); printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID)); if (cfip->A_ADR) printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR); else printk("No Alternate Algorithm Table\n"); printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf); printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf); if (cfip->VppMin) { printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf); printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf); } else printk("No Vpp line\n"); printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); 
printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); } else printk("Full buffer write not supported\n"); printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp); printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp)); if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) { printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp)); } else printk("Chip erase not supported\n"); printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); switch(cfip->InterfaceDesc) { case CFI_INTERFACE_X8_ASYNC: printk(" - x8-only asynchronous interface\n"); break; case CFI_INTERFACE_X16_ASYNC: printk(" - x16-only asynchronous interface\n"); break; case CFI_INTERFACE_X8_BY_X16_ASYNC: printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); break; case CFI_INTERFACE_X32_ASYNC: printk(" - x32-only asynchronous interface\n"); break; case CFI_INTERFACE_X16_BY_X32_ASYNC: printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); break; case CFI_INTERFACE_NOT_ALLOWED: printk(" - Not Allowed / Reserved\n"); break; default: printk(" - Unknown\n"); break; } printk("Max. 
bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize); printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions); } #endif /* DEBUG_CFI */ static struct chip_probe cfi_chip_probe = { .name = "CFI", .probe_chip = cfi_probe_chip }; struct mtd_info *cfi_probe(struct map_info *map) { /* * Just use the generic probe stuff to call our CFI-specific * chip_probe routine in all the possible permutations, etc. */ return mtd_do_chip_probe(map, &cfi_chip_probe); } static struct mtd_chip_driver cfi_chipdrv = { .probe = cfi_probe, .name = "cfi_probe", .module = THIS_MODULE }; static int __init cfi_probe_init(void) { register_mtd_chip_driver(&cfi_chipdrv); return 0; } static void __exit cfi_probe_exit(void) { unregister_mtd_chip_driver(&cfi_chipdrv); } module_init(cfi_probe_init); module_exit(cfi_probe_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
gpl-2.0
Jackeagle/htc_butterfly_asia_india_4.4.2_kernel
arch/sh/mm/kmap.c
11791
1664
/*
 * arch/sh/mm/kmap.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/* Walk the kernel page tables down to the PTE backing a fixmap address. */
#define kmap_get_fixmap_pte(vaddr)                                     \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

/* PTE of the first coherent-kmap fixmap slot, cached at init time. */
static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}

/*
 * Map @page at a fixmap slot whose cache colour matches @addr, so reads
 * and writes through the mapping stay coherent with the user mapping.
 * Pagefaults remain disabled until the matching kunmap_coherent() call.
 */
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));

	pagefault_disable();

	/*
	 * Pick a slot from this CPU's bank, offset by the cache colour
	 * of @addr.  NOTE(review): the subtraction from FIX_CMAP_END
	 * assumes fixmap indices grow downward in virtual addresses —
	 * confirm against this arch's fixmap layout before changing.
	 */
	idx = FIX_CMAP_END -
		(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
		 (FIX_N_COLOURS * smp_processor_id()));

	vaddr = __fix_to_virt(idx);

	/* a live PTE here would mean an unbalanced kmap/kunmap pair */
	BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
	set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}

/*
 * Undo kmap_coherent(): purge the cache lines for the temporary mapping,
 * clear the PTE and its TLB entry, and re-enable pagefaults.  Addresses
 * below FIXADDR_START were never fixmapped and need no teardown.
 */
void kunmap_coherent(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START) {
		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
		enum fixed_addresses idx = __virt_to_fix(vaddr);

		/* XXX.. Kill this later, here for sanity at the moment.. */
		__flush_purge_region((void *)vaddr, PAGE_SIZE);

		pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
		local_flush_tlb_one(get_asid(), vaddr);
	}

	pagefault_enable();
}
gpl-2.0
atilag/hammerhead-nexus5-kernel
net/bridge/netfilter/ebt_802_3.c
13583
1727
/* * 802_3 * * Author: * Chris Vitale csv@bluetail.com * * May 2003 * */ #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_802_3.h> static bool ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_802_3_info *info = par->matchinfo; const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); __be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type; if (info->bitmask & EBT_802_3_SAP) { if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP)) return false; if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP)) return false; } if (info->bitmask & EBT_802_3_TYPE) { if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE)) return false; if (FWINV(info->type != type, EBT_802_3_TYPE)) return false; } return true; } static int ebt_802_3_mt_check(const struct xt_mtchk_param *par) { const struct ebt_802_3_info *info = par->matchinfo; if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK) return -EINVAL; return 0; } static struct xt_match ebt_802_3_mt_reg __read_mostly = { .name = "802_3", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_802_3_mt, .checkentry = ebt_802_3_mt_check, .matchsize = sizeof(struct ebt_802_3_info), .me = THIS_MODULE, }; static int __init ebt_802_3_init(void) { return xt_register_match(&ebt_802_3_mt_reg); } static void __exit ebt_802_3_fini(void) { xt_unregister_match(&ebt_802_3_mt_reg); } module_init(ebt_802_3_init); module_exit(ebt_802_3_fini); MODULE_DESCRIPTION("Ebtables: DSAP/SSAP field and SNAP type matching"); MODULE_LICENSE("GPL");
gpl-2.0
silence-star/android_kernel_nubia_NX503A
net/bridge/netfilter/ebt_802_3.c
13583
1727
/* * 802_3 * * Author: * Chris Vitale csv@bluetail.com * * May 2003 * */ #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_802_3.h> static bool ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_802_3_info *info = par->matchinfo; const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); __be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type; if (info->bitmask & EBT_802_3_SAP) { if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP)) return false; if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP)) return false; } if (info->bitmask & EBT_802_3_TYPE) { if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE)) return false; if (FWINV(info->type != type, EBT_802_3_TYPE)) return false; } return true; } static int ebt_802_3_mt_check(const struct xt_mtchk_param *par) { const struct ebt_802_3_info *info = par->matchinfo; if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK) return -EINVAL; return 0; } static struct xt_match ebt_802_3_mt_reg __read_mostly = { .name = "802_3", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_802_3_mt, .checkentry = ebt_802_3_mt_check, .matchsize = sizeof(struct ebt_802_3_info), .me = THIS_MODULE, }; static int __init ebt_802_3_init(void) { return xt_register_match(&ebt_802_3_mt_reg); } static void __exit ebt_802_3_fini(void) { xt_unregister_match(&ebt_802_3_mt_reg); } module_init(ebt_802_3_init); module_exit(ebt_802_3_fini); MODULE_DESCRIPTION("Ebtables: DSAP/SSAP field and SNAP type matching"); MODULE_LICENSE("GPL");
gpl-2.0
vvavrychuk/glibc
stdio-common/reg-printf.c
16
2640
/* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <limits.h> #include <printf.h> #include <stddef.h> #include <stdlib.h> #include <bits/libc-lock.h> /* Array of functions indexed by format character. */ libc_freeres_ptr (printf_arginfo_size_function **__printf_arginfo_table) attribute_hidden; printf_function **__printf_function_table attribute_hidden; __libc_lock_define_initialized (static, lock) int __register_printf_specifier (int, printf_function, printf_arginfo_size_function); int __register_printf_function (int, printf_function, printf_arginfo_function); /* Register FUNC to be called to format SPEC specifiers. 
*/ int __register_printf_specifier (spec, converter, arginfo) int spec; printf_function converter; printf_arginfo_size_function arginfo; { if (spec < 0 || spec > (int) UCHAR_MAX) { __set_errno (EINVAL); return -1; } int result = 0; __libc_lock_lock (lock); if (__printf_function_table == NULL) { __printf_arginfo_table = (printf_arginfo_size_function **) calloc (UCHAR_MAX + 1, sizeof (void *) * 2); if (__printf_arginfo_table == NULL) { result = -1; goto out; } __printf_function_table = (printf_function **) (__printf_arginfo_table + UCHAR_MAX + 1); } __printf_function_table[spec] = converter; __printf_arginfo_table[spec] = arginfo; out: __libc_lock_unlock (lock); return result; } weak_alias (__register_printf_specifier, register_printf_specifier) /* Register FUNC to be called to format SPEC specifiers. */ int __register_printf_function (spec, converter, arginfo) int spec; printf_function converter; printf_arginfo_function arginfo; { return __register_printf_specifier (spec, converter, (printf_arginfo_size_function*) arginfo); } weak_alias (__register_printf_function, register_printf_function)
gpl-2.0
VasilyNemkov/percona-xtrabackup
storage/innobase/xtrabackup/src/libarchive/tar/test/test_extract_tar_bz2.c
16
2000
/*-
 * Copyright (c) 2012 Michihiro NAKAJIMA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "test.h"
__FBSDID("$FreeBSD$");

/*
 * Verify that the tar front-end can extract a bzip2-compressed archive
 * and that the extracted files carry the expected contents.
 */
DEFINE_TEST(test_extract_tar_bz2)
{
	const char *reffile = "test_extract.tar.bz2";
	int f;

	extract_reference_file(reffile);
	/*
	 * Probe with a listing first: f == 0 presumably means the program
	 * can decode bz2 itself; otherwise fall back on an external bzip2
	 * binary if one is available — NOTE(review): confirm canBzip2()
	 * semantics against the test harness.
	 */
	f = systemf("%s -tf %s >test.out 2>test.err", testprog, reffile);
	if (f == 0 || canBzip2()) {
		/* Extract the archive and check its known contents. */
		assertEqualInt(0, systemf("%s -xf %s >test.out 2>test.err",
		    testprog, reffile));

		assertFileExists("file1");
		assertTextFileContents("contents of file1.\n", "file1");
		assertFileExists("file2");
		assertTextFileContents("contents of file2.\n", "file2");
		assertEmptyFile("test.out");
		assertEmptyFile("test.err");
	} else {
		skipping("It seems bzip2 is not supported on this platform");
	}
}
gpl-2.0
xtreamerdev/linux-xtr
arch/ppc/platforms/pmac_setup.c
16
18145
/* * arch/ppc/platforms/setup.c * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) * * Derived from "arch/alpha/kernel/setup.c" * Copyright (C) 1995 Linus Torvalds * * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* * bootup setup stuff.. */ #include <linux/config.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/a.out.h> #include <linux/tty.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/initrd.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <linux/ide.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> #include <asm/reg.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> #include <asm/mediabay.h> #include <asm/machdep.h> #include <asm/dma.h> #include <asm/bootx.h> #include <asm/cputable.h> #include <asm/btext.h> #include <asm/pmac_feature.h> #include <asm/time.h> #include <asm/of_device.h> #include <asm/mmu_context.h> #include "pmac_pic.h" #include "mem_pieces.h" #undef SHOW_GATWICK_IRQS extern long pmac_time_init(void); extern unsigned long pmac_get_rtc_time(void); 
extern int pmac_set_rtc_time(unsigned long nowtime); extern void pmac_read_rtc_time(void); extern void pmac_calibrate_decr(void); extern void pmac_pcibios_fixup(void); extern void pmac_find_bridges(void); extern unsigned long pmac_ide_get_base(int index); extern void pmac_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, unsigned long ctrl_port, int *irq); extern void pmac_nvram_update(void); extern unsigned char pmac_nvram_read_byte(int addr); extern void pmac_nvram_write_byte(int addr, unsigned char val); extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial); extern void pmac_pcibios_after_init(void); extern int of_show_percpuinfo(struct seq_file *m, int i); struct device_node *memory_node; unsigned char drive_info; int ppc_override_l2cr = 0; int ppc_override_l2cr_value; int has_l2cache = 0; static int current_root_goodness = -1; extern int pmac_newworld; #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ extern void zs_kgdb_hook(int tty_num); static void ohare_init(void); #ifdef CONFIG_BOOTX_TEXT void pmac_progress(char *s, unsigned short hex); #endif sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; #ifdef CONFIG_SMP extern struct smp_ops_t psurge_smp_ops; extern struct smp_ops_t core99_smp_ops; #endif /* CONFIG_SMP */ int __pmac pmac_show_cpuinfo(struct seq_file *m) { struct device_node *np; char *pp; int plen; int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_MODEL, 0); unsigned int mbflags = (unsigned int)pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_FLAGS, 0); char* mbname; if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (int)&mbname) != 0) mbname = "Unknown"; /* find motherboard type */ seq_printf(m, "machine\t\t: "); np = find_devices("device-tree"); if (np != NULL) { pp = (char *) get_property(np, "model", NULL); if (pp != NULL) seq_printf(m, "%s\n", pp); else seq_printf(m, "PowerMac\n"); pp = (char *) get_property(np, "compatible", &plen); if (pp != 
NULL) { seq_printf(m, "motherboard\t:"); while (plen > 0) { int l = strlen(pp) + 1; seq_printf(m, " %s", pp); plen -= l; pp += l; } seq_printf(m, "\n"); } } else seq_printf(m, "PowerMac\n"); /* print parsed model */ seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname); seq_printf(m, "pmac flags\t: %08x\n", mbflags); /* find l2 cache info */ np = find_devices("l2-cache"); if (np == 0) np = find_type_devices("cache"); if (np != 0) { unsigned int *ic = (unsigned int *) get_property(np, "i-cache-size", NULL); unsigned int *dc = (unsigned int *) get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; if (get_property(np, "cache-unified", NULL) != 0 && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) seq_printf(m, " %dK instruction", *ic / 1024); if (dc) seq_printf(m, "%s %dK data", (ic? " +": ""), *dc / 1024); } pp = get_property(np, "ram-type", NULL); if (pp) seq_printf(m, " %s", pp); seq_printf(m, "\n"); } /* find ram info */ np = find_devices("memory"); if (np != 0) { int n; struct reg_property *reg = (struct reg_property *) get_property(np, "reg", &n); if (reg != 0) { unsigned long total = 0; for (n /= sizeof(struct reg_property); n > 0; --n) total += (reg++)->size; seq_printf(m, "memory\t\t: %luMB\n", total >> 20); } } /* Checks "l2cr-value" property in the registry */ np = find_devices("cpus"); if (np == 0) np = find_type_devices("cpu"); if (np != 0) { unsigned int *l2cr = (unsigned int *) get_property(np, "l2cr-value", NULL); if (l2cr != 0) { seq_printf(m, "l2cr override\t: 0x%x\n", *l2cr); } } /* Indicate newworld/oldworld */ seq_printf(m, "pmac-generation\t: %s\n", pmac_newworld ? 
"NewWorld" : "OldWorld"); return 0; } int __openfirmware pmac_show_percpuinfo(struct seq_file *m, int i) { #ifdef CONFIG_CPU_FREQ_PMAC extern unsigned int pmac_get_one_cpufreq(int i); unsigned int freq = pmac_get_one_cpufreq(i); if (freq != 0) { seq_printf(m, "clock\t\t: %dMHz\n", freq/1000); return 0; } #endif /* CONFIG_CPU_FREQ_PMAC */ return of_show_percpuinfo(m, i); } static volatile u32 *sysctrl_regs; void __init pmac_setup_arch(void) { struct device_node *cpu; int *fp; unsigned long pvr; pvr = PVR_VER(mfspr(SPRN_PVR)); /* Set loops_per_jiffy to a half-way reasonable value, for use until calibrate_delay gets called. */ cpu = find_type_devices("cpu"); if (cpu != 0) { fp = (int *) get_property(cpu, "clock-frequency", NULL); if (fp != 0) { if (pvr == 4 || pvr >= 8) /* 604, G3, G4 etc. */ loops_per_jiffy = *fp / HZ; else /* 601, 603, etc. */ loops_per_jiffy = *fp / (2*HZ); } else loops_per_jiffy = 50000000 / HZ; } /* this area has the CPU identification register and some registers used by smp boards */ sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000); ohare_init(); /* Lookup PCI hosts */ pmac_find_bridges(); /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { struct device_node *np = find_devices("cpus"); if (np == 0) np = find_type_devices("cpu"); if (np != 0) { unsigned int *l2cr = (unsigned int *) get_property(np, "l2cr-value", NULL); if (l2cr != 0) { ppc_override_l2cr = 1; ppc_override_l2cr_value = *l2cr; _set_L2CR(0); _set_L2CR(ppc_override_l2cr_value); } } } if (ppc_override_l2cr) printk(KERN_INFO "L2CR overriden (0x%x), backside cache is %s\n", ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000) ? "enabled" : "disabled"); #ifdef CONFIG_KGDB zs_kgdb_hook(0); #endif #ifdef CONFIG_ADB_CUDA find_via_cuda(); #else if (find_devices("via-cuda")) { printk("WARNING ! 
Your machine is Cuda based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n"); } #endif #ifdef CONFIG_ADB_PMU find_via_pmu(); #else if (find_devices("via-pmu")) { printk("WARNING ! Your machine is PMU based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_PMU option !\n"); } #endif #ifdef CONFIG_NVRAM pmac_nvram_init(); #endif #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; else #endif ROOT_DEV = DEFAULT_ROOT_DEVICE; #ifdef CONFIG_SMP /* Check for Core99 */ if (find_devices("uni-n") || find_devices("u3")) ppc_md.smp_ops = &core99_smp_ops; else ppc_md.smp_ops = &psurge_smp_ops; #endif /* CONFIG_SMP */ pci_create_OF_bus_map(); } static void __init ohare_init(void) { /* * Turn on the L2 cache. * We assume that we have a PSX memory controller iff * we have an ohare I/O controller. */ if (find_devices("ohare") != NULL) { if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) { if (sysctrl_regs[4] & 0x10) sysctrl_regs[4] |= 0x04000020; else sysctrl_regs[4] |= 0x04000000; if(has_l2cache) printk(KERN_INFO "Level 2 cache enabled\n"); } } } extern char *bootpath; extern char *bootdevice; void *boot_host; int boot_target; int boot_part; extern dev_t boot_dev; #ifdef CONFIG_SCSI void __init note_scsi_host(struct device_node *node, void *host) { int l; char *p; l = strlen(node->full_name); if (bootpath != NULL && bootdevice != NULL && strncmp(node->full_name, bootdevice, l) == 0 && (bootdevice[l] == '/' || bootdevice[l] == 0)) { boot_host = host; /* * There's a bug in OF 1.0.5. (Why am I not surprised.) * If you pass a path like scsi/sd@1:0 to canon, it returns * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0 * That is, the scsi target number doesn't get preserved. * So we pick the target number out of bootpath and use that. 
*/ p = strstr(bootpath, "/sd@"); if (p != NULL) { p += 4; boot_target = simple_strtoul(p, NULL, 10); p = strchr(p, ':'); if (p != NULL) boot_part = simple_strtoul(p + 1, NULL, 10); } } } #endif #if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC) static dev_t __init find_ide_boot(void) { char *p; int n; dev_t __init pmac_find_ide_boot(char *bootdevice, int n); if (bootdevice == NULL) return 0; p = strrchr(bootdevice, '/'); if (p == NULL) return 0; n = p - bootdevice; return pmac_find_ide_boot(bootdevice, n); } #endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */ void __init find_boot_device(void) { #if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC) boot_dev = find_ide_boot(); #endif } static int initializing = 1; /* TODO: Merge the suspend-to-ram with the common code !!! * currently, this is a stub implementation for suspend-to-disk * only */ #ifdef CONFIG_SOFTWARE_SUSPEND static int pmac_pm_prepare(suspend_state_t state) { printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state); return 0; } static int pmac_pm_enter(suspend_state_t state) { printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state); /* Giveup the lazy FPU & vec so we don't have to back them * up from the low level code */ enable_kernel_fp(); #ifdef CONFIG_ALTIVEC if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) enable_kernel_altivec(); #endif /* CONFIG_ALTIVEC */ return 0; } static int pmac_pm_finish(suspend_state_t state) { printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state); /* Restore userland MMU context */ set_context(current->active_mm->context, current->active_mm->pgd); return 0; } static struct pm_ops pmac_pm_ops = { .pm_disk_mode = PM_DISK_SHUTDOWN, .prepare = pmac_pm_prepare, .enter = pmac_pm_enter, .finish = pmac_pm_finish, }; #endif /* CONFIG_SOFTWARE_SUSPEND */ static int pmac_late_init(void) { initializing = 0; #ifdef CONFIG_SOFTWARE_SUSPEND pm_set_ops(&pmac_pm_ops); #endif /* CONFIG_SOFTWARE_SUSPEND */ return 0; } late_initcall(pmac_late_init); /* can't be 
__init - can be called whenever a disk is first accessed */ void __pmac note_bootable_part(dev_t dev, int part, int goodness) { static int found_boot = 0; char *p; if (!initializing) return; if ((goodness <= current_root_goodness) && ROOT_DEV != DEFAULT_ROOT_DEVICE) return; p = strstr(saved_command_line, "root="); if (p != NULL && (p == saved_command_line || p[-1] == ' ')) return; if (!found_boot) { find_boot_device(); found_boot = 1; } if (!boot_dev || dev == boot_dev) { ROOT_DEV = dev + part; boot_dev = 0; current_root_goodness = goodness; } } void __pmac pmac_restart(char *cmd) { #ifdef CONFIG_ADB_CUDA struct adb_request req; #endif /* CONFIG_ADB_CUDA */ switch (sys_ctrler) { #ifdef CONFIG_ADB_CUDA case SYS_CTRLER_CUDA: cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM); for (;;) cuda_poll(); break; #endif /* CONFIG_ADB_CUDA */ #ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: pmu_restart(); break; #endif /* CONFIG_ADB_PMU */ default: ; } } void __pmac pmac_power_off(void) { #ifdef CONFIG_ADB_CUDA struct adb_request req; #endif /* CONFIG_ADB_CUDA */ switch (sys_ctrler) { #ifdef CONFIG_ADB_CUDA case SYS_CTRLER_CUDA: cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN); for (;;) cuda_poll(); break; #endif /* CONFIG_ADB_CUDA */ #ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: pmu_shutdown(); break; #endif /* CONFIG_ADB_PMU */ default: ; } } void __pmac pmac_halt(void) { pmac_power_off(); } /* * Read in a property describing some pieces of memory. 
*/ static int __init get_mem_prop(char *name, struct mem_pieces *mp) { struct reg_property *rp; int i, s; unsigned int *ip; int nac = prom_n_addr_cells(memory_node); int nsc = prom_n_size_cells(memory_node); ip = (unsigned int *) get_property(memory_node, name, &s); if (ip == NULL) { printk(KERN_ERR "error: couldn't get %s property on /memory\n", name); return 0; } s /= (nsc + nac) * 4; rp = mp->regions; for (i = 0; i < s; ++i, ip += nac+nsc) { if (nac >= 2 && ip[nac-2] != 0) continue; rp->address = ip[nac-1]; if (nsc >= 2 && ip[nac+nsc-2] != 0) rp->size = ~0U; else rp->size = ip[nac+nsc-1]; ++rp; } mp->n_regions = rp - mp->regions; /* Make sure the pieces are sorted. */ mem_pieces_sort(mp); mem_pieces_coalesce(mp); return 1; } /* * On systems with Open Firmware, collect information about * physical RAM and which pieces are already in use. * At this point, we have (at least) the first 8MB mapped with a BAT. * Our text, data, bss use something over 1MB, starting at 0. * Open Firmware may be using 1MB at the 4MB point. */ unsigned long __init pmac_find_end_of_memory(void) { unsigned long a, total; struct mem_pieces phys_mem; /* * Find out where physical memory is, and check that it * starts at 0 and is contiguous. It seems that RAM is * always physically contiguous on Power Macintoshes. * * Supporting discontiguous physical memory isn't hard, * it just makes the virtual <-> physical mapping functions * more complicated (or else you end up wasting space * in mem_map). 
*/ memory_node = find_devices("memory"); if (memory_node == NULL || !get_mem_prop("reg", &phys_mem) || phys_mem.n_regions == 0) panic("No RAM??"); a = phys_mem.regions[0].address; if (a != 0) panic("RAM doesn't start at physical address 0"); total = phys_mem.regions[0].size; if (phys_mem.n_regions > 1) { printk("RAM starting at 0x%x is not contiguous\n", phys_mem.regions[1].address); printk("Using RAM from 0 to 0x%lx\n", total-1); } return total; } void __init pmac_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { /* isa_io_base gets set in pmac_find_bridges */ isa_mem_base = PMAC_ISA_MEM_BASE; pci_dram_offset = PMAC_PCI_DRAM_OFFSET; ISA_DMA_THRESHOLD = ~0L; DMA_MODE_READ = 1; DMA_MODE_WRITE = 2; ppc_md.setup_arch = pmac_setup_arch; ppc_md.show_cpuinfo = pmac_show_cpuinfo; ppc_md.show_percpuinfo = pmac_show_percpuinfo; ppc_md.irq_canonicalize = NULL; ppc_md.init_IRQ = pmac_pic_init; ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */ ppc_md.pcibios_fixup = pmac_pcibios_fixup; ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook; ppc_md.pcibios_after_init = pmac_pcibios_after_init; ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; ppc_md.restart = pmac_restart; ppc_md.power_off = pmac_power_off; ppc_md.halt = pmac_halt; ppc_md.time_init = pmac_time_init; ppc_md.set_rtc_time = pmac_set_rtc_time; ppc_md.get_rtc_time = pmac_get_rtc_time; ppc_md.calibrate_decr = pmac_calibrate_decr; ppc_md.find_end_of_memory = pmac_find_end_of_memory; ppc_md.feature_call = pmac_do_feature_call; #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) #ifdef CONFIG_BLK_DEV_IDE_PMAC ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports; ppc_ide_md.default_io_base = pmac_ide_get_base; #endif /* CONFIG_BLK_DEV_IDE_PMAC */ #endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */ #ifdef CONFIG_BOOTX_TEXT ppc_md.progress = pmac_progress; #endif /* CONFIG_BOOTX_TEXT */ if (ppc_md.progress) 
ppc_md.progress("pmac_init(): exit", 0); } #ifdef CONFIG_BOOTX_TEXT void __init pmac_progress(char *s, unsigned short hex) { if (boot_text_mapped) { btext_drawstring(s); btext_drawchar('\n'); } } #endif /* CONFIG_BOOTX_TEXT */ static int __init pmac_declare_of_platform_devices(void) { struct device_node *np; np = find_devices("uni-n"); if (np) { for (np = np->child; np != NULL; np = np->sibling) if (strncmp(np->name, "i2c", 3) == 0) { of_platform_device_create(np, "uni-n-i2c"); break; } } np = find_devices("u3"); if (np) { for (np = np->child; np != NULL; np = np->sibling) if (strncmp(np->name, "i2c", 3) == 0) { of_platform_device_create(np, "u3-i2c"); break; } } np = find_devices("valkyrie"); if (np) of_platform_device_create(np, "valkyrie"); np = find_devices("platinum"); if (np) of_platform_device_create(np, "platinum"); return 0; } device_initcall(pmac_declare_of_platform_devices);
gpl-2.0
noblehng/glibc
sysdeps/unix/sysv/linux/sparc/sparc64/swapcontext.c
16
1681
/* Copyright (C) 2001-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Jakub Jelinek <jakub@redhat.com>. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <ucontext.h> extern int __getcontext (ucontext_t *ucp); extern int __setcontext (const ucontext_t *ucp, int restoremask); int __swapcontext (ucontext_t *oucp, const ucontext_t *ucp) { extern void __swapcontext_ret (void); /* Save the current machine context to oucp. */ __getcontext (oucp); /* Modify oucp to skip the __setcontext call on reactivation. */ oucp->uc_mcontext.mc_gregs[MC_PC] = (long) __swapcontext_ret; oucp->uc_mcontext.mc_gregs[MC_NPC] = ((long) __swapcontext_ret) + 4; /* Restore the machine context in ucp. */ __setcontext (ucp, 1); return 0; } asm (" \n\ .text \n\ .type __swapcontext_ret, #function \n\ __swapcontext_ret: \n\ return %i7 + 8 \n\ clr %o0 \n\ .size __swapcontext_ret, .-__swapcontext_ret \n\ "); weak_alias (__swapcontext, swapcontext)
gpl-2.0
xingrz/android_kernel_nubia_msm8996
drivers/mmc/host/sdhci-acpi.c
272
11603
/* * Secure Digital Host Controller Interface ACPI driver. * * Copyright (c) 2012, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/init.h> #include <linux/export.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/compiler.h> #include <linux/stddef.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/acpi.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/delay.h> #include <linux/mmc/host.h> #include <linux/mmc/pm.h> #include <linux/mmc/slot-gpio.h> #include <linux/mmc/sdhci.h> #include "sdhci.h" enum { SDHCI_ACPI_SD_CD = BIT(0), SDHCI_ACPI_RUNTIME_PM = BIT(1), SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2), }; struct sdhci_acpi_chip { const struct sdhci_ops *ops; unsigned int quirks; unsigned int quirks2; unsigned long caps; unsigned int caps2; mmc_pm_flag_t pm_caps; }; struct sdhci_acpi_slot { const struct sdhci_acpi_chip *chip; unsigned int quirks; unsigned int quirks2; unsigned long caps; unsigned int caps2; mmc_pm_flag_t pm_caps; unsigned int flags; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); }; struct sdhci_acpi_host { struct sdhci_host *host; 
const struct sdhci_acpi_slot *slot; struct platform_device *pdev; bool use_runtime_pm; }; static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) { return c->slot && (c->slot->flags & flag); } static int sdhci_acpi_enable_dma(struct sdhci_host *host) { return 0; } static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) { u8 reg; reg = sdhci_readb(host, SDHCI_POWER_CONTROL); reg |= 0x10; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); /* For eMMC, minimum is 1us but give it 9us for good measure */ udelay(9); reg &= ~0x10; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); /* For eMMC, minimum is 200us but give it 300us for good measure */ usleep_range(300, 1000); } static const struct sdhci_ops sdhci_acpi_ops_dflt = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_acpi_enable_dma, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_ops sdhci_acpi_ops_int = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_acpi_enable_dma, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_acpi_int_hw_reset, }; static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { .ops = &sdhci_acpi_ops_int, }; static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, const char *hid, const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host; if (!c || !c->host) return 0; host = c->host; /* Platform specific code during emmc proble slot goes here */ if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") && sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ return 0; } static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev, const char *hid, const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host; if (!c || !c->host) return 0; host = c->host; /* Platform specific code during emmc proble slot goes here */ return 0; } static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, const char *hid, const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host; if (!c || !c->host || !c->slot) return 0; host = c->host; /* Platform specific code during emmc proble slot goes here */ return 0; } static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR, .caps2 = MMC_CAP2_HC_ERASE_SZ, .flags = SDHCI_ACPI_RUNTIME_PM, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC, .probe_slot = sdhci_acpi_emmc_probe_slot, }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, .probe_slot = sdhci_acpi_sdio_probe_slot, }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | SDHCI_ACPI_RUNTIME_PM, .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, .probe_slot = sdhci_acpi_sd_probe_slot, }; struct sdhci_acpi_uid_slot { const char *hid; const char *uid; const struct sdhci_acpi_slot *slot; }; static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, { "80860F16" , NULL, &sdhci_acpi_slot_int_sd }, { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio }, { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, { 
"INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, { "PNP0D40" }, { }, }; static const struct acpi_device_id sdhci_acpi_ids[] = { { "80860F14" }, { "80860F16" }, { "INT33BB" }, { "INT33C6" }, { "INT3436" }, { "PNP0D40" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid, const char *uid) { const struct sdhci_acpi_uid_slot *u; for (u = sdhci_acpi_uids; u->hid; u++) { if (strcmp(u->hid, hid)) continue; if (!u->uid) return u->slot; if (uid && !strcmp(u->uid, uid)) return u->slot; } return NULL; } static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *device; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; resource_size_t len; const char *hid; const char *uid; int err; if (acpi_bus_get_device(handle, &device)) return -ENODEV; if (acpi_bus_get_status(device) || !device->status.present) return -ENODEV; hid = acpi_device_hid(device); uid = device->pnp.unique_id; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) return -ENOMEM; len = resource_size(iomem); if (len < 0x100) dev_err(dev, "Invalid iomem size!\n"); if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) return -ENOMEM; host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); if (IS_ERR(host)) return PTR_ERR(host); c = sdhci_priv(host); c->host = host; c->slot = sdhci_acpi_get_slot(hid, uid); c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); platform_set_drvdata(pdev, c); host->hw_name = "ACPI"; host->ops = &sdhci_acpi_ops_dflt; host->irq = platform_get_irq(pdev, 0); host->ioaddr = devm_ioremap_nocache(dev, iomem->start, resource_size(iomem)); if (host->ioaddr == NULL) { err = -ENOMEM; goto err_free; } if (!dev->dma_mask) { u64 dma_mask; if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { /* 
64-bit DMA is not supported at present */ dma_mask = DMA_BIT_MASK(32); } else { dma_mask = DMA_BIT_MASK(32); } err = dma_coerce_mask_and_coherent(dev, dma_mask); if (err) goto err_free; } if (c->slot) { if (c->slot->probe_slot) { err = c->slot->probe_slot(pdev, hid, uid); if (err) goto err_free; } if (c->slot->chip) { host->ops = c->slot->chip->ops; host->quirks |= c->slot->chip->quirks; host->quirks2 |= c->slot->chip->quirks2; host->mmc->caps |= c->slot->chip->caps; host->mmc->caps2 |= c->slot->chip->caps2; host->mmc->pm_caps |= c->slot->chip->pm_caps; } host->quirks |= c->slot->quirks; host->quirks2 |= c->slot->quirks2; host->mmc->caps |= c->slot->caps; host->mmc->caps2 |= c->slot->caps2; host->mmc->pm_caps |= c->slot->pm_caps; } host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) { dev_warn(dev, "failed to setup card detect gpio\n"); c->use_runtime_pm = false; } } err = sdhci_add_host(host); if (err) goto err_free; if (c->use_runtime_pm) { pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, 1); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); } return 0; err_free: sdhci_free_host(c->host); return err; } static int sdhci_acpi_remove(struct platform_device *pdev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; int dead; if (c->use_runtime_pm) { pm_runtime_get_sync(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); } if (c->slot && c->slot->remove_slot) c->slot->remove_slot(pdev); dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_remove_host(c->host, dead); sdhci_free_host(c->host); return 0; } #ifdef CONFIG_PM_SLEEP static int sdhci_acpi_suspend(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); return sdhci_suspend_host(c->host); } static int 
sdhci_acpi_resume(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); return sdhci_resume_host(c->host); } #else #define sdhci_acpi_suspend NULL #define sdhci_acpi_resume NULL #endif #ifdef CONFIG_PM_RUNTIME static int sdhci_acpi_runtime_suspend(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); return sdhci_runtime_suspend_host(c->host); } static int sdhci_acpi_runtime_resume(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); return sdhci_runtime_resume_host(c->host); } static int sdhci_acpi_runtime_idle(struct device *dev) { return 0; } #endif static const struct dev_pm_ops sdhci_acpi_pm_ops = { .suspend = sdhci_acpi_suspend, .resume = sdhci_acpi_resume, SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend, sdhci_acpi_runtime_resume, sdhci_acpi_runtime_idle) }; static struct platform_driver sdhci_acpi_driver = { .driver = { .name = "sdhci-acpi", .owner = THIS_MODULE, .acpi_match_table = sdhci_acpi_ids, .pm = &sdhci_acpi_pm_ops, }, .probe = sdhci_acpi_probe, .remove = sdhci_acpi_remove, }; module_platform_driver(sdhci_acpi_driver); MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); MODULE_AUTHOR("Adrian Hunter"); MODULE_LICENSE("GPL v2");
gpl-2.0
Split-Screen/android_kernel_motorola_msm8610
drivers/usb/serial/ftdi_sio.c
272
89413
/* * USB FTDI SIO driver * * Copyright (C) 2009 - 2010 * Johan Hovold (jhovold@gmail.com) * Copyright (C) 1999 - 2001 * Greg Kroah-Hartman (greg@kroah.com) * Bill Ryder (bryder@sgi.com) * Copyright (C) 2002 * Kuba Ober (kuba@mareimbrium.org) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * See Documentation/usb/usb-serial.txt for more information on using this * driver * * See http://ftdi-usb-sio.sourceforge.net for up to date testing info * and extra documentation * * Change entries from 2004 and earlier can be found in versions of this * file in kernel versions prior to the 2.6.24 release. * */ /* Bill Ryder - bryder@sgi.com - wrote the FTDI_SIO implementation */ /* Thanx to FTDI for so kindly providing details of the protocol required */ /* to talk to the device */ /* Thanx to gkh and the rest of the usb dev group for all code I have assimilated :-) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/serial.h> #include <linux/usb/serial.h> #include "ftdi_sio.h" #include "ftdi_sio_ids.h" /* * Version Information */ #define DRIVER_VERSION "v1.6.0" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>" #define DRIVER_DESC "USB FTDI Serial Converters Driver" static bool debug; static __u16 vendor = FTDI_VID; static __u16 product; struct ftdi_private { struct kref kref; enum ftdi_chip_type chip_type; /* type of device, either SIO or FT8U232AM */ int baud_base; /* baud base clock 
for divisor setting */ int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */ __u16 last_set_data_urb_value ; /* the last data state set - needed for doing * a break */ int flags; /* some ASYNC_xxxx flags are supported */ unsigned long last_dtr_rts; /* saved modem control outputs */ struct async_icount icount; wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ char prev_status; /* Used for TIOCMIWAIT */ bool dev_gone; /* Used to abort TIOCMIWAIT */ char transmit_empty; /* If transmitter is empty or not */ struct usb_serial_port *port; __u16 interface; /* FT2232C, FT2232H or FT4232H port interface (0 for FT232/245) */ speed_t force_baud; /* if non-zero, force the baud rate to this value */ int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ unsigned int latency; /* latency setting in use */ unsigned short max_packet_size; struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */ }; /* struct ftdi_sio_quirk is used by devices requiring special attention. */ struct ftdi_sio_quirk { int (*probe)(struct usb_serial *); /* Special settings for probed ports. 
*/ void (*port_probe)(struct ftdi_private *); }; static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); static int ftdi_NDI_device_setup(struct usb_serial *serial); static int ftdi_stmclite_probe(struct usb_serial *serial); static int ftdi_8u2232c_probe(struct usb_serial *serial); static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); static struct ftdi_sio_quirk ftdi_jtag_quirk = { .probe = ftdi_jtag_probe, }; static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { .probe = ftdi_mtxorb_hack_setup, }; static struct ftdi_sio_quirk ftdi_NDI_device_quirk = { .probe = ftdi_NDI_device_setup, }; static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { .port_probe = ftdi_USB_UIRT_setup, }; static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { .port_probe = ftdi_HE_TIRA1_setup, }; static struct ftdi_sio_quirk ftdi_stmclite_quirk = { .probe = ftdi_stmclite_probe, }; static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { .probe = ftdi_8u2232c_probe, }; /* * The 8U232AM has the same API as the sio except for: * - it can support MUCH higher baudrates; up to: * o 921600 for RS232 and 2000000 for RS422/485 at 48MHz * o 230400 at 12MHz * so .. 8U232AM's baudrate setting codes are different * - it has a two byte status code. * - it returns characters every 16ms (the FTDI does it every 40ms) * * the bcdDevice value is used to differentiate FT232BM and FT245BM from * the earlier FT8U232AM and FT8U232BM. For now, include all known VID/PID * combinations in both tables. * FIXME: perhaps bcdDevice can also identify 12MHz FT8U232AM devices, * but I don't know if those ever went into mass production. [Ian Abbott] */ /* * Device ID not listed? Test via module params product/vendor or * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 
*/ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DMX4ALL) }, { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) , .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { 
USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_633_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_631_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_635_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) }, { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) }, { USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0103_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0104_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0105_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0106_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0107_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0108_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0109_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010E_PID) }, { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0110_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0111_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0112_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0113_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0114_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0115_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0116_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0117_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0118_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0119_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0120_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0121_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0122_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0123_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0124_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0125_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0126_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0127_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0128_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0129_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012C_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012F_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_0130_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0131_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0132_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0133_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0134_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0135_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0136_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0137_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0138_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0139_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0140_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0141_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0142_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0143_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0144_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0145_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0146_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0147_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0148_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0149_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0150_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0151_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0152_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0153_PID), 
.driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0154_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0155_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0156_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0157_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0158_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0159_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0160_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0161_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0162_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0163_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0164_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0165_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0166_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0167_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0168_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0169_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0170_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_0171_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0172_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0173_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0174_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0175_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0176_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0177_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0178_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0179_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0180_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0181_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0182_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0183_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0184_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0185_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0186_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0187_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0188_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0189_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0190_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0191_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0192_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0193_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0194_PID) }, { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0195_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0196_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0197_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0198_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0199_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019A_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019B_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019C_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019D_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019E_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019F_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B7_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_01B8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DB_PID) }, { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01ED_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EF_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F0_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F1_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F3_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F4_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F5_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F6_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F7_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F8_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F9_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FA_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FB_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FC_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_01FF_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, { USB_DEVICE(FTDI_VID, FTDI_USBX_707_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2104_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2106_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_4_PID) }, { 
USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_4_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_5_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), .driver_info = (kernel_ulong_t)&ftdi_USB_UIRT_quirk }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_1) }, { USB_DEVICE(FTDI_VID, PROTEGO_R2X0) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_3) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_4) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E808_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E809_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80A_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80E_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E888_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E889_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88A_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88C_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_GUDEADS_E88D_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88E_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UO100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UM100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UR100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_ALC8500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PYRAMID_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1000PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_US485_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PICPRO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PCMCIA_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PK1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_RS232MON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* * ELV devices: */ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UDF77_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UIO88_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UAD8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UDA7_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USI2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_T1100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_PCD200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_ULA200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_CSI8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1000DL_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_ELV_PCK100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_1_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) }, { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) }, { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) }, { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) }, { USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) }, { USB_DEVICE(FALCOM_VID, FALCOM_SAMBA_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) }, { USB_DEVICE(BANDB_VID, 
BANDB_USO9ML2DR_2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) }, { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) }, { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) }, { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_3_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_4_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_YS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y8_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_IC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_DB9_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_RS232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y9_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_VCP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_D2XX_PID) }, { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HRC_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16IC_PID) }, { USB_DEVICE(KOBIL_VID, KOBIL_CONV_B1_PID) }, { USB_DEVICE(KOBIL_VID, KOBIL_CONV_KAAN_PID) }, { USB_DEVICE(POSIFLEX_VID, POSIFLEX_PP7000_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TTUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ECLO_COM_1WIRE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) }, { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) }, { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID), .driver_info = 
(kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, /* Papouch devices based on FTDI chip */ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) }, { 
USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, { USB_DEVICE(ATMEL_VID, STK541_PID) }, { USB_DEVICE(DE_VID, STB_PID) }, { USB_DEVICE(DE_VID, WHT_PID) }, { USB_DEVICE(ADI_VID, ADI_GNICE_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0x00) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, { USB_DEVICE(FTDI_VID, PI_C865_PID) }, { 
USB_DEVICE(FTDI_VID, PI_C857_PID) }, { USB_DEVICE(PI_VID, PI_C866_PID) }, { USB_DEVICE(PI_VID, PI_C663_PID) }, { USB_DEVICE(PI_VID, PI_C725_PID) }, { USB_DEVICE(PI_VID, PI_E517_PID) }, { USB_DEVICE(PI_VID, PI_C863_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, { USB_DEVICE(PI_VID, PI_C867_PID) }, { USB_DEVICE(PI_VID, PI_E609_PID) }, { USB_DEVICE(PI_VID, PI_E709_PID) }, { USB_DEVICE(PI_VID, PI_100F_PID) }, { USB_DEVICE(PI_VID, PI_1011_PID) }, { USB_DEVICE(PI_VID, PI_1012_PID) }, { USB_DEVICE(PI_VID, PI_1013_PID) }, { USB_DEVICE(PI_VID, PI_1014_PID) }, { USB_DEVICE(PI_VID, PI_1015_PID) }, { USB_DEVICE(PI_VID, PI_1016_PID) }, { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) }, { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
        { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
                .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
        { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
        /* Crucible Devices */
        { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
        { },                            /* Optional parameter entry */
        { }                             /* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, id_table_combined);

/*
 * USB driver glue: probe/disconnect are delegated to the generic
 * usb-serial core, which in turn calls our ftdi_sio_* hooks below.
 */
static struct usb_driver ftdi_driver = {
        .name =         "ftdi_sio",
        .probe =        usb_serial_probe,
        .disconnect =   usb_serial_disconnect,
        .id_table =     id_table_combined,
};

/* Human-readable chip names, indexed by the chip_type enum. */
static const char *ftdi_chip_name[] = {
        [SIO] = "SIO",  /* the serial part of FT8U100AX */
        [FT8U232AM] = "FT8U232AM",
        [FT232BM] = "FT232BM",
        [FT2232C] = "FT2232C",
        [FT232RL] = "FT232RL",
        [FT2232H] = "FT2232H",
        [FT4232H] = "FT4232H",
        [FT232H] = "FT232H",
        [FTX] = "FT-X"
};

/* Used for TIOCMIWAIT */
#define FTDI_STATUS_B0_MASK     (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
#define FTDI_STATUS_B1_MASK     (FTDI_RS_BI)
/* End TIOCMIWAIT */

/*
 * NOTE(review): the stray '=' below makes this macro expand to
 * "= (ASYNC_SPD_HI | ...)", which could never compile if it were used
 * anywhere; nothing in this file references it, so it appears to be
 * dead code and a candidate for removal.
 */
#define FTDI_IMPL_ASYNC_FLAGS = (ASYNC_SPD_HI | ASYNC_SPD_VHI \
 | ASYNC_SPD_CUST | ASYNC_SPD_SHI | ASYNC_SPD_WARP)

/* function prototypes for a FTDI serial converter */
static int ftdi_sio_probe(struct usb_serial
*serial, const struct usb_device_id *id);
static int ftdi_sio_port_probe(struct usb_serial_port *port);
static int ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ftdi_close(struct usb_serial_port *port);
static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
static void ftdi_process_read_urb(struct urb *urb);
static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
                void *dest, size_t size);
static void ftdi_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old);
static int ftdi_tiocmget(struct tty_struct *tty);
static int ftdi_tiocmset(struct tty_struct *tty,
                unsigned int set, unsigned int clear);
static int ftdi_get_icount(struct tty_struct *tty,
                struct serial_icounter_struct *icount);
static int ftdi_ioctl(struct tty_struct *tty,
                unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);

/* Baud-rate -> divisor conversion helpers, one family per chip generation. */
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
static unsigned short int ftdi_232am_baud_to_divisor(int baud);
static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
static __u32 ftdi_232bm_baud_to_divisor(int baud);
static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
static __u32 ftdi_2232h_baud_to_divisor(int baud);

/* Per-port operations table handed to the usb-serial core. */
static struct usb_serial_driver ftdi_sio_device = {
        .driver = {
                .owner =        THIS_MODULE,
                .name =         "ftdi_sio",
        },
        .description =          "FTDI USB Serial Device",
        .id_table =             id_table_combined,
        .num_ports =            1,
        .bulk_in_size =         512,
        .bulk_out_size =        256,
        .probe =                ftdi_sio_probe,
        .port_probe =           ftdi_sio_port_probe,
        .port_remove =          ftdi_sio_port_remove,
        .open =                 ftdi_open,
        .close =                ftdi_close,
        .dtr_rts =              ftdi_dtr_rts,
        .throttle =             usb_serial_generic_throttle,
        .unthrottle =           usb_serial_generic_unthrottle,
        .process_read_urb =     ftdi_process_read_urb,
        .prepare_write_buffer = ftdi_prepare_write_buffer,
        .tiocmget =             ftdi_tiocmget,
        .tiocmset =             ftdi_tiocmset,
        .get_icount =           ftdi_get_icount,
        .ioctl =                ftdi_ioctl,
        .set_termios =          ftdi_set_termios,
        .break_ctl =            ftdi_break_ctl,
};

static struct usb_serial_driver * const serial_drivers[] = {
        &ftdi_sio_device, NULL
};

#define WDR_TIMEOUT 5000 /* default urb timeout */
#define WDR_SHORT_TIMEOUT 1000  /* shorter urb timeout */

/* High and low are for DTR, RTS etc etc */
#define HIGH 1
#define LOW 0

/*
 * ***************************************************************************
 * Utility functions
 * ***************************************************************************
 */

/*
 * Convert a baud rate to the FT8U232AM divisor encoding: an integer
 * divisor of base/2 with a 3-bit fraction packed into the top two bits.
 */
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base)
{
        unsigned short int divisor;
        /* divisor shifted 3 bits to the left */
        int divisor3 = base / 2 / baud;
        if ((divisor3 & 0x7) == 7)
                divisor3++; /* round x.7/8 up to x+1 */
        divisor = divisor3 >> 3;
        divisor3 &= 0x7;
        /* Encode the fractional part in the two top bits of the divisor. */
        if (divisor3 == 1)
                divisor |= 0xc000;
        else if (divisor3 >= 4)
                divisor |= 0x4000;
        else if (divisor3 != 0)
                divisor |= 0x8000;
        else if (divisor == 1)
                divisor = 0;    /* special case for maximum baud rate */
        return divisor;
}

static unsigned short int ftdi_232am_baud_to_divisor(int baud)
{
        /* AM-type parts run from a fixed 48 MHz base clock. */
        return ftdi_232am_baud_base_to_divisor(baud, 48000000);
}

/*
 * Convert a baud rate to the FT232BM-family divisor encoding: integer
 * divisor plus a 3-bit sub-integer fraction mapped through divfrac[]
 * into bits 14..16.
 */
static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base)
{
        static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
        __u32 divisor;
        /* divisor shifted 3 bits to the left */
        int divisor3 = base / 2 / baud;
        divisor = divisor3 >> 3;
        divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
        /* Deal with special cases for highest baud rates.
*/
        if (divisor == 1)
                divisor = 0;
        else if (divisor == 0x4001)
                divisor = 1;
        return divisor;
}

static __u32 ftdi_232bm_baud_to_divisor(int baud)
{
        /* BM-type parts also use the 48 MHz base clock. */
        return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
}

/*
 * Convert a baud rate to the hi-speed (FT2232H family) divisor encoding.
 * Same fractional packing as the BM encoding, but computed for 10-bit
 * sampling and with the divide-by-2.5 bypass bit set.
 */
static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
{
        static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
        __u32 divisor;
        int divisor3;

        /* hi-speed baud rate is 10-bit sampling instead of 16-bit */
        divisor3 = base * 8 / (baud * 10);

        divisor = divisor3 >> 3;
        divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
        /* Deal with special cases for highest baud rates. */
        if (divisor == 1)
                divisor = 0;
        else if (divisor == 0x4001)
                divisor = 1;
        /*
         * Set this bit to turn off a divide by 2.5 on baud rate generator
         * This enables baud rates up to 12Mbaud but cannot reach below 1200
         * baud with this bit set
         */
        divisor |= 0x00020000;
        return divisor;
}

static __u32 ftdi_2232h_baud_to_divisor(int baud)
{
        /* Hi-speed parts run from a 120 MHz base clock. */
        return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
}

#define set_mctrl(port, set)            update_mctrl((port), (set), 0)
#define clear_mctrl(port, clear)        update_mctrl((port), 0, (clear))

/*
 * Raise/lower DTR and RTS via a MODEM_CTRL control request.
 * 'set' wins over 'clear' for any line named in both.  Returns the
 * usb_control_msg() result (negative errno on failure).
 */
static int update_mctrl(struct usb_serial_port *port, unsigned int set,
                                                        unsigned int clear)
{
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        unsigned urb_value;
        int rv;

        if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) {
                dbg("%s - DTR|RTS not being set|cleared", __func__);
                return 0;       /* no change */
        }

        clear &= ~set;  /* 'set' takes precedence over 'clear' */
        urb_value = 0;
        if (clear & TIOCM_DTR)
                urb_value |= FTDI_SIO_SET_DTR_LOW;
        if (clear & TIOCM_RTS)
                urb_value |= FTDI_SIO_SET_RTS_LOW;
        if (set & TIOCM_DTR)
                urb_value |= FTDI_SIO_SET_DTR_HIGH;
        if (set & TIOCM_RTS)
                urb_value |= FTDI_SIO_SET_RTS_HIGH;
        rv = usb_control_msg(port->serial->dev,
                               usb_sndctrlpipe(port->serial->dev, 0),
                               FTDI_SIO_SET_MODEM_CTRL_REQUEST,
                               FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
                               urb_value, priv->interface,
                               NULL, 0, WDR_TIMEOUT);
        if (rv < 0) {
                dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
                        __func__,
                        (set & TIOCM_DTR) ? "HIGH" :
                        (clear & TIOCM_DTR) ? "LOW" : "unchanged",
                        (set & TIOCM_RTS) ? "HIGH" :
                        (clear & TIOCM_RTS) ? "LOW" : "unchanged");
        } else {
                dbg("%s - DTR %s, RTS %s", __func__,
                        (set & TIOCM_DTR) ? "HIGH" :
                        (clear & TIOCM_DTR) ? "LOW" : "unchanged",
                        (set & TIOCM_RTS) ? "HIGH" :
                        (clear & TIOCM_RTS) ? "LOW" : "unchanged");
                /* FIXME: locking on last_dtr_rts */
                priv->last_dtr_rts = (priv->last_dtr_rts & ~clear) | set;
        }
        return rv;
}

/*
 * Translate the tty's requested speed into a chip-specific divisor.
 * Also re-encodes the (possibly clamped) baud rate back into the tty.
 */
static __u32 get_ftdi_divisor(struct tty_struct *tty,
        struct usb_serial_port *port)
{
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        __u32 div_value = 0;
        int div_okay = 1;
        int baud;

        /*
         * The logic involved in setting the baudrate can be cleanly split into
         * 3 steps.
         * 1. Standard baud rates are set in tty->termios->c_cflag
         * 2. If these are not enough, you can set any speed using alt_speed as
         * follows:
         *    - set tty->termios->c_cflag speed to B38400
         *    - set your real speed in tty->alt_speed; it gets ignored when
         *      alt_speed==0, (or)
         *    - call TIOCSSERIAL ioctl with (struct serial_struct) set as
         *      follows:
         *      flags & ASYNC_SPD_MASK == ASYNC_SPD_[HI, VHI, SHI, WARP],
         *      this just sets alt_speed to (HI: 57600, VHI: 115200,
         *      SHI: 230400, WARP: 460800)
         * ** Steps 1, 2 are done courtesy of tty_get_baud_rate
         * 3. You can also set baud rate by setting custom divisor as follows
         *    - set tty->termios->c_cflag speed to B38400
         *    - call TIOCSSERIAL ioctl with (struct serial_struct) set as
         *      follows:
         *      o flags & ASYNC_SPD_MASK == ASYNC_SPD_CUST
         *      o custom_divisor set to baud_base / your_new_baudrate
         * ** Step 3 is done courtesy of code borrowed from serial.c
         *    I should really spend some time and separate + move this common
         *    code to serial.c, it is replicated in nearly every serial driver
         *    you see.
         */

        /* 1. Get the baud rate from the tty settings, this observes
              alt_speed hack */
        baud = tty_get_baud_rate(tty);
        dbg("%s - tty_get_baud_rate reports speed %d", __func__, baud);

        /* 2. Observe async-compatible custom_divisor hack, update baudrate
           if needed */
        if (baud == 38400 &&
            ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
             (priv->custom_divisor)) {
                baud = priv->baud_base / priv->custom_divisor;
                dbg("%s - custom divisor %d sets baud rate to %d",
                                __func__, priv->custom_divisor, baud);
        }

        /* 3. Convert baudrate to device-specific divisor */
        if (!baud)
                baud = 9600;    /* fall back to a sane default */
        switch (priv->chip_type) {
        case SIO: /* SIO chip */
                switch (baud) {
                case 300: div_value = ftdi_sio_b300; break;
                case 600: div_value = ftdi_sio_b600; break;
                case 1200: div_value = ftdi_sio_b1200; break;
                case 2400: div_value = ftdi_sio_b2400; break;
                case 4800: div_value = ftdi_sio_b4800; break;
                case 9600: div_value = ftdi_sio_b9600; break;
                case 19200: div_value = ftdi_sio_b19200; break;
                case 38400: div_value = ftdi_sio_b38400; break;
                case 57600: div_value = ftdi_sio_b57600; break;
                case 115200: div_value = ftdi_sio_b115200; break;
                } /* baud */
                if (div_value == 0) {
                        dbg("%s - Baudrate (%d) requested is not supported",
                                                        __func__, baud);
                        div_value = ftdi_sio_b9600;
                        baud = 9600;
                        div_okay = 0;
                }
                break;
        case FT8U232AM: /* 8U232AM chip */
                if (baud <= 3000000) {
                        div_value = ftdi_232am_baud_to_divisor(baud);
                } else {
                        dbg("%s - Baud rate too high!", __func__);
                        baud = 9600;
                        div_value = ftdi_232am_baud_to_divisor(9600);
                        div_okay = 0;
                }
                break;
        case FT232BM: /* FT232BM chip */
        case FT2232C: /* FT2232C chip */
        case FT232RL: /* FT232RL chip */
        case FTX:     /* FT-X series */
                if (baud <= 3000000) {
                        __u16 product_id = le16_to_cpu(
                                port->serial->dev->descriptor.idProduct);
                        /* NDI devices remap 19200 to their real 1.2 Mbaud. */
                        if (((FTDI_NDI_HUC_PID == product_id) ||
                             (FTDI_NDI_SPECTRA_SCU_PID == product_id) ||
                             (FTDI_NDI_FUTURE_2_PID == product_id) ||
                             (FTDI_NDI_FUTURE_3_PID == product_id) ||
                             (FTDI_NDI_AURORA_SCU_PID == product_id)) &&
                            (baud == 19200)) {
                                baud = 1200000;
                        }
                        div_value = ftdi_232bm_baud_to_divisor(baud);
                } else {
                        dbg("%s - Baud rate too high!", __func__);
                        div_value = ftdi_232bm_baud_to_divisor(9600);
                        div_okay = 0;
                        baud = 9600;
                }
                break;
        case FT2232H: /* FT2232H chip */
        case FT4232H: /* FT4232H chip */
        case FT232H:  /* FT232H chip */
                if ((baud <= 12000000) && (baud >= 1200)) {
                        div_value = ftdi_2232h_baud_to_divisor(baud);
                } else if (baud < 1200) {
                        /* Below 1200 the hi-speed encoding cannot be used;
                         * fall back to the BM-style divisor. */
                        div_value = ftdi_232bm_baud_to_divisor(baud);
                } else {
                        dbg("%s - Baud rate too high!", __func__);
                        div_value = ftdi_232bm_baud_to_divisor(9600);
                        div_okay = 0;
                        baud = 9600;
                }
                break;
        } /* priv->chip_type */

        if (div_okay) {
                dbg("%s - Baud rate set to %d (divisor 0x%lX) on chip %s",
                        __func__, baud, (unsigned long)div_value,
                        ftdi_chip_name[priv->chip_type]);
        }

        tty_encode_baud_rate(tty, baud, baud);
        return div_value;
}

/*
 * Push the divisor computed by get_ftdi_divisor() to the device via a
 * SET_BAUDRATE control request.  Returns the usb_control_msg() result.
 */
static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
{
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        __u16 urb_value;
        __u16 urb_index;
        __u32 urb_index_value;
        int rv;

        urb_index_value = get_ftdi_divisor(tty, port);
        urb_value = (__u16)urb_index_value;
        urb_index = (__u16)(urb_index_value >> 16);
        if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
                (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
                /* Probably the BM type needs the MSB of the encoded fractional
                 * divider also moved like for the chips above. Any infos?
*/ urb_index = (__u16)((urb_index << 8) | priv->interface); } rv = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_BAUDRATE_REQUEST, FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE, urb_value, urb_index, NULL, 0, WDR_SHORT_TIMEOUT); return rv; } static int write_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; int rv; int l = priv->latency; if (priv->flags & ASYNC_LOW_LATENCY) l = 1; dbg("%s: setting latency timer = %i", __func__, l); rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), FTDI_SIO_SET_LATENCY_TIMER_REQUEST, FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, l, priv->interface, NULL, 0, WDR_TIMEOUT); if (rv < 0) dev_err(&port->dev, "Unable to write latency timer: %i\n", rv); return rv; } static int read_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; unsigned char *buf; int rv; dbg("%s", __func__); buf = kmalloc(1, GFP_KERNEL); if (!buf) return -ENOMEM; rv = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), FTDI_SIO_GET_LATENCY_TIMER_REQUEST, FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0, priv->interface, buf, 1, WDR_TIMEOUT); if (rv < 0) dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); else priv->latency = buf[0]; kfree(buf); return rv; } static int get_serial_info(struct usb_serial_port *port, struct serial_struct __user *retinfo) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct serial_struct tmp; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.flags = priv->flags; tmp.baud_base = priv->baud_base; tmp.custom_divisor = priv->custom_divisor; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } static int set_serial_info(struct tty_struct *tty, struct usb_serial_port *port, struct serial_struct __user *newinfo) { struct ftdi_private *priv = 
usb_get_serial_port_data(port); struct serial_struct new_serial; struct ftdi_private old_priv; if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) return -EFAULT; mutex_lock(&priv->cfg_lock); old_priv = *priv; /* Do error checking and permission checking */ if (!capable(CAP_SYS_ADMIN)) { if (((new_serial.flags & ~ASYNC_USR_MASK) != (priv->flags & ~ASYNC_USR_MASK))) { mutex_unlock(&priv->cfg_lock); return -EPERM; } priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); priv->custom_divisor = new_serial.custom_divisor; goto check_and_exit; } if (new_serial.baud_base != priv->baud_base) { mutex_unlock(&priv->cfg_lock); return -EINVAL; } /* Make the changes - these are privileged changes! */ priv->flags = ((priv->flags & ~ASYNC_FLAGS) | (new_serial.flags & ASYNC_FLAGS)); priv->custom_divisor = new_serial.custom_divisor; write_latency_timer(port); check_and_exit: if ((old_priv.flags & ASYNC_SPD_MASK) != (priv->flags & ASYNC_SPD_MASK)) { if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) tty->alt_speed = 57600; else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) tty->alt_speed = 115200; else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) tty->alt_speed = 230400; else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) tty->alt_speed = 460800; else tty->alt_speed = 0; } if (((old_priv.flags & ASYNC_SPD_MASK) != (priv->flags & ASYNC_SPD_MASK)) || (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && (old_priv.custom_divisor != priv->custom_divisor))) { change_speed(tty, port); mutex_unlock(&priv->cfg_lock); } else mutex_unlock(&priv->cfg_lock); return 0; } static int get_lsr_info(struct usb_serial_port *port, struct serial_struct __user *retinfo) { struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned int result = 0; if (!retinfo) return -EFAULT; if (priv->transmit_empty) result = TIOCSER_TEMT; if (copy_to_user(retinfo, &result, sizeof(unsigned int))) return -EFAULT; return 0; } /* Determine type of 
FTDI chip based on USB config and descriptor. */ static void ftdi_determine_type(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; struct usb_device *udev = serial->dev; unsigned version; unsigned interfaces; /* Assume it is not the original SIO device for now. */ priv->baud_base = 48000000 / 2; version = le16_to_cpu(udev->descriptor.bcdDevice); interfaces = udev->actconfig->desc.bNumInterfaces; dbg("%s: bcdDevice = 0x%x, bNumInterfaces = %u", __func__, version, interfaces); if (interfaces > 1) { int inter; /* Multiple interfaces.*/ if (version == 0x0800) { priv->chip_type = FT4232H; /* Hi-speed - baud clock runs at 120MHz */ priv->baud_base = 120000000 / 2; } else if (version == 0x0700) { priv->chip_type = FT2232H; /* Hi-speed - baud clock runs at 120MHz */ priv->baud_base = 120000000 / 2; } else priv->chip_type = FT2232C; /* Determine interface code. */ inter = serial->interface->altsetting->desc.bInterfaceNumber; if (inter == 0) { priv->interface = INTERFACE_A; } else if (inter == 1) { priv->interface = INTERFACE_B; } else if (inter == 2) { priv->interface = INTERFACE_C; } else if (inter == 3) { priv->interface = INTERFACE_D; } /* BM-type devices have a bug where bcdDevice gets set * to 0x200 when iSerialNumber is 0. */ if (version < 0x500) { dbg("%s: something fishy - bcdDevice too low for multi-interface device", __func__); } } else if (version < 0x200) { /* Old device. Assume it's the original SIO. */ priv->chip_type = SIO; priv->baud_base = 12000000 / 16; } else if (version < 0x400) { /* Assume it's an FT8U232AM (or FT8U245AM) */ /* (It might be a BM because of the iSerialNumber bug, * but it will still work as an AM device.) 
*/ priv->chip_type = FT8U232AM; } else if (version < 0x600) { /* Assume it's an FT232BM (or FT245BM) */ priv->chip_type = FT232BM; } else if (version < 0x900) { /* Assume it's an FT232RL */ priv->chip_type = FT232RL; } else if (version < 0x1000) { /* Assume it's an FT232H */ priv->chip_type = FT232H; } else { /* Assume it's an FT-X series device */ priv->chip_type = FTX; } dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); } /* Determine the maximum packet size for the device. This depends on the chip * type and the USB host capabilities. The value should be obtained from the * device descriptor as the chip will use the appropriate values for the host.*/ static void ftdi_set_max_packet_size(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; unsigned num_endpoints; int i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); /* NOTE: some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. 
In this case, we * want to override the endpoint descriptor setting and use a * value of 64 for wMaxPacketSize */ for (i = 0; i < num_endpoints; i++) { dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1, interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize); ep_desc = &interface->cur_altsetting->endpoint[i].desc; if (ep_desc->wMaxPacketSize == 0) { ep_desc->wMaxPacketSize = cpu_to_le16(0x40); dev_info(&udev->dev, "Overriding wMaxPacketSize on endpoint %d\n", i); } } /* set max packet size based on descriptor */ priv->max_packet_size = usb_endpoint_maxp(ep_desc); dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); } /* * *************************************************************************** * Sysfs Attribute * *************************************************************************** */ static ssize_t show_latency_timer(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); if (priv->flags & ASYNC_LOW_LATENCY) return sprintf(buf, "1\n"); else return sprintf(buf, "%i\n", priv->latency); } /* Write a new value of the latency timer, in units of milliseconds. */ static ssize_t store_latency_timer(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); int v = simple_strtoul(valbuf, NULL, 10); int rv; priv->latency = v; rv = write_latency_timer(port); if (rv < 0) return -EIO; return count; } /* Write an event character directly to the FTDI register. The ASCII value is in the low 8 bits, with the enable bit in the 9th bit. 
*/
static ssize_t store_event_char(struct device *dev,
        struct device_attribute *attr, const char *valbuf, size_t count)
{
        struct usb_serial_port *port = to_usb_serial_port(dev);
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        struct usb_device *udev = port->serial->dev;
        /* NOTE(review): unvalidated simple_strtoul(), as in
         * store_latency_timer above. */
        int v = simple_strtoul(valbuf, NULL, 10);
        int rv;

        dbg("%s: setting event char = %i", __func__, v);

        rv = usb_control_msg(udev,
                             usb_sndctrlpipe(udev, 0),
                             FTDI_SIO_SET_EVENT_CHAR_REQUEST,
                             FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
                             v, priv->interface,
                             NULL, 0, WDR_TIMEOUT);
        if (rv < 0) {
                dbg("Unable to write event character: %i", rv);
                return -EIO;
        }

        return count;
}

static DEVICE_ATTR(latency_timer, S_IWUSR | S_IRUGO, show_latency_timer,
                                                store_latency_timer);
static DEVICE_ATTR(event_char, S_IWUSR, NULL, store_event_char);

/* Create the per-port sysfs files appropriate for the detected chip. */
static int create_sysfs_attrs(struct usb_serial_port *port)
{
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        int retval = 0;

        dbg("%s", __func__);

        /* XXX I've no idea if the original SIO supports the event_char
         * sysfs parameter, so I'm playing it safe.  */
        if (priv->chip_type != SIO) {
                dbg("sysfs attributes for %s", ftdi_chip_name[priv->chip_type]);
                retval = device_create_file(&port->dev, &dev_attr_event_char);
                if ((!retval) &&
                    (priv->chip_type == FT232BM ||
                     priv->chip_type == FT2232C ||
                     priv->chip_type == FT232RL ||
                     priv->chip_type == FT2232H ||
                     priv->chip_type == FT4232H ||
                     priv->chip_type == FT232H ||
                     priv->chip_type == FTX)) {
                        retval = device_create_file(&port->dev,
                                                    &dev_attr_latency_timer);
                }
        }
        return retval;
}

/* Remove exactly the sysfs files created by create_sysfs_attrs(). */
static void remove_sysfs_attrs(struct usb_serial_port *port)
{
        struct ftdi_private *priv = usb_get_serial_port_data(port);

        dbg("%s", __func__);

        /* XXX see create_sysfs_attrs */
        if (priv->chip_type != SIO) {
                device_remove_file(&port->dev, &dev_attr_event_char);
                if (priv->chip_type == FT232BM ||
                    priv->chip_type == FT2232C ||
                    priv->chip_type == FT232RL ||
                    priv->chip_type == FT2232H ||
                    priv->chip_type == FT4232H ||
                    priv->chip_type == FT232H ||
                    priv->chip_type == FTX) {
                        device_remove_file(&port->dev, &dev_attr_latency_timer);
                }
        }

}

/*
 * ***************************************************************************
 * FTDI driver specific functions
 * ***************************************************************************
 */

/* Probe function to check for special devices */
static int ftdi_sio_probe(struct usb_serial *serial,
                                        const struct usb_device_id *id)
{
        /* driver_info carries an optional per-device quirk descriptor. */
        struct ftdi_sio_quirk *quirk =
                                (struct ftdi_sio_quirk *)id->driver_info;

        if (quirk && quirk->probe) {
                int ret = quirk->probe(serial);
                if (ret != 0)
                        return ret;
        }

        usb_set_serial_data(serial, (void *)id->driver_info);

        return 0;
}

/*
 * Per-port setup: allocate the private state, detect the chip type,
 * fix up packet sizes, sync the latency timer and create sysfs files.
 */
static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
        struct ftdi_private *priv;
        struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);

        dbg("%s", __func__);

        priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
        if (!priv) {
                dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
                                        sizeof(struct ftdi_private));
                return -ENOMEM;
        }

        kref_init(&priv->kref);
        mutex_init(&priv->cfg_lock);
        memset(&priv->icount, 0x00, sizeof(priv->icount));
        init_waitqueue_head(&priv->delta_msr_wait);

        priv->flags = ASYNC_LOW_LATENCY;
        priv->dev_gone = false;

        if (quirk && quirk->port_probe)
                quirk->port_probe(priv);

        priv->port = port;
        usb_set_serial_port_data(port, priv);

        ftdi_determine_type(port);
        ftdi_set_max_packet_size(port);
        /* Fall back to the 16 ms hardware default if the read fails. */
        if (read_latency_timer(port) < 0)
                priv->latency = 16;
        write_latency_timer(port);
        create_sysfs_attrs(port);
        return 0;
}

/* Setup for the USB-UIRT device, which requires hardwired
 * baudrate (38400 gets mapped to 312500) */
/* Called from usbserial:serial_probe */
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv)
{
        dbg("%s", __func__);

        priv->flags |= ASYNC_SPD_CUST;
        priv->custom_divisor = 77;
        priv->force_baud = 38400;
}

/* Setup for the HE-TIRA1 device, which requires hardwired
 * baudrate (38400 gets mapped to 100000) and RTS-CTS enabled.  */
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
{
        dbg("%s", __func__);

        priv->flags |= ASYNC_SPD_CUST;
        priv->custom_divisor = 240;
        priv->force_baud = 38400;
        priv->force_rtscts = 1;
}

/*
 * Module parameter to control latency timer for NDI FTDI-based USB devices.
 * If this value is not set in /etc/modprobe.d/ its value will be set
 * to 1ms.
 */
static int ndi_latency_timer = 1;

/* Setup for the NDI FTDI-based USB devices, which requires hardwired
 * baudrate (19200 gets mapped to 1200000).
 *
 * Called from usbserial:serial_probe.
 */
static int ftdi_NDI_device_setup(struct usb_serial *serial)
{
        struct usb_device *udev = serial->dev;
        int latency = ndi_latency_timer;

        /* Clamp the module parameter into the chip's valid 1..99 range. */
        if (latency == 0)
                latency = 1;
        if (latency > 99)
                latency = 99;

        dbg("%s setting NDI device latency to %d", __func__, latency);
        dev_info(&udev->dev, "NDI device with a latency value of %d", latency);

        /* FIXME: errors are not returned */
        usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
                                FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
                                latency, 0, NULL, 0, WDR_TIMEOUT);
        return 0;
}

/*
 * First port on JTAG adaptors such as Olimex arm-usb-ocd or the FIC/OpenMoko
 * Neo1973 Debug Board is reserved for JTAG interface and can be accessed from
 * userspace using openocd.
 */
static int ftdi_jtag_probe(struct usb_serial *serial)
{
        struct usb_device *udev = serial->dev;
        struct usb_interface *interface = serial->interface;

        dbg("%s", __func__);

        if (interface == udev->actconfig->interface[0]) {
                dev_info(&udev->dev,
                         "Ignoring serial port reserved for JTAG\n");
                return -ENODEV;
        }

        return 0;
}

/* Some 8U2232C-based boards dedicate their first port to JTAG too. */
static int ftdi_8u2232c_probe(struct usb_serial *serial)
{
        struct usb_device *udev = serial->dev;

        dbg("%s", __func__);

        if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
            (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
                return ftdi_jtag_probe(serial);

        return 0;
}

/*
 * First and second port on STMCLiteadaptors is reserved for JTAG interface
 * and the forth port for pio
 */
static int ftdi_stmclite_probe(struct usb_serial *serial)
{
        struct usb_device *udev = serial->dev;
        struct usb_interface *interface = serial->interface;

        dbg("%s", __func__);

        if (interface == udev->actconfig->interface[2])
                return 0;

        dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");

        return -ENODEV;
}

/*
 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
 * We have to correct it if we want to read from it.
*/ static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) { struct usb_host_endpoint *ep = serial->dev->ep_in[1]; struct usb_endpoint_descriptor *ep_desc = &ep->desc; if (ep->enabled && ep_desc->wMaxPacketSize == 0) { ep_desc->wMaxPacketSize = cpu_to_le16(0x40); dev_info(&serial->dev->dev, "Fixing invalid wMaxPacketSize on read pipe\n"); } return 0; } static void ftdi_sio_priv_release(struct kref *k) { struct ftdi_private *priv = container_of(k, struct ftdi_private, kref); kfree(priv); } static int ftdi_sio_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); dbg("%s", __func__); priv->dev_gone = true; wake_up_interruptible_all(&priv->delta_msr_wait); remove_sysfs_attrs(port); kref_put(&priv->kref, ftdi_sio_priv_release); return 0; } static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ktermios dummy; struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); int result; dbg("%s", __func__); /* No error checking for this (will get errors later anyway) */ /* See ftdi_sio.h for description of what is reset */ usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE, FTDI_SIO_RESET_SIO, priv->interface, NULL, 0, WDR_TIMEOUT); /* Termios defaults are set by usb_serial_init. We don't change port->tty->termios - this would lose speed settings, etc. 
This is same behaviour as serial.c/rs_open() - Kuba */ /* ftdi_set_termios will send usb control messages */ if (tty) { memset(&dummy, 0, sizeof(dummy)); ftdi_set_termios(tty, port, &dummy); } /* Start reading from the device */ result = usb_serial_generic_open(tty, port); if (!result) kref_get(&priv->kref); return result; } static void ftdi_dtr_rts(struct usb_serial_port *port, int on) { struct ftdi_private *priv = usb_get_serial_port_data(port); /* Disable flow control */ if (!on) { if (usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "error from flowcontrol urb\n"); } } /* drop RTS and DTR */ if (on) set_mctrl(port, TIOCM_DTR | TIOCM_RTS); else clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } /* * usbserial:__serial_close only calls ftdi_close if the point is open * * This only gets called when it is the last close */ static void ftdi_close(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); dbg("%s", __func__); usb_serial_generic_close(port); kref_put(&priv->kref, ftdi_sio_priv_release); } /* The SIO requires the first byte to have: * B0 1 * B1 0 * B2..7 length of message excluding byte 0 * * The new devices do not require this byte */ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size) { struct ftdi_private *priv; int count; unsigned long flags; priv = usb_get_serial_port_data(port); if (priv->chip_type == SIO) { unsigned char *buffer = dest; int i, len, c; count = 0; spin_lock_irqsave(&port->lock, flags); for (i = 0; i < size - 1; i += priv->max_packet_size) { len = min_t(int, size - i, priv->max_packet_size) - 1; c = kfifo_out(&port->write_fifo, &buffer[i + 1], len); if (!c) break; priv->icount.tx += c; buffer[i] = (c << 2) + 1; count += c + 1; } spin_unlock_irqrestore(&port->lock, flags); } else { count = 
kfifo_out_locked(&port->write_fifo, dest, size, &port->lock); priv->icount.tx += count; } return count; } #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct tty_struct *tty, struct usb_serial_port *port, struct ftdi_private *priv, char *packet, int len) { int i; char status; char flag; char *ch; dbg("%s - port %d", __func__, port->number); if (len < 2) { dbg("malformed packet"); return 0; } /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. */ status = packet[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; if (diff_status & FTDI_RS0_CTS) priv->icount.cts++; if (diff_status & FTDI_RS0_DSR) priv->icount.dsr++; if (diff_status & FTDI_RS0_RI) priv->icount.rng++; if (diff_status & FTDI_RS0_RLSD) priv->icount.dcd++; wake_up_interruptible_all(&priv->delta_msr_wait); priv->prev_status = status; } flag = TTY_NORMAL; if (packet[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence * over framing errors */ if (packet[1] & FTDI_RS_BI) { flag = TTY_BREAK; priv->icount.brk++; usb_serial_handle_break(port); } else if (packet[1] & FTDI_RS_PE) { flag = TTY_PARITY; priv->icount.parity++; } else if (packet[1] & FTDI_RS_FE) { flag = TTY_FRAME; priv->icount.frame++; } /* Overrun is special, not associated with a char */ if (packet[1] & FTDI_RS_OE) { priv->icount.overrun++; tty_insert_flip_char(tty, 0, TTY_OVERRUN); } } /* save if the transmitter is empty or not */ if (packet[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; len -= 2; if (!len) return 0; /* status only */ priv->icount.rx += len; ch = packet + 2; if (port->port.console && port->sysrq) { for (i = 0; i < len; i++, ch++) { if (!usb_serial_handle_sysrq_char(port, *ch)) tty_insert_flip_char(tty, *ch, flag); } } else { 
tty_insert_flip_string_fixed_flag(tty, ch, flag, len); } return len; } static void ftdi_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct tty_struct *tty; struct ftdi_private *priv = usb_get_serial_port_data(port); char *data = (char *)urb->transfer_buffer; int i; int len; int count = 0; tty = tty_port_tty_get(&port->port); if (!tty) return; for (i = 0; i < urb->actual_length; i += priv->max_packet_size) { len = min_t(int, urb->actual_length - i, priv->max_packet_size); count += ftdi_process_packet(tty, port, priv, &data[i], len); } if (count) tty_flip_buffer_push(tty); tty_kref_put(tty); } static void ftdi_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); __u16 urb_value; /* break_state = -1 to turn on break, and 0 to turn off break */ /* see drivers/char/tty_io.c to see it used */ /* last_set_data_urb_value NEVER has the break bit set in it */ if (break_state) urb_value = priv->last_set_data_urb_value | FTDI_SIO_SET_BREAK; else urb_value = priv->last_set_data_urb_value; if (usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, urb_value , priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "%s FAILED to enable/disable break state " "(state was %d)\n", __func__, break_state); } dbg("%s break state is %d - urb is %d", __func__, break_state, urb_value); } /* old_termios contains the original termios settings and tty->termios contains * the new setting to be used * WARNING: set_termios calls this with old_termios in kernel space */ static void ftdi_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = tty->termios; unsigned int cflag = termios->c_cflag; 
__u16 urb_value; /* will hold the new flags */ /* Added for xon/xoff support */ unsigned int iflag = termios->c_iflag; unsigned char vstop; unsigned char vstart; dbg("%s", __func__); /* Force baud rate if this device requires it, unless it is set to B0. */ if (priv->force_baud && ((termios->c_cflag & CBAUD) != B0)) { dbg("%s: forcing baud rate for this device", __func__); tty_encode_baud_rate(tty, priv->force_baud, priv->force_baud); } /* Force RTS-CTS if this device requires it. */ if (priv->force_rtscts) { dbg("%s: forcing rtscts for this device", __func__); termios->c_cflag |= CRTSCTS; } cflag = termios->c_cflag; if (old_termios == 0) goto no_skip; if (old_termios->c_cflag == termios->c_cflag && old_termios->c_ispeed == termios->c_ispeed && old_termios->c_ospeed == termios->c_ospeed) goto no_c_cflag_changes; /* NOTE These routines can get interrupted by ftdi_sio_read_bulk_callback - need to examine what this means - don't see any problems yet */ if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) == (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB))) goto no_data_parity_stop_changes; no_skip: /* Set number of data bits, parity, stop bits */ urb_value = 0; urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 : FTDI_SIO_SET_DATA_STOP_BITS_1); if (cflag & PARENB) { if (cflag & CMSPAR) urb_value |= cflag & PARODD ? FTDI_SIO_SET_DATA_PARITY_MARK : FTDI_SIO_SET_DATA_PARITY_SPACE; else urb_value |= cflag & PARODD ? 
FTDI_SIO_SET_DATA_PARITY_ODD : FTDI_SIO_SET_DATA_PARITY_EVEN; } else { urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; } if (cflag & CSIZE) { switch (cflag & CSIZE) { case CS7: urb_value |= 7; dbg("Setting CS7"); break; case CS8: urb_value |= 8; dbg("Setting CS8"); break; default: dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n"); } } /* This is needed by the break command since it uses the same command - but is or'ed with this value */ priv->last_set_data_urb_value = urb_value; if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, urb_value , priv->interface, NULL, 0, WDR_SHORT_TIMEOUT) < 0) { dev_err(&port->dev, "%s FAILED to set " "databits/stopbits/parity\n", __func__); } /* Now do the baudrate */ no_data_parity_stop_changes: if ((cflag & CBAUD) == B0) { /* Disable flow control */ if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "%s error from disable flowcontrol urb\n", __func__); } /* Drop RTS and DTR */ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } else { /* set the baudrate determined before */ mutex_lock(&priv->cfg_lock); if (change_speed(tty, port)) dev_err(&port->dev, "%s urb failed to set baudrate\n", __func__); mutex_unlock(&priv->cfg_lock); /* Ensure RTS and DTR are raised when baudrate changed from 0 */ if (!old_termios || (old_termios->c_cflag & CBAUD) == B0) set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } /* Set flow control */ /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ no_c_cflag_changes: if (cflag & CRTSCTS) { dbg("%s Setting to CRTSCTS flow control", __func__); if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface), NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to set to rts/cts flow control\n"); } } else { /* * 
Xon/Xoff code * * Check the IXOFF status in the iflag component of the * termios structure. If IXOFF is not set, the pre-xon/xoff * code is executed. */ if (iflag & IXOFF) { dbg("%s request to enable xonxoff iflag=%04x", __func__, iflag); /* Try to enable the XON/XOFF on the ftdi_sio * Set the vstart and vstop -- could have been done up * above where a lot of other dereferencing is done but * that would be very inefficient as vstart and vstop * are not always needed. */ vstart = termios->c_cc[VSTART]; vstop = termios->c_cc[VSTOP]; urb_value = (vstop << 8) | (vstart); if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, urb_value , (FTDI_SIO_XON_XOFF_HS | priv->interface), NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to set to " "xon/xoff flow control\n"); } } else { /* else clause to only run if cflag ! CRTSCTS and iflag * ! XOFF. CHECKME Assuming XON/XOFF handled by tty * stack - not by device */ dbg("%s Turning off hardware flow control", __func__); if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to clear flow control\n"); } } } } static int ftdi_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char *buf; int len; int ret; dbg("%s TIOCMGET", __func__); buf = kmalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; /* * The 8U232AM returns a two byte value (the SIO a 1 byte value) in * the same format as the data returned from the in point. 
*/ switch (priv->chip_type) { case SIO: len = 1; break; case FT8U232AM: case FT232BM: case FT2232C: case FT232RL: case FT2232H: case FT4232H: case FT232H: case FTX: len = 2; break; default: ret = -EFAULT; goto out; } ret = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), FTDI_SIO_GET_MODEM_STATUS_REQUEST, FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, 0, priv->interface, buf, len, WDR_TIMEOUT); if (ret < 0) goto out; ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) | (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) | (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) | (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) | priv->last_dtr_rts; out: kfree(buf); return ret; } static int ftdi_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; dbg("%s TIOCMSET", __func__); return update_mctrl(port, set, clear); } static int ftdi_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); struct async_icount *ic = &priv->icount; icount->cts = ic->cts; icount->dsr = ic->dsr; icount->rng = ic->rng; icount->dcd = ic->dcd; icount->tx = ic->tx; icount->rx = ic->rx; icount->frame = ic->frame; icount->parity = ic->parity; icount->overrun = ic->overrun; icount->brk = ic->brk; icount->buf_overrun = ic->buf_overrun; return 0; } static int ftdi_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); struct async_icount cnow; struct async_icount cprev; dbg("%s cmd 0x%04x", __func__, cmd); /* Based on code from acm.c and others */ switch (cmd) { case TIOCGSERIAL: /* gets serial port data */ return get_serial_info(port, (struct serial_struct __user *) arg); case TIOCSSERIAL: /* sets serial port data */ return set_serial_info(tty, port, (struct serial_struct __user *) arg); 
/* * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was. * * This code is borrowed from linux/drivers/char/serial.c */ case TIOCMIWAIT: cprev = priv->icount; while (!priv->dev_gone) { interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; cnow = priv->icount; if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { return 0; } cprev = cnow; } return -EIO; break; case TIOCSERGETLSR: return get_lsr_info(port, (struct serial_struct __user *)arg); break; default: break; } /* This is not necessarily an error - turns out the higher layers * will do some ioctls themselves (see comment above) */ dbg("%s arg not supported - it was 0x%04x - check /usr/include/asm/ioctls.h", __func__, cmd); return -ENOIOCTLCMD; } static int __init ftdi_init(void) { int retval; dbg("%s", __func__); if (vendor > 0 && product > 0) { /* Add user specified VID/PID to reserved element of table. 
*/ int i; for (i = 0; id_table_combined[i].idVendor; i++) ; id_table_combined[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE; id_table_combined[i].idVendor = vendor; id_table_combined[i].idProduct = product; } retval = usb_serial_register_drivers(&ftdi_driver, serial_drivers); if (retval == 0) printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return retval; } static void __exit ftdi_exit(void) { dbg("%s", __func__); usb_serial_deregister_drivers(&ftdi_driver, serial_drivers); } module_init(ftdi_init); module_exit(ftdi_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(vendor, ushort, 0); MODULE_PARM_DESC(vendor, "User specified vendor ID (default=" __MODULE_STRING(FTDI_VID)")"); module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified product ID"); module_param(ndi_latency_timer, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override");
gpl-2.0
cooler-SAI/murlocs_434
dep/ACE_wrappers/ace/Task.cpp
272
7768
// $Id: Task.cpp 91368 2010-08-16 13:03:34Z mhengstmengel $ #include "ace/Task.h" #include "ace/Module.h" #if !defined (__ACE_INLINE__) #include "ace/Task.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_Task_Base::ACE_Task_Base (ACE_Thread_Manager *thr_man) : thr_count_ (0), thr_mgr_ (thr_man), flags_ (0), grp_id_ (-1) #if !(defined (ACE_MVS) || defined(__TANDEM)) ,last_thread_id_ (0) #endif /* !defined (ACE_MVS) */ { #if (defined (ACE_MVS) || defined(__TANDEM)) ACE_OS::memset( &this->last_thread_id_, '\0', sizeof( this->last_thread_id_ )); #endif /* defined (ACE_MVS) */ } ACE_Task_Base::~ACE_Task_Base (void) { } // Default ACE_Task service routine int ACE_Task_Base::svc (void) { ACE_TRACE ("ACE_Task_Base::svc"); return 0; } // Default ACE_Task open routine int ACE_Task_Base::open (void *) { ACE_TRACE ("ACE_Task_Base::open"); return 0; } // Default ACE_Task close routine int ACE_Task_Base::close (u_long) { ACE_TRACE ("ACE_Task_Base::close"); return 0; } // Forward the call to close() so that existing applications don't // break. int ACE_Task_Base::module_closed (void) { return this->close (1); } // Default ACE_Task put routine. int ACE_Task_Base::put (ACE_Message_Block *, ACE_Time_Value *) { ACE_TRACE ("ACE_Task_Base::put"); return 0; } // Wait for all threads running in a task to exit. int ACE_Task_Base::wait (void) { ACE_TRACE ("ACE_Task_Base::wait"); // If we don't have a thread manager, we probably were never // activated. if (this->thr_mgr () != 0) return this->thr_mgr ()->wait_task (this); else return 0; } // Suspend a task. int ACE_Task_Base::suspend (void) { ACE_TRACE ("ACE_Task_Base::suspend"); ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->lock_, -1)); if (this->thr_count_ > 0) return this->thr_mgr_->suspend_task (this); return 0; } // Resume a suspended task. 
int ACE_Task_Base::resume (void) { ACE_TRACE ("ACE_Task_Base::resume"); ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->lock_, -1)); if (this->thr_count_ > 0) return this->thr_mgr_->resume_task (this); return 0; } int ACE_Task_Base::activate (long flags, int n_threads, int force_active, long priority, int grp_id, ACE_Task_Base *task, ACE_hthread_t thread_handles[], void *stack[], size_t stack_size[], ACE_thread_t thread_ids[], const char* thr_name[]) { ACE_TRACE ("ACE_Task_Base::activate"); #if defined (ACE_MT_SAFE) && (ACE_MT_SAFE != 0) ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, this->lock_, -1); // If the task passed in is zero, we will use <this> if (task == 0) task = this; if (this->thr_count_ > 0 && force_active == 0) return 1; // Already active. else { if (this->thr_count_ > 0 && this->grp_id_ != -1) // If we're joining an existing group of threads then make // sure to use its group id. grp_id = this->grp_id_; this->thr_count_ += n_threads; } // Use the ACE_Thread_Manager singleton if we're running as an // active object and the caller didn't supply us with a // Thread_Manager. if (this->thr_mgr_ == 0) # if defined (ACE_THREAD_MANAGER_LACKS_STATICS) this->thr_mgr_ = ACE_THREAD_MANAGER_SINGLETON::instance (); # else /* ! ACE_THREAD_MANAGER_LACKS_STATICS */ this->thr_mgr_ = ACE_Thread_Manager::instance (); # endif /* ACE_THREAD_MANAGER_LACKS_STATICS */ int grp_spawned = -1; if (thread_ids == 0) // Thread Ids were not specified grp_spawned = this->thr_mgr_->spawn_n (n_threads, &ACE_Task_Base::svc_run, (void *) this, flags, priority, grp_id, task, thread_handles, stack, stack_size, thr_name); else // thread names were specified grp_spawned = this->thr_mgr_->spawn_n (thread_ids, n_threads, &ACE_Task_Base::svc_run, (void *) this, flags, priority, grp_id, stack, stack_size, thread_handles, task, thr_name); if (grp_spawned == -1) { // If spawn_n fails, restore original thread count. 
this->thr_count_ -= n_threads; return -1; } if (this->grp_id_ == -1) this->grp_id_ = grp_spawned; #if defined (ACE_MVS) || defined(__TANDEM) ACE_OS::memcpy( &this->last_thread_id_, '\0', sizeof(this->last_thread_id_)); #else this->last_thread_id_ = 0; // Reset to prevent inadvertant match on ID #endif /* defined (ACE_MVS) */ return 0; #else { // Keep the compiler from complaining. ACE_UNUSED_ARG (flags); ACE_UNUSED_ARG (n_threads); ACE_UNUSED_ARG (force_active); ACE_UNUSED_ARG (priority); ACE_UNUSED_ARG (grp_id); ACE_UNUSED_ARG (task); ACE_UNUSED_ARG (thread_handles); ACE_UNUSED_ARG (stack); ACE_UNUSED_ARG (stack_size); ACE_UNUSED_ARG (thread_ids); ACE_UNUSED_ARG (thr_name); ACE_NOTSUP_RETURN (-1); } #endif /* ACE_MT_SAFE */ } void ACE_Task_Base::cleanup (void *object, void *) { ACE_Task_Base *t = (ACE_Task_Base *) object; // The thread count must be decremented first in case the <close> // hook does something crazy like "delete this". { ACE_MT (ACE_GUARD (ACE_Thread_Mutex, ace_mon, t->lock_)); t->thr_count_--; if (0 == t->thr_count_) t->last_thread_id_ = ACE_Thread::self (); } // @@ Is it possible to pass in the exit status somehow? t->close (); // t is undefined here. close() could have deleted it. } #if defined (ACE_HAS_SIG_C_FUNC) extern "C" void ACE_Task_Base_cleanup (void *object, void *) { ACE_Task_Base::cleanup (object, 0); } #endif /* ACE_HAS_SIG_C_FUNC */ ACE_THR_FUNC_RETURN ACE_Task_Base::svc_run (void *args) { ACE_TRACE ("ACE_Task_Base::svc_run"); ACE_Task_Base *t = (ACE_Task_Base *) args; // Register ourself with our <Thread_Manager>'s thread exit hook // mechanism so that our close() hook will be sure to get invoked // when this thread exits. #if defined ACE_HAS_SIG_C_FUNC t->thr_mgr ()->at_exit (t, ACE_Task_Base_cleanup, 0); #else t->thr_mgr ()->at_exit (t, ACE_Task_Base::cleanup, 0); #endif /* ACE_HAS_SIG_C_FUNC */ // Call the Task's svc() hook method. 
int const svc_status = t->svc (); ACE_THR_FUNC_RETURN status; #if defined (ACE_HAS_INTEGRAL_TYPE_THR_FUNC_RETURN) // Reinterpret case between integral types is not mentioned in the C++ spec status = static_cast<ACE_THR_FUNC_RETURN> (svc_status); #else status = reinterpret_cast<ACE_THR_FUNC_RETURN> (svc_status); #endif /* ACE_HAS_INTEGRAL_TYPE_THR_FUNC_RETURN */ // If we changed this zero change the other if in OS.cpp Thread_Adapter::invoke #if 1 // Call the <Task->close> hook. ACE_Thread_Manager *thr_mgr_ptr = t->thr_mgr (); // This calls the Task->close () hook. t->cleanup (t, 0); // This prevents a second invocation of the cleanup code // (called later by <ACE_Thread_Manager::exit>. thr_mgr_ptr->at_exit (t, 0, 0); #endif return status; } ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
siis/pfwall
drivers/w1/w1.c
528
26210
/* * w1.c * * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/atomic.h> #include "w1.h" #include "w1_log.h" #include "w1_int.h" #include "w1_family.h" #include "w1_netlink.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); static int w1_timeout = 10; int w1_max_slave_count = 10; int w1_max_slave_ttl = 10; module_param_named(timeout, w1_timeout, int, 0); module_param_named(max_slave_count, w1_max_slave_count, int, 0); module_param_named(slave_ttl, w1_max_slave_ttl, int, 0); DEFINE_MUTEX(w1_mlock); LIST_HEAD(w1_masters); static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn); static int w1_master_match(struct device *dev, struct device_driver *drv) { return 1; } static int w1_master_probe(struct device *dev) { return -ENODEV; } static void w1_master_release(struct device *dev) { struct w1_master *md = 
dev_to_w1_master(dev); dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name); memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); kfree(md); } static void w1_slave_release(struct device *dev) { struct w1_slave *sl = dev_to_w1_slave(dev); dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name); while (atomic_read(&sl->refcnt)) { dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n", sl->name, atomic_read(&sl->refcnt)); if (msleep_interruptible(1000)) flush_signals(current); } w1_family_put(sl->family); sl->master->slave_count--; complete(&sl->released); } static ssize_t w1_slave_read_name(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); return sprintf(buf, "%s\n", sl->name); } static ssize_t w1_slave_read_id(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); ssize_t count = sizeof(sl->reg_num); memcpy(buf, (u8 *)&sl->reg_num, count); return count; } static struct device_attribute w1_slave_attr_name = __ATTR(name, S_IRUGO, w1_slave_read_name, NULL); static struct device_attribute w1_slave_attr_id = __ATTR(id, S_IRUGO, w1_slave_read_id, NULL); /* Default family */ static ssize_t w1_default_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); mutex_lock(&sl->master->mutex); if (w1_reset_select_slave(sl)) { count = 0; goto out_up; } w1_write_block(sl->master, buf, count); out_up: mutex_unlock(&sl->master->mutex); return count; } static ssize_t w1_default_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); mutex_lock(&sl->master->mutex); w1_read_block(sl->master, buf, count); mutex_unlock(&sl->master->mutex); return count; } static struct bin_attribute w1_default_attr = { .attr = { .name = "rw", .mode = S_IRUGO | 
S_IWUSR, }, .size = PAGE_SIZE, .read = w1_default_read, .write = w1_default_write, }; static int w1_default_add_slave(struct w1_slave *sl) { return sysfs_create_bin_file(&sl->dev.kobj, &w1_default_attr); } static void w1_default_remove_slave(struct w1_slave *sl) { sysfs_remove_bin_file(&sl->dev.kobj, &w1_default_attr); } static struct w1_family_ops w1_default_fops = { .add_slave = w1_default_add_slave, .remove_slave = w1_default_remove_slave, }; static struct w1_family w1_default_family = { .fops = &w1_default_fops, }; static int w1_uevent(struct device *dev, struct kobj_uevent_env *env); static struct bus_type w1_bus_type = { .name = "w1", .match = w1_master_match, .uevent = w1_uevent, }; struct device_driver w1_master_driver = { .name = "w1_master_driver", .bus = &w1_bus_type, .probe = w1_master_probe, }; struct device w1_master_device = { .parent = NULL, .bus = &w1_bus_type, .init_name = "w1 bus master", .driver = &w1_master_driver, .release = &w1_master_release }; static struct device_driver w1_slave_driver = { .name = "w1_slave_driver", .bus = &w1_bus_type, }; #if 0 struct device w1_slave_device = { .parent = NULL, .bus = &w1_bus_type, .init_name = "w1 bus slave", .driver = &w1_slave_driver, .release = &w1_slave_release }; #endif /* 0 */ static ssize_t w1_master_attribute_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%s\n", md->name); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_store_search(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { long tmp; struct w1_master *md = dev_to_w1_master(dev); if (strict_strtol(buf, 0, &tmp) == -EINVAL) return -EINVAL; mutex_lock(&md->mutex); md->search_count = tmp; mutex_unlock(&md->mutex); wake_up_process(md->thread); return count; } static ssize_t w1_master_attribute_show_search(struct device *dev, struct device_attribute 
*attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->search_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_store_pullup(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { long tmp; struct w1_master *md = dev_to_w1_master(dev); if (strict_strtol(buf, 0, &tmp) == -EINVAL) return -EINVAL; mutex_lock(&md->mutex); md->enable_pullup = tmp; mutex_unlock(&md->mutex); wake_up_process(md->thread); return count; } static ssize_t w1_master_attribute_show_pullup(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->enable_pullup); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_pointer(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "0x%p\n", md->bus_master); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t count; count = sprintf(buf, "%d\n", w1_timeout); return count; } static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->max_slave_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_attempts(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%lu\n", md->attempts); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_slave_count(struct device *dev, struct 
device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->slave_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_slaves(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); int c = PAGE_SIZE; mutex_lock(&md->mutex); if (md->slave_count == 0) c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n"); else { struct list_head *ent, *n; struct w1_slave *sl; list_for_each_safe(ent, n, &md->slist) { sl = list_entry(ent, struct w1_slave, w1_slave_entry); c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); } } mutex_unlock(&md->mutex); return PAGE_SIZE - c; } static ssize_t w1_master_attribute_show_add(struct device *dev, struct device_attribute *attr, char *buf) { int c = PAGE_SIZE; c -= snprintf(buf+PAGE_SIZE - c, c, "write device id xx-xxxxxxxxxxxx to add slave\n"); return PAGE_SIZE - c; } static int w1_atoreg_num(struct device *dev, const char *buf, size_t count, struct w1_reg_num *rn) { unsigned int family; unsigned long long id; int i; u64 rn64_le; /* The CRC value isn't read from the user because the sysfs directory * doesn't include it and most messages from the bus search don't * print it either. It would be unreasonable for the user to then * provide it. */ const char *error_msg = "bad slave string format, expecting " "ff-dddddddddddd\n"; if (buf[2] != '-') { dev_err(dev, "%s", error_msg); return -EINVAL; } i = sscanf(buf, "%02x-%012llx", &family, &id); if (i != 2) { dev_err(dev, "%s", error_msg); return -EINVAL; } rn->family = family; rn->id = id; rn64_le = cpu_to_le64(*(u64 *)rn); rn->crc = w1_calc_crc8((u8 *)&rn64_le, 7); #if 0 dev_info(dev, "With CRC device is %02x.%012llx.%02x.\n", rn->family, (unsigned long long)rn->id, rn->crc); #endif return 0; } /* Searches the slaves in the w1_master and returns a pointer or NULL. 
* Note: must hold the mutex */ static struct w1_slave *w1_slave_search_device(struct w1_master *dev, struct w1_reg_num *rn) { struct w1_slave *sl; list_for_each_entry(sl, &dev->slist, w1_slave_entry) { if (sl->reg_num.family == rn->family && sl->reg_num.id == rn->id && sl->reg_num.crc == rn->crc) { return sl; } } return NULL; } static ssize_t w1_master_attribute_store_add(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_master *md = dev_to_w1_master(dev); struct w1_reg_num rn; struct w1_slave *sl; ssize_t result = count; if (w1_atoreg_num(dev, buf, count, &rn)) return -EINVAL; mutex_lock(&md->mutex); sl = w1_slave_search_device(md, &rn); /* It would be nice to do a targeted search one the one-wire bus * for the new device to see if it is out there or not. But the * current search doesn't support that. */ if (sl) { dev_info(dev, "Device %s already exists\n", sl->name); result = -EINVAL; } else { w1_attach_slave_device(md, &rn); } mutex_unlock(&md->mutex); return result; } static ssize_t w1_master_attribute_show_remove(struct device *dev, struct device_attribute *attr, char *buf) { int c = PAGE_SIZE; c -= snprintf(buf+PAGE_SIZE - c, c, "write device id xx-xxxxxxxxxxxx to remove slave\n"); return PAGE_SIZE - c; } static ssize_t w1_master_attribute_store_remove(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_master *md = dev_to_w1_master(dev); struct w1_reg_num rn; struct w1_slave *sl; ssize_t result = count; if (w1_atoreg_num(dev, buf, count, &rn)) return -EINVAL; mutex_lock(&md->mutex); sl = w1_slave_search_device(md, &rn); if (sl) { w1_slave_detach(sl); } else { dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family, (unsigned long long)rn.id); result = -EINVAL; } mutex_unlock(&md->mutex); return result; } #define W1_MASTER_ATTR_RO(_name, _mode) \ struct device_attribute w1_master_attribute_##_name = \ __ATTR(w1_master_##_name, _mode, \ 
w1_master_attribute_show_##_name, NULL) #define W1_MASTER_ATTR_RW(_name, _mode) \ struct device_attribute w1_master_attribute_##_name = \ __ATTR(w1_master_##_name, _mode, \ w1_master_attribute_show_##_name, \ w1_master_attribute_store_##_name) static W1_MASTER_ATTR_RO(name, S_IRUGO); static W1_MASTER_ATTR_RO(slaves, S_IRUGO); static W1_MASTER_ATTR_RO(slave_count, S_IRUGO); static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO); static W1_MASTER_ATTR_RO(attempts, S_IRUGO); static W1_MASTER_ATTR_RO(timeout, S_IRUGO); static W1_MASTER_ATTR_RO(pointer, S_IRUGO); static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP); static struct attribute *w1_master_default_attrs[] = { &w1_master_attribute_name.attr, &w1_master_attribute_slaves.attr, &w1_master_attribute_slave_count.attr, &w1_master_attribute_max_slave_count.attr, &w1_master_attribute_attempts.attr, &w1_master_attribute_timeout.attr, &w1_master_attribute_pointer.attr, &w1_master_attribute_search.attr, &w1_master_attribute_pullup.attr, &w1_master_attribute_add.attr, &w1_master_attribute_remove.attr, NULL }; static struct attribute_group w1_master_defattr_group = { .attrs = w1_master_default_attrs, }; int w1_create_master_attributes(struct w1_master *master) { return sysfs_create_group(&master->dev.kobj, &w1_master_defattr_group); } void w1_destroy_master_attributes(struct w1_master *master) { sysfs_remove_group(&master->dev.kobj, &w1_master_defattr_group); } #ifdef CONFIG_HOTPLUG static int w1_uevent(struct device *dev, struct kobj_uevent_env *env) { struct w1_master *md = NULL; struct w1_slave *sl = NULL; char *event_owner, *name; int err; if (dev->driver == &w1_master_driver) { md = container_of(dev, struct w1_master, dev); event_owner = "master"; name = md->name; } else if (dev->driver == &w1_slave_driver) { sl = 
container_of(dev, struct w1_slave, dev); event_owner = "slave"; name = sl->name; } else { dev_dbg(dev, "Unknown event.\n"); return -EINVAL; } dev_dbg(dev, "Hotplug event for %s %s, bus_id=%s.\n", event_owner, name, dev_name(dev)); if (dev->driver != &w1_slave_driver || !sl) return 0; err = add_uevent_var(env, "W1_FID=%02X", sl->reg_num.family); if (err) return err; err = add_uevent_var(env, "W1_SLAVE_ID=%024LX", (unsigned long long)sl->reg_num.id); if (err) return err; return 0; }; #else static int w1_uevent(struct device *dev, struct kobj_uevent_env *env) { return 0; } #endif static int __w1_attach_slave_device(struct w1_slave *sl) { int err; sl->dev.parent = &sl->master->dev; sl->dev.driver = &w1_slave_driver; sl->dev.bus = &w1_bus_type; sl->dev.release = &w1_slave_release; dev_set_name(&sl->dev, "%02x-%012llx", (unsigned int) sl->reg_num.family, (unsigned long long) sl->reg_num.id); snprintf(&sl->name[0], sizeof(sl->name), "%02x-%012llx", (unsigned int) sl->reg_num.family, (unsigned long long) sl->reg_num.id); dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__, dev_name(&sl->dev), sl); err = device_register(&sl->dev); if (err < 0) { dev_err(&sl->dev, "Device registration [%s] failed. err=%d\n", dev_name(&sl->dev), err); return err; } /* Create "name" entry */ err = device_create_file(&sl->dev, &w1_slave_attr_name); if (err < 0) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. err=%d\n", dev_name(&sl->dev), err); goto out_unreg; } /* Create "id" entry */ err = device_create_file(&sl->dev, &w1_slave_attr_id); if (err < 0) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. err=%d\n", dev_name(&sl->dev), err); goto out_rem1; } /* if the family driver needs to initialize something... */ if (sl->family->fops && sl->family->fops->add_slave && ((err = sl->family->fops->add_slave(sl)) < 0)) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. 
err=%d\n", dev_name(&sl->dev), err); goto out_rem2; } list_add_tail(&sl->w1_slave_entry, &sl->master->slist); return 0; out_rem2: device_remove_file(&sl->dev, &w1_slave_attr_id); out_rem1: device_remove_file(&sl->dev, &w1_slave_attr_name); out_unreg: device_unregister(&sl->dev); return err; } static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) { struct w1_slave *sl; struct w1_family *f; int err; struct w1_netlink_msg msg; sl = kzalloc(sizeof(struct w1_slave), GFP_KERNEL); if (!sl) { dev_err(&dev->dev, "%s: failed to allocate new slave device.\n", __func__); return -ENOMEM; } sl->owner = THIS_MODULE; sl->master = dev; set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); memset(&msg, 0, sizeof(msg)); memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); atomic_set(&sl->refcnt, 0); init_completion(&sl->released); spin_lock(&w1_flock); f = w1_family_registered(rn->family); if (!f) { f= &w1_default_family; dev_info(&dev->dev, "Family %x for %02x.%012llx.%02x is not registered.\n", rn->family, rn->family, (unsigned long long)rn->id, rn->crc); } __w1_family_get(f); spin_unlock(&w1_flock); sl->family = f; err = __w1_attach_slave_device(sl); if (err < 0) { dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, sl->name); w1_family_put(sl->family); kfree(sl); return err; } sl->ttl = dev->slave_ttl; dev->slave_count++; memcpy(msg.id.id, rn, sizeof(msg.id)); msg.type = W1_SLAVE_ADD; w1_netlink_send(dev, &msg); return 0; } void w1_slave_detach(struct w1_slave *sl) { struct w1_netlink_msg msg; dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl); list_del(&sl->w1_slave_entry); if (sl->family->fops && sl->family->fops->remove_slave) sl->family->fops->remove_slave(sl); memset(&msg, 0, sizeof(msg)); memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); msg.type = W1_SLAVE_REMOVE; w1_netlink_send(sl->master, &msg); device_remove_file(&sl->dev, &w1_slave_attr_id); device_remove_file(&sl->dev, &w1_slave_attr_name); device_unregister(&sl->dev); 
wait_for_completion(&sl->released); kfree(sl); } struct w1_master *w1_search_master_id(u32 id) { struct w1_master *dev; int found = 0; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { if (dev->id == id) { found = 1; atomic_inc(&dev->refcnt); break; } } mutex_unlock(&w1_mlock); return (found)?dev:NULL; } struct w1_slave *w1_search_slave(struct w1_reg_num *id) { struct w1_master *dev; struct w1_slave *sl = NULL; int found = 0; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { mutex_lock(&dev->mutex); list_for_each_entry(sl, &dev->slist, w1_slave_entry) { if (sl->reg_num.family == id->family && sl->reg_num.id == id->id && sl->reg_num.crc == id->crc) { found = 1; atomic_inc(&dev->refcnt); atomic_inc(&sl->refcnt); break; } } mutex_unlock(&dev->mutex); if (found) break; } mutex_unlock(&w1_mlock); return (found)?sl:NULL; } void w1_reconnect_slaves(struct w1_family *f, int attach) { struct w1_slave *sl, *sln; struct w1_master *dev; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { dev_dbg(&dev->dev, "Reconnecting slaves in device %s " "for family %02x.\n", dev->name, f->fid); mutex_lock(&dev->mutex); list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { /* If it is a new family, slaves with the default * family driver and are that family will be * connected. If the family is going away, devices * matching that family are reconneced. 
*/ if ((attach && sl->family->fid == W1_FAMILY_DEFAULT && sl->reg_num.family == f->fid) || (!attach && sl->family->fid == f->fid)) { struct w1_reg_num rn; memcpy(&rn, &sl->reg_num, sizeof(rn)); w1_slave_detach(sl); w1_attach_slave_device(dev, &rn); } } dev_dbg(&dev->dev, "Reconnecting slaves in device %s " "has been finished.\n", dev->name); mutex_unlock(&dev->mutex); } mutex_unlock(&w1_mlock); } void w1_slave_found(struct w1_master *dev, u64 rn) { struct w1_slave *sl; struct w1_reg_num *tmp; u64 rn_le = cpu_to_le64(rn); atomic_inc(&dev->refcnt); tmp = (struct w1_reg_num *) &rn; sl = w1_slave_search_device(dev, tmp); if (sl) { set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); } else { if (rn && tmp->crc == w1_calc_crc8((u8 *)&rn_le, 7)) w1_attach_slave_device(dev, tmp); } atomic_dec(&dev->refcnt); } /** * Performs a ROM Search & registers any devices found. * The 1-wire search is a simple binary tree search. * For each bit of the address, we read two bits and write one bit. * The bit written will put to sleep all devies that don't match that bit. * When the two reads differ, the direction choice is obvious. * When both bits are 0, we must choose a path to take. * When we can scan all 64 bits without having to choose a path, we are done. * * See "Application note 187 1-wire search algorithm" at www.maxim-ic.com * * @dev The master device to search * @cb Function to call when a device is found */ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb) { u64 last_rn, rn, tmp64; int i, slave_count = 0; int last_zero, last_device; int search_bit, desc_bit; u8 triplet_ret = 0; search_bit = 0; rn = last_rn = 0; last_device = 0; last_zero = -1; desc_bit = 64; while ( !last_device && (slave_count++ < dev->max_slave_count) ) { last_rn = rn; rn = 0; /* * Reset bus and all 1-wire device state machines * so they can respond to our requests. * * Return 0 - device(s) present, 1 - no devices present. 
*/ if (w1_reset_bus(dev)) { dev_dbg(&dev->dev, "No devices present on the wire.\n"); break; } /* Start the search */ w1_write_8(dev, search_type); for (i = 0; i < 64; ++i) { /* Determine the direction/search bit */ if (i == desc_bit) search_bit = 1; /* took the 0 path last time, so take the 1 path */ else if (i > desc_bit) search_bit = 0; /* take the 0 path on the next branch */ else search_bit = ((last_rn >> i) & 0x1); /** Read two bits and write one bit */ triplet_ret = w1_triplet(dev, search_bit); /* quit if no device responded */ if ( (triplet_ret & 0x03) == 0x03 ) break; /* If both directions were valid, and we took the 0 path... */ if (triplet_ret == 0) last_zero = i; /* extract the direction taken & update the device number */ tmp64 = (triplet_ret >> 2); rn |= (tmp64 << i); if (kthread_should_stop()) { dev_dbg(&dev->dev, "Abort w1_search\n"); return; } } if ( (triplet_ret & 0x03) != 0x03 ) { if ( (desc_bit == last_zero) || (last_zero < 0)) last_device = 1; desc_bit = last_zero; cb(dev, rn); } } } void w1_search_process_cb(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb) { struct w1_slave *sl, *sln; list_for_each_entry(sl, &dev->slist, w1_slave_entry) clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); w1_search_devices(dev, search_type, cb); list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) w1_slave_detach(sl); else if (test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags)) sl->ttl = dev->slave_ttl; } if (dev->search_count > 0) dev->search_count--; } static void w1_search_process(struct w1_master *dev, u8 search_type) { w1_search_process_cb(dev, search_type, w1_slave_found); } int w1_process(void *data) { struct w1_master *dev = (struct w1_master *) data; /* As long as w1_timeout is only set by a module parameter the sleep * time can be calculated in jiffies once. 
*/ const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); while (!kthread_should_stop()) { if (dev->search_count) { mutex_lock(&dev->mutex); w1_search_process(dev, W1_SEARCH); mutex_unlock(&dev->mutex); } try_to_freeze(); __set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; /* Only sleep when the search is active. */ if (dev->search_count) schedule_timeout(jtime); else schedule(); } atomic_dec(&dev->refcnt); return 0; } static int __init w1_init(void) { int retval; printk(KERN_INFO "Driver for 1-wire Dallas network protocol.\n"); w1_init_netlink(); retval = bus_register(&w1_bus_type); if (retval) { printk(KERN_ERR "Failed to register bus. err=%d.\n", retval); goto err_out_exit_init; } retval = driver_register(&w1_master_driver); if (retval) { printk(KERN_ERR "Failed to register master driver. err=%d.\n", retval); goto err_out_bus_unregister; } retval = driver_register(&w1_slave_driver); if (retval) { printk(KERN_ERR "Failed to register master driver. err=%d.\n", retval); goto err_out_master_unregister; } return 0; #if 0 /* For undoing the slave register if there was a step after it. */ err_out_slave_unregister: driver_unregister(&w1_slave_driver); #endif err_out_master_unregister: driver_unregister(&w1_master_driver); err_out_bus_unregister: bus_unregister(&w1_bus_type); err_out_exit_init: return retval; } static void __exit w1_fini(void) { struct w1_master *dev; /* Set netlink removal messages and some cleanup */ list_for_each_entry(dev, &w1_masters, w1_master_entry) __w1_remove_master_device(dev); w1_fini_netlink(); driver_unregister(&w1_slave_driver); driver_unregister(&w1_master_driver); bus_unregister(&w1_bus_type); } module_init(w1_init); module_exit(w1_fini);
gpl-2.0
ProtouProject/android_kernel_htc_protou
drivers/virtio/virtio_ring.c
1040
19525
/* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/virtio.h> #include <linux/virtio_ring.h> #include <linux/virtio_config.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/hrtimer.h> /* virtio guest is communicating with a virtual "device" that actually runs on * a host processor. Memory barriers are used to control SMP effects. */ #ifdef CONFIG_SMP /* Where possible, use SMP barriers which are more lightweight than mandatory * barriers, because mandatory barriers control MMIO effects on accesses * through relaxed memory I/O windows (which virtio-pci does not use). */ #define virtio_mb(vq) \ do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0) #define virtio_rmb(vq) \ do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0) #define virtio_wmb(vq) \ do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0) #else /* We must force memory ordering even if guest is UP since host could be * running on another CPU, but SMP barriers are defined to barrier() in that * configuration. So fall back to mandatory barriers instead. 
*/ #define virtio_mb(vq) mb() #define virtio_rmb(vq) rmb() #define virtio_wmb(vq) wmb() #endif #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #endif struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ struct vring vring; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Number of free buffers */ unsigned int num_free; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. */ u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif /* Tokens for callbacks. */ void *data[]; }; #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) /* Set up an indirect table of descriptors and add it to the queue. 
*/ static int vring_add_indirect(struct vring_virtqueue *vq, struct scatterlist sg[], unsigned int out, unsigned int in, gfp_t gfp) { struct vring_desc *desc; unsigned head; int i; desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; /* Transfer entries from the sg list into the indirect page */ for (i = 0; i < out; i++) { desc[i].flags = VRING_DESC_F_NEXT; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; sg++; } for (; i < (out + in); i++) { desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; sg++; } /* Last one doesn't continue. */ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ vq->num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ vq->free_head = vq->vring.desc[head].next; return head; } /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. * @sg: the description of the buffer(s). * @out_num: the number of sg readable by other side * @in_num: the number of sg which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns remaining capacity of queue or a negative error * (ie. ENOSPC). Note that it only really makes sense to treat all * positive return values as "available": indirect buffers mean that * we can put an entire sg[] array inside a single queue entry. 
*/ int virtqueue_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i, avail, uninitialized_var(prev); int head; START_USE(vq); BUG_ON(data == NULL); #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && (out + in) > 1 && vq->num_free) { head = vring_add_indirect(vq, sg, out, in, gfp); if (likely(head >= 0)) goto add_head; } BUG_ON(out + in > vq->vring.num); BUG_ON(out + in == 0); if (vq->num_free < out + in) { pr_debug("Can't add buf len %i - avail = %i\n", out + in, vq->num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ vq->num_free -= out + in; head = vq->free_head; for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; sg++; } for (; in; i = vq->vring.desc[i].next, in--) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; sg++; } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ vq->free_head = i; add_head: /* Set token. */ vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they * do sync). 
*/ avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq); vq->vring.avail->idx++; vq->num_added++; /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return vq->num_free; } EXPORT_SYMBOL_GPL(virtqueue_add_buf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. */ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(vring_avail_event(&vq->vring), new, old); } else { needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); } END_USE(vq); return needs_kick; } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @vq: the struct virtqueue * * This does not need to be serialized. */ void virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Prod other side to tell it about changes. 
*/ vq->notify(_vq); } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_buf calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ void virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i; /* Clear data ptr. */ vq->data[head] = NULL; /* Put back on free list: find end */ i = head; /* Free the indirect table */ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) kfree(phys_to_virt(vq->vring.desc[i].addr)); while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) { return vq->last_used_idx != vq->vring.used->idx; } /** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the driver wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_buf(). 
*/ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->data[i])) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. */ ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq); } #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. 
* * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq); if (unlikely(more_used(vq))) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
*/ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; virtio_mb(vq); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_buf(). * This is not valid on an active queue; it is useful only for device * shutdown. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->data[i]) continue; /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); vq->vring.avail->idx--; END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->num_free != vq->vring.num); END_USE(vq); return NULL; } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) return IRQ_HANDLED; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct vring_virtqueue *vq; unsigned int i; /* We assume num is a power of 2. 
*/ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); if (!vq) return NULL; vring_init(&vq->vring, num, pages, vring_align); vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. */ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ vq->num_free = num; vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; vq->data[i] = NULL; } vq->data[i] = NULL; return &vq->vq; } EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); /* Manipulates transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. 
*/ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); MODULE_LICENSE("GPL");
gpl-2.0
Multirom-mi4i/android_kernel_xiaomi_ferrari
drivers/mmc/host/vub300.c
2320
74256
/* * Remote VUB300 SDIO/SDmem Host Controller Driver * * Copyright (C) 2010 Elan Digital Systems Limited * * based on USB Skeleton driver - 2.2 * * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 * * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, * by virtue of this driver, to have been plugged into a local * SDIO host controller, similar to, say, a PCI Ricoh controller * This is because this kernel device driver is both a USB 2.0 * client device driver AND an MMC host controller driver. Thus * if there is an existing driver for the inserted SDIO/SDmem/MMC * device then that driver will be used by the kernel to manage * the device in exactly the same fashion as if it had been * directly plugged into, say, a local pci bus Ricoh controller * * RANT: this driver was written using a display 128x48 - converting it * to a line width of 80 makes it very difficult to support. In * particular functions have been broken down into sub functions * and the original meaningful names have been shortened into * cryptic ones. * The problem is that executing a fragment of code subject to * two conditions means an indentation of 24, thus leaving only * 56 characters for a C statement. And that is quite ridiculous! 
* * Data types: data passed to/from the VUB300 is fixed to a number of * bits and driver data fields reflect that limit by using * u8, u16, u32 */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/workqueue.h> #include <linux/ctype.h> #include <linux/firmware.h> #include <linux/scatterlist.h> struct host_controller_info { u8 info_size; u16 firmware_version; u8 number_of_ports; } __packed; #define FIRMWARE_BLOCK_BOUNDARY 1024 struct sd_command_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; /* Bit7 - Rd/Wr */ u8 command_index; u8 transfer_size[4]; /* ReadSize + ReadSize */ u8 response_type; u8 arguments[4]; u8 block_count[2]; u8 block_size[2]; u8 block_boundary[2]; u8 reserved[44]; /* to pad out to 64 bytes */ } __packed; struct sd_irqpoll_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; /* Bit7 - Rd/Wr */ u8 padding[16]; /* don't ask why !! 
*/ u8 poll_timeout_msb; u8 poll_timeout_lsb; u8 reserved[42]; /* to pad out to 64 bytes */ } __packed; struct sd_common_header { u8 header_size; u8 header_type; u8 port_number; } __packed; struct sd_response_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; u8 command_index; u8 command_response[0]; } __packed; struct sd_status_header { u8 header_size; u8 header_type; u8 port_number; u16 port_flags; u32 sdio_clock; u16 host_header_size; u16 func_header_size; u16 ctrl_header_size; } __packed; struct sd_error_header { u8 header_size; u8 header_type; u8 port_number; u8 error_code; } __packed; struct sd_interrupt_header { u8 header_size; u8 header_type; u8 port_number; } __packed; struct offload_registers_access { u8 command_byte[4]; u8 Respond_Byte[4]; } __packed; #define INTERRUPT_REGISTER_ACCESSES 15 struct sd_offloaded_interrupt { u8 header_size; u8 header_type; u8 port_number; struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; } __packed; struct sd_register_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; u8 command_index; u8 command_response[6]; } __packed; #define PIGGYBACK_REGISTER_ACCESSES 14 struct sd_offloaded_piggyback { struct sd_register_header sdio; struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; } __packed; union sd_response { struct sd_common_header common; struct sd_status_header status; struct sd_error_header error; struct sd_interrupt_header interrupt; struct sd_response_header response; struct sd_offloaded_interrupt irq; struct sd_offloaded_piggyback pig; } __packed; union sd_command { struct sd_command_header head; struct sd_irqpoll_header poll; } __packed; enum SD_RESPONSE_TYPE { SDRT_UNSPECIFIED = 0, SDRT_NONE, SDRT_1, SDRT_1B, SDRT_2, SDRT_3, SDRT_4, SDRT_5, SDRT_5B, SDRT_6, SDRT_7, }; #define RESPONSE_INTERRUPT 0x01 #define RESPONSE_ERROR 0x02 #define RESPONSE_STATUS 0x03 #define RESPONSE_IRQ_DISABLED 0x05 #define RESPONSE_IRQ_ENABLED 0x06 #define 
RESPONSE_PIGGYBACKED 0x07 #define RESPONSE_NO_INTERRUPT 0x08 #define RESPONSE_PIG_DISABLED 0x09 #define RESPONSE_PIG_ENABLED 0x0A #define SD_ERROR_1BIT_TIMEOUT 0x01 #define SD_ERROR_4BIT_TIMEOUT 0x02 #define SD_ERROR_1BIT_CRC_WRONG 0x03 #define SD_ERROR_4BIT_CRC_WRONG 0x04 #define SD_ERROR_1BIT_CRC_ERROR 0x05 #define SD_ERROR_4BIT_CRC_ERROR 0x06 #define SD_ERROR_NO_CMD_ENDBIT 0x07 #define SD_ERROR_NO_1BIT_DATEND 0x08 #define SD_ERROR_NO_4BIT_DATEND 0x09 #define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A #define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B #define SD_ERROR_ILLEGAL_COMMAND 0x0C #define SD_ERROR_NO_DEVICE 0x0D #define SD_ERROR_TRANSFER_LENGTH 0x0E #define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F #define SD_ERROR_4BIT_DATA_TIMEOUT 0x10 #define SD_ERROR_ILLEGAL_STATE 0x11 #define SD_ERROR_UNKNOWN_ERROR 0x12 #define SD_ERROR_RESERVED_ERROR 0x13 #define SD_ERROR_INVALID_FUNCTION 0x14 #define SD_ERROR_OUT_OF_RANGE 0x15 #define SD_ERROR_STAT_CMD 0x16 #define SD_ERROR_STAT_DATA 0x17 #define SD_ERROR_STAT_CMD_TIMEOUT 0x18 #define SD_ERROR_SDCRDY_STUCK 0x19 #define SD_ERROR_UNHANDLED 0x1A #define SD_ERROR_OVERRUN 0x1B #define SD_ERROR_PIO_TIMEOUT 0x1C #define FUN(c) (0x000007 & (c->arg>>28)) #define REG(c) (0x01FFFF & (c->arg>>9)) static bool limit_speed_to_24_MHz; module_param(limit_speed_to_24_MHz, bool, 0644); MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); static bool pad_input_to_usb_pkt; module_param(pad_input_to_usb_pkt, bool, 0644); MODULE_PARM_DESC(pad_input_to_usb_pkt, "Pad USB data input transfers to whole USB Packet"); static bool disable_offload_processing; module_param(disable_offload_processing, bool, 0644); MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); static bool force_1_bit_data_xfers; module_param(force_1_bit_data_xfers, bool, 0644); MODULE_PARM_DESC(force_1_bit_data_xfers, "Force SDIO Data Transfers to 1-bit Mode"); static bool force_polling_for_irqs; module_param(force_polling_for_irqs, bool, 
0644); MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); static int firmware_irqpoll_timeout = 1024; module_param(firmware_irqpoll_timeout, int, 0644); MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); static int force_max_req_size = 128; module_param(force_max_req_size, int, 0644); MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); #ifdef SMSC_DEVELOPMENT_BOARD static int firmware_rom_wait_states = 0x04; #else static int firmware_rom_wait_states = 0x1C; #endif module_param(firmware_rom_wait_states, int, 0644); MODULE_PARM_DESC(firmware_rom_wait_states, "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); #define ELAN_VENDOR_ID 0x2201 #define VUB300_VENDOR_ID 0x0424 #define VUB300_PRODUCT_ID 0x012C static struct usb_device_id vub300_table[] = { {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, vub300_table); static struct workqueue_struct *cmndworkqueue; static struct workqueue_struct *pollworkqueue; static struct workqueue_struct *deadworkqueue; static inline int interface_to_InterfaceNumber(struct usb_interface *interface) { if (!interface) return -1; if (!interface->cur_altsetting) return -1; return interface->cur_altsetting->desc.bInterfaceNumber; } struct sdio_register { unsigned func_num:3; unsigned sdio_reg:17; unsigned activate:1; unsigned prepared:1; unsigned regvalue:8; unsigned response:8; unsigned sparebit:26; }; struct vub300_mmc_host { struct usb_device *udev; struct usb_interface *interface; struct kref kref; struct mutex cmd_mutex; struct mutex irq_mutex; char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ u8 cmnd_out_ep; /* EndPoint for commands */ u8 cmnd_res_ep; /* EndPoint for responses */ u8 data_out_ep; /* EndPoint for out data */ u8 data_inp_ep; /* EndPoint for inp data */ bool card_powered; bool card_present; bool read_only; bool 
large_usb_packets; bool app_spec; /* ApplicationSpecific */ bool irq_enabled; /* by the MMC CORE */ bool irq_disabled; /* in the firmware */ unsigned bus_width:4; u8 total_offload_count; u8 dynamic_register_count; u8 resp_len; u32 datasize; int errors; int usb_transport_fail; int usb_timed_out; int irqs_queued; struct sdio_register sdio_register[16]; struct offload_interrupt_function_register { #define MAXREGBITS 4 #define MAXREGS (1<<MAXREGBITS) #define MAXREGMASK (MAXREGS-1) u8 offload_count; u32 offload_point; struct offload_registers_access reg[MAXREGS]; } fn[8]; u16 fbs[8]; /* Function Block Size */ struct mmc_command *cmd; struct mmc_request *req; struct mmc_data *data; struct mmc_host *mmc; struct urb *urb; struct urb *command_out_urb; struct urb *command_res_urb; struct completion command_complete; struct completion irqpoll_complete; union sd_command cmnd; union sd_response resp; struct timer_list sg_transfer_timer; struct usb_sg_request sg_request; struct timer_list inactivity_timer; struct work_struct deadwork; struct work_struct cmndwork; struct delayed_work pollwork; struct host_controller_info hc_info; struct sd_status_header system_port_status; u8 padded_buffer[64]; }; #define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) #define SET_TRANSFER_PSEUDOCODE 21 #define SET_INTERRUPT_PSEUDOCODE 20 #define SET_FAILURE_MODE 18 #define SET_ROM_WAIT_STATES 16 #define SET_IRQ_ENABLE 13 #define SET_CLOCK_SPEED 11 #define SET_FUNCTION_BLOCK_SIZE 9 #define SET_SD_DATA_MODE 6 #define SET_SD_POWER 4 #define ENTER_DFU_MODE 3 #define GET_HC_INF0 1 #define GET_SYSTEM_PORT_STATUS 0 static void vub300_delete(struct kref *kref) { /* kref callback - softirq */ struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); struct mmc_host *mmc = vub300->mmc; usb_free_urb(vub300->command_out_urb); vub300->command_out_urb = NULL; usb_free_urb(vub300->command_res_urb); vub300->command_res_urb = NULL; usb_put_dev(vub300->udev); mmc_free_host(mmc); /* 
* and hence also frees vub300 * which is contained at the end of struct mmc */ } static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) { kref_get(&vub300->kref); if (queue_work(cmndworkqueue, &vub300->cmndwork)) { /* * then the cmndworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the cmndworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) { kref_get(&vub300->kref); if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { /* * then the pollworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the pollworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) { kref_get(&vub300->kref); if (queue_work(deadworkqueue, &vub300->deadwork)) { /* * then the deadworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the deadworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void irqpoll_res_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) vub300->usb_transport_fail = urb->status; complete(&vub300->irqpoll_complete); } static void irqpoll_out_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct 
vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) { vub300->usb_transport_fail = urb->status; complete(&vub300->irqpoll_complete); return; } else { int ret; unsigned int pipe = usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, &vub300->resp, sizeof(vub300->resp), irqpoll_res_completed, vub300); vub300->command_res_urb->actual_length = 0; ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); if (ret) { vub300->usb_transport_fail = ret; complete(&vub300->irqpoll_complete); } return; } } static void send_irqpoll(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ int retval; int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); vub300->cmnd.poll.header_size = 22; vub300->cmnd.poll.header_type = 1; vub300->cmnd.poll.port_number = 0; vub300->cmnd.poll.command_type = 2; vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) , &vub300->cmnd, sizeof(vub300->cmnd) , irqpoll_out_completed, vub300); retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); if (0 > retval) { vub300->usb_transport_fail = retval; vub300_queue_poll_work(vub300, 1); complete(&vub300->irqpoll_complete); return; } else { return; } } static void new_system_port_status(struct vub300_mmc_host *vub300) { int old_card_present = vub300->card_present; int new_card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; if (new_card_present && !old_card_present) { dev_info(&vub300->udev->dev, "card just inserted\n"); vub300->card_present = 1; vub300->bus_width = 0; if (disable_offload_processing) strncpy(vub300->vub_name, "EMPTY Processing Disabled", sizeof(vub300->vub_name)); else vub300->vub_name[0] = 0; mmc_detect_change(vub300->mmc, 1); } else if (!new_card_present && old_card_present) { dev_info(&vub300->udev->dev, "card just ejected\n"); vub300->card_present = 0; mmc_detect_change(vub300->mmc, 0); } else { /* no change */ } } static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, struct offload_registers_access *register_access, u8 func) { u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, sizeof(struct offload_registers_access)); vub300->fn[func].offload_count += 1; vub300->total_offload_count += 1; } static void add_offloaded_reg(struct vub300_mmc_host *vub300, struct offload_registers_access *register_access) { u32 Register = ((0x03 & register_access->command_byte[0]) << 15) | ((0xFF & register_access->command_byte[1]) << 7) | ((0xFE & register_access->command_byte[2]) >> 1); u8 func = ((0x70 & register_access->command_byte[0]) >> 4); u8 regs = vub300->dynamic_register_count; u8 i = 0; while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { if (vub300->sdio_register[i].func_num == func && vub300->sdio_register[i].sdio_reg == Register) { if (vub300->sdio_register[i].prepared == 0) vub300->sdio_register[i].prepared = 1; vub300->sdio_register[i].response = register_access->Respond_Byte[2]; vub300->sdio_register[i].regvalue = register_access->Respond_Byte[3]; return; } else { i += 1; continue; } }; __add_offloaded_reg_to_fifo(vub300, register_access, func); } static void check_vub300_port_status(struct vub300_mmc_host *vub300) { /* * cmd_mutex is held by vub300_pollwork_thread, * vub300_deadwork_thread or vub300_cmndwork_thread */ int retval; retval = 
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), HZ); if (sizeof(vub300->system_port_status) == retval) new_system_port_status(vub300); } static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ if (vub300->command_res_urb->actual_length == 0) return; switch (vub300->resp.common.header_type) { case RESPONSE_INTERRUPT: mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else vub300->irqs_queued += 1; vub300->irq_disabled = 1; mutex_unlock(&vub300->irq_mutex); break; case RESPONSE_ERROR: if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) check_vub300_port_status(vub300); break; case RESPONSE_STATUS: vub300->system_port_status = vub300->resp.status; new_system_port_status(vub300); if (!vub300->card_present) vub300_queue_poll_work(vub300, HZ / 5); break; case RESPONSE_IRQ_DISABLED: { int offloaded_data_length = vub300->resp.common.header_size - 3; int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else vub300->irqs_queued += 1; vub300->irq_disabled = 1; mutex_unlock(&vub300->irq_mutex); break; } case RESPONSE_IRQ_ENABLED: { int offloaded_data_length = vub300->resp.common.header_size - 3; int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else if (vub300->irqs_queued) vub300->irqs_queued += 1; else vub300->irqs_queued += 1; vub300->irq_disabled = 0; mutex_unlock(&vub300->irq_mutex); break; } case 
RESPONSE_NO_INTERRUPT: vub300_queue_poll_work(vub300, 1); break; default: break; } } static void __do_poll(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ long commretval; mod_timer(&vub300->inactivity_timer, jiffies + HZ); init_completion(&vub300->irqpoll_complete); send_irqpoll(vub300); commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, msecs_to_jiffies(500)); if (vub300->usb_transport_fail) { /* no need to do anything */ } else if (commretval == 0) { vub300->usb_timed_out = 1; usb_kill_urb(vub300->command_out_urb); usb_kill_urb(vub300->command_res_urb); } else if (commretval < 0) { vub300_queue_poll_work(vub300, 1); } else { /* commretval > 0 */ __vub300_irqpoll_response(vub300); } } /* this thread runs only when the driver * is trying to poll the device for an IRQ */ static void vub300_pollwork_thread(struct work_struct *work) { /* NOT irq */ struct vub300_mmc_host *vub300 = container_of(work, struct vub300_mmc_host, pollwork.work); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); return; } mutex_lock(&vub300->cmd_mutex); if (vub300->cmd) { vub300_queue_poll_work(vub300, 1); } else if (!vub300->card_present) { /* no need to do anything */ } else { /* vub300->card_present */ mutex_lock(&vub300->irq_mutex); if (!vub300->irq_enabled) { mutex_unlock(&vub300->irq_mutex); } else if (vub300->irqs_queued) { vub300->irqs_queued -= 1; mmc_signal_sdio_irq(vub300->mmc); mod_timer(&vub300->inactivity_timer, jiffies + HZ); mutex_unlock(&vub300->irq_mutex); } else { /* NOT vub300->irqs_queued */ mutex_unlock(&vub300->irq_mutex); __do_poll(vub300); } } mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); } static void vub300_deadwork_thread(struct work_struct *work) { /* NOT irq */ struct vub300_mmc_host *vub300 = container_of(work, struct vub300_mmc_host, deadwork); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); return; } mutex_lock(&vub300->cmd_mutex); if 
(vub300->cmd) { /* * a command got in as the inactivity * timer expired - so we just let the * processing of the command show if * the device is dead */ } else if (vub300->card_present) { check_vub300_port_status(vub300); } else if (vub300->mmc && vub300->mmc->card && mmc_card_present(vub300->mmc->card)) { /* * the MMC core must not have responded * to the previous indication - lets * hope that it eventually does so we * will just ignore this for now */ } else { check_vub300_port_status(vub300); } mod_timer(&vub300->inactivity_timer, jiffies + HZ); mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); } static void vub300_inactivity_timer_expired(unsigned long data) { /* softirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); } else if (vub300->cmd) { mod_timer(&vub300->inactivity_timer, jiffies + HZ); } else { vub300_queue_dead_work(vub300); mod_timer(&vub300->inactivity_timer, jiffies + HZ); } } static int vub300_response_error(u8 error_code) { switch (error_code) { case SD_ERROR_PIO_TIMEOUT: case SD_ERROR_1BIT_TIMEOUT: case SD_ERROR_4BIT_TIMEOUT: return -ETIMEDOUT; case SD_ERROR_STAT_DATA: case SD_ERROR_OVERRUN: case SD_ERROR_STAT_CMD: case SD_ERROR_STAT_CMD_TIMEOUT: case SD_ERROR_SDCRDY_STUCK: case SD_ERROR_UNHANDLED: case SD_ERROR_1BIT_CRC_WRONG: case SD_ERROR_4BIT_CRC_WRONG: case SD_ERROR_1BIT_CRC_ERROR: case SD_ERROR_4BIT_CRC_ERROR: case SD_ERROR_NO_CMD_ENDBIT: case SD_ERROR_NO_1BIT_DATEND: case SD_ERROR_NO_4BIT_DATEND: case SD_ERROR_1BIT_DATA_TIMEOUT: case SD_ERROR_4BIT_DATA_TIMEOUT: case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: return -EILSEQ; case 33: return -EILSEQ; case SD_ERROR_ILLEGAL_COMMAND: return -EINVAL; case SD_ERROR_NO_DEVICE: return -ENOMEDIUM; default: return -ENODEV; } } static void command_res_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct 
vub300_mmc_host *)urb->context; if (urb->status) { /* we have to let the initiator handle the error */ } else if (vub300->command_res_urb->actual_length == 0) { /* * we have seen this happen once or twice and * we suspect a buggy USB host controller */ } else if (!vub300->data) { /* this means that the command (typically CMD52) succeeded */ } else if (vub300->resp.common.header_type != 0x02) { /* * this is an error response from the VUB300 chip * and we let the initiator handle it */ } else if (vub300->urb) { vub300->cmd->error = vub300_response_error(vub300->resp.error.error_code); usb_unlink_urb(vub300->urb); } else { vub300->cmd->error = vub300_response_error(vub300->resp.error.error_code); usb_sg_cancel(&vub300->sg_request); } complete(&vub300->command_complete); /* got_response_in */ } static void command_out_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) { complete(&vub300->command_complete); } else { int ret; unsigned int pipe = usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, &vub300->resp, sizeof(vub300->resp), command_res_completed, vub300); vub300->command_res_urb->actual_length = 0; ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); if (ret == 0) { /* * the urb completion handler will call * our completion handler */ } else { /* * and thus we only call it directly * when it will not be called */ complete(&vub300->command_complete); } } } /* * the STUFF bits are masked out for the comparisons */ static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, u32 cmd_arg) { if ((0xFBFFFE00 & cmd_arg) == 0x80022200) vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) vub300->fbs[2] = (cmd_arg << 8) | 
(0x00FF & vub300->fbs[2]); else if ((0xFBFFFE00 & cmd_arg) == 0x80042000) vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); else if ((0xFBFFFE00 & cmd_arg) == 0x80062200) vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) vub300->bus_width = 1; else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) vub300->bus_width = 4; } static void send_command(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_cmndwork_thread */ struct mmc_command *cmd = vub300->cmd; struct mmc_data *data = vub300->data; int retval; int i; u8 response_type; if (vub300->app_spec) { switch (cmd->opcode) { case 6: response_type = SDRT_1; vub300->resp_len = 6; if (0x00000000 == (0x00000003 & cmd->arg)) vub300->bus_width = 1; else if (0x00000002 == (0x00000003 & cmd->arg)) vub300->bus_width = 4; else dev_err(&vub300->udev->dev, "unexpected ACMD6 bus_width=%d\n", 0x00000003 & cmd->arg); break; case 13: response_type = SDRT_1; vub300->resp_len = 6; break; case 22: 
response_type = SDRT_1; vub300->resp_len = 6; break; case 23: response_type = SDRT_1; vub300->resp_len = 6; break; case 41: response_type = SDRT_3; vub300->resp_len = 6; break; case 42: response_type = SDRT_1; vub300->resp_len = 6; break; case 51: response_type = SDRT_1; vub300->resp_len = 6; break; case 55: response_type = SDRT_1; vub300->resp_len = 6; break; default: vub300->resp_len = 0; cmd->error = -EINVAL; complete(&vub300->command_complete); return; } vub300->app_spec = 0; } else { switch (cmd->opcode) { case 0: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 1: response_type = SDRT_3; vub300->resp_len = 6; break; case 2: response_type = SDRT_2; vub300->resp_len = 17; break; case 3: response_type = SDRT_6; vub300->resp_len = 6; break; case 4: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 5: response_type = SDRT_4; vub300->resp_len = 6; break; case 6: response_type = SDRT_1; vub300->resp_len = 6; break; case 7: response_type = SDRT_1B; vub300->resp_len = 6; break; case 8: response_type = SDRT_7; vub300->resp_len = 6; break; case 9: response_type = SDRT_2; vub300->resp_len = 17; break; case 10: response_type = SDRT_2; vub300->resp_len = 17; break; case 12: response_type = SDRT_1B; vub300->resp_len = 6; break; case 13: response_type = SDRT_1; vub300->resp_len = 6; break; case 15: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 16: for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) vub300->fbs[i] = 0xFFFF & cmd->arg; response_type = SDRT_1; vub300->resp_len = 6; break; case 17: case 18: case 24: case 25: case 27: response_type = SDRT_1; vub300->resp_len = 6; break; case 28: case 29: response_type = SDRT_1B; vub300->resp_len = 6; break; case 30: case 32: case 33: response_type = SDRT_1; vub300->resp_len = 6; break; case 38: response_type = SDRT_1B; vub300->resp_len = 6; break; case 42: response_type = SDRT_1; vub300->resp_len = 6; break; case 52: response_type = SDRT_5; vub300->resp_len = 6; 
snoop_block_size_and_bus_width(vub300, cmd->arg); break; case 53: response_type = SDRT_5; vub300->resp_len = 6; break; case 55: response_type = SDRT_1; vub300->resp_len = 6; vub300->app_spec = 1; break; case 56: response_type = SDRT_1; vub300->resp_len = 6; break; default: vub300->resp_len = 0; cmd->error = -EINVAL; complete(&vub300->command_complete); return; } } /* * it is a shame that we can not use "sizeof(struct sd_command_header)" * this is because the packet _must_ be padded to 64 bytes */ vub300->cmnd.head.header_size = 20; vub300->cmnd.head.header_type = 0x00; vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ vub300->cmnd.head.command_type = 0x00; /* standard read command */ vub300->cmnd.head.response_type = response_type; vub300->cmnd.head.command_index = cmd->opcode; vub300->cmnd.head.arguments[0] = cmd->arg >> 24; vub300->cmnd.head.arguments[1] = cmd->arg >> 16; vub300->cmnd.head.arguments[2] = cmd->arg >> 8; vub300->cmnd.head.arguments[3] = cmd->arg >> 0; if (cmd->opcode == 52) { int fn = 0x7 & (cmd->arg >> 28); vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; vub300->cmnd.head.command_type = 0x00; vub300->cmnd.head.transfer_size[0] = 0; vub300->cmnd.head.transfer_size[1] = 0; vub300->cmnd.head.transfer_size[2] = 0; vub300->cmnd.head.transfer_size[3] = 0; } else if (!data) { vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; vub300->cmnd.head.command_type = 0x00; vub300->cmnd.head.transfer_size[0] = 0; vub300->cmnd.head.transfer_size[1] = 0; vub300->cmnd.head.transfer_size[2] = 0; vub300->cmnd.head.transfer_size[3] = 0; } else if (cmd->opcode == 53) { int fn = 0x7 & (cmd->arg >> 28); if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK 
MODE */ vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; } else { /* BYTE MODE */ vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->datasize >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->datasize >> 0) & 0xFF; } vub300->cmnd.head.command_type = (MMC_DATA_READ & data->flags) ? 0x00 : 0x80; vub300->cmnd.head.transfer_size[0] = (vub300->datasize >> 24) & 0xFF; vub300->cmnd.head.transfer_size[1] = (vub300->datasize >> 16) & 0xFF; vub300->cmnd.head.transfer_size[2] = (vub300->datasize >> 8) & 0xFF; vub300->cmnd.head.transfer_size[3] = (vub300->datasize >> 0) & 0xFF; if (vub300->datasize < vub300->fbs[fn]) { vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; } } else { vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; vub300->cmnd.head.command_type = (MMC_DATA_READ & data->flags) ? 
0x00 : 0x80;
	/* transfer size is sent big-endian, one byte at a time */
	vub300->cmnd.head.transfer_size[0] = (vub300->datasize >> 24) & 0xFF;
	vub300->cmnd.head.transfer_size[1] = (vub300->datasize >> 16) & 0xFF;
	vub300->cmnd.head.transfer_size[2] = (vub300->datasize >> 8) & 0xFF;
	vub300->cmnd.head.transfer_size[3] = (vub300->datasize >> 0) & 0xFF;
	if (vub300->datasize < vub300->fbs[0]) {
		vub300->cmnd.head.block_count[0] = 0;
		vub300->cmnd.head.block_count[1] = 0;
	}
	}
	if (vub300->cmnd.head.block_size[0] ||
	    vub300->cmnd.head.block_size[1]) {
		u16 block_size = vub300->cmnd.head.block_size[1] |
			(vub300->cmnd.head.block_size[0] << 8);
		/* largest multiple of the block size below the firmware
		 * block boundary */
		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
			(FIRMWARE_BLOCK_BOUNDARY % block_size);
		vub300->cmnd.head.block_boundary[0] =
			(block_boundary >> 8) & 0xFF;
		vub300->cmnd.head.block_boundary[1] =
			(block_boundary >> 0) & 0xFF;
	} else {
		vub300->cmnd.head.block_boundary[0] = 0;
		vub300->cmnd.head.block_boundary[1] = 0;
	}
	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
			  &vub300->cmnd, sizeof(vub300->cmnd),
			  command_out_completed, vub300);
	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
	if (retval < 0) {
		/* submit failed: report the error and wake the waiter now,
		 * since no completion callback will ever run */
		cmd->error = retval;
		complete(&vub300->command_complete);
		return;
	} else {
		return;
	}
}

/*
 * timer callback runs in atomic mode
 * so it cannot call usb_kill_urb()
 */
static void vub300_sg_timed_out(unsigned long data)
{
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
	vub300->usb_timed_out = 1;
	/* only unlink (async) here - atomic context forbids usb_kill_urb() */
	usb_sg_cancel(&vub300->sg_request);
	usb_unlink_urb(vub300->command_out_urb);
	usb_unlink_urb(vub300->command_res_urb);
}

/* round up to a multiple of 64, the small USB bulk packet size */
static u16 roundup_to_multiple_of_64(u16 number)
{
	return 0xFFC0 & (0x3F + number);
}

/*
 * this is a separate function to solve the 80 column width restriction
 *
 * Parses the offload firmware image: a NUL-terminated comment, then a
 * 16-bit big-endian interrupt pseudocode length + payload, a 16-bit
 * big-endian transfer pseudocode length + payload, and finally a count
 * of 4-byte dynamic register descriptors.  On any parse or download
 * failure an explanatory string is left in vub300->vub_name.
 */
static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
					  const struct firmware *fw)
{
	u8 register_count = 0;
	u16 ts = 0;
	u16 interrupt_size = 0;
	const u8 *data = fw->data;
	int size = fw->size;
	u8 c;
	dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
		 vub300->vub_name);
	do {
		c = *data++;
	} while (size-- && c); /* skip comment */
	dev_info(&vub300->udev->dev, "using offload firmware %s %s\n",
		 fw->data, vub300->vub_name);
	if (size < 4) {
		dev_err(&vub300->udev->dev,
			"corrupt offload pseudocode in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt offload pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	/* 16-bit big-endian interrupt pseudocode length */
	interrupt_size += *data++;
	size -= 1;
	interrupt_size <<= 8;
	interrupt_size += *data++;
	size -= 1;
	if (interrupt_size < size) {
		u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, interrupt_size);
			/* zero-pad up to the rounded transfer length */
			memset(xfer_buffer + interrupt_size, 0,
			       xfer_length - interrupt_size);
			size -= interrupt_size;
			data += interrupt_size;
			retval = usb_control_msg(vub300->udev,
					usb_sndctrlpipe(vub300->udev, 0),
					SET_INTERRUPT_PSEUDOCODE,
					USB_DIR_OUT | USB_TYPE_VENDOR |
					USB_RECIP_DEVICE,
					0x0000, 0x0000,
					xfer_buffer, xfer_length, HZ);
			kfree(xfer_buffer);
			if (retval < 0) {
				strncpy(vub300->vub_name,
					"SDIO pseudocode download failed",
					sizeof(vub300->vub_name));
				return;
			}
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" INTERRUPT_PSEUDOCODE for %s %s\n",
				fw->data, vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO interrupt pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt interrupt pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	/* 16-bit big-endian transfer pseudocode length */
	ts += *data++;
	size -= 1;
	ts <<= 8;
	ts += *data++;
	size -= 1;
	if (ts < size) {
		u16 xfer_length = roundup_to_multiple_of_64(ts);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, ts);
			memset(xfer_buffer + ts, 0, xfer_length - ts);
			size -= ts;
			data += ts;
			retval = usb_control_msg(vub300->udev,
					usb_sndctrlpipe(vub300->udev, 0),
					SET_TRANSFER_PSEUDOCODE,
					USB_DIR_OUT | USB_TYPE_VENDOR |
					USB_RECIP_DEVICE,
					0x0000, 0x0000,
					xfer_buffer, xfer_length, HZ);
			kfree(xfer_buffer);
			if (retval < 0) {
				strncpy(vub300->vub_name,
					"SDIO pseudocode download failed",
					sizeof(vub300->vub_name));
				return;
			}
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" TRANSFER_PSEUDOCODE for %s %s\n",
				fw->data, vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO transfer pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt transfer pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt transfer pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	register_count += *data++;
	size -= 1;
	/* the remainder must be exactly register_count 4-byte descriptors */
	if (register_count * 4 == size) {
		int I = vub300->dynamic_register_count = register_count;
		int i = 0;
		while (I--) {
			unsigned int func_num = 0;
			vub300->sdio_register[i].func_num = *data++;
			size -= 1;
			/* 24-bit big-endian SDIO register address */
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			vub300->sdio_register[i].sdio_reg = func_num;
			vub300->sdio_register[i].activate = 1;
			vub300->sdio_register[i].prepared = 0;
			i += 1;
		}
		dev_info(&vub300->udev->dev,
			 "initialized %d dynamic pseudocode registers\n",
			 vub300->dynamic_register_count);
		return;
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt dynamic registers in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt dynamic registers",
			sizeof(vub300->vub_name));
		return;
	}
}

/*
 * if the binary containing the EMPTY PseudoCode can not be found
 * vub300->vub_name is set anyway in order to prevent an automatic retry
 */
static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
{
	struct mmc_card *card = vub300->mmc->card;
	int sdio_funcs = card->sdio_funcs;
	const struct firmware *fw = NULL;
	/* firmware name is built from the card's vendor/device ids */
	int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
			 "vub_%04X%04X", card->cis.vendor,
			 card->cis.device);
	int n = 0;
	int retval;
	/* append "_VVVVDDDD" for each SDIO function on the card */
	for (n = 0; n < sdio_funcs; n++) {
		struct sdio_func *sf = card->sdio_func[n];
		l += snprintf(vub300->vub_name + l,
			      sizeof(vub300->vub_name) - l,
			      "_%04X%04X", sf->vendor, sf->device);
	};
	snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
	dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
		 vub300->vub_name);
	retval = request_firmware(&fw, vub300->vub_name, &card->dev);
	if (retval < 0) {
		/* card-specific image not found - fall back to the default */
		strncpy(vub300->vub_name, "vub_default.bin",
			sizeof(vub300->vub_name));
		retval = request_firmware(&fw, vub300->vub_name, &card->dev);
		if (retval < 0) {
			strncpy(vub300->vub_name,
				"no SDIO offload firmware found",
				sizeof(vub300->vub_name));
		} else {
			__download_offload_pseudocode(vub300, fw);
			release_firmware(fw);
		}
	} else {
		__download_offload_pseudocode(vub300, fw);
		release_firmware(fw);
	}
}

static void vub300_usb_bulk_msg_completion(struct urb *urb)
{				/* urb completion handler - hardirq */
	complete((struct completion *)urb->context);
}

/*
 * synchronous bulk transfer with a millisecond timeout; on timeout the
 * urb is killed and -ETIMEDOUT is returned.  *actual_length is always
 * set to the number of bytes transferred.
 */
static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
			       unsigned int pipe, void *data, int len,
			       int *actual_length, int timeout_msecs)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	struct usb_device *usb_dev = vub300->udev;
	struct completion done;
	int retval;
	vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!vub300->urb)
		return -ENOMEM;
	usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
			  vub300_usb_bulk_msg_completion, NULL);
	init_completion(&done);
	vub300->urb->context = &done;
	vub300->urb->actual_length = 0;
	retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
	if (unlikely(retval))
		goto out;
	if (!wait_for_completion_timeout
	    (&done, msecs_to_jiffies(timeout_msecs))) {
		retval = -ETIMEDOUT;
		usb_kill_urb(vub300->urb);
	} else {
		retval = vub300->urb->status;
	}
out:
	*actual_length = vub300->urb->actual_length;
	usb_free_urb(vub300->urb);
	vub300->urb = NULL;
	return retval;
}

/*
 * read the data phase of a command from the device; returns the number
 * of payload bytes expected (0 on error, with cmd->error set)
 */
static int __command_read_data(struct vub300_mmc_host *vub300,
			       struct mmc_command *cmd, struct mmc_data *data)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	int linear_length = vub300->datasize;
	/* pad to the 512/64 byte USB packet size in use */
	int padded_length = vub300->large_usb_packets ?
	    ((511 + linear_length) >> 9) << 9 :
	    ((63 + linear_length) >> 6) << 6;
	if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
		/* exact fit (or padding disabled): scatter-gather directly */
		int result;
		unsigned pipe;
		pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
		result = usb_sg_init(&vub300->sg_request, vub300->udev,
				     pipe, 0, data->sg, data->sg_len, 0,
				     GFP_KERNEL);
		if (result < 0) {
			usb_unlink_urb(vub300->command_out_urb);
			usb_unlink_urb(vub300->command_res_urb);
			cmd->error = result;
			data->bytes_xfered = 0;
			return 0;
		} else {
			/* guard the sg wait with a timeout scaled by size */
			vub300->sg_transfer_timer.expires =
				jiffies + msecs_to_jiffies(2000 +
						(linear_length / 16384));
			add_timer(&vub300->sg_transfer_timer);
			usb_sg_wait(&vub300->sg_request);
			del_timer(&vub300->sg_transfer_timer);
			if (vub300->sg_request.status < 0) {
				cmd->error = vub300->sg_request.status;
				data->bytes_xfered = 0;
				return 0;
			} else {
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		}
	} else {
		/* read into a padded bounce buffer, then copy out */
		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
		if (buf) {
			int result;
			unsigned pipe = usb_rcvbulkpipe(vub300->udev,
							vub300->data_inp_ep);
			int actual_length = 0;
			result = vub300_usb_bulk_msg(vub300, pipe, buf,
						     padded_length,
						     &actual_length,
						     2000 +
						     (padded_length / 16384));
			if (result < 0) {
				cmd->error = result;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else if (actual_length < linear_length) {
				cmd->error = -EREMOTEIO;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else {
				sg_copy_from_buffer(data->sg, data->sg_len,
						    buf, linear_length);
				kfree(buf);
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		} else {
			cmd->error = -ENOMEM;
			data->bytes_xfered = 0;
			return 0;
		}
	}
}

/*
 * write the data phase of a command to the device; returns the number
 * of payload bytes (cmd->error is set on failure)
 */
static int __command_write_data(struct vub300_mmc_host *vub300,
				struct mmc_command *cmd, struct mmc_data *data)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
	int linear_length = vub300->datasize;
	int modulo_64_length = linear_length & 0x003F;
	int modulo_512_length = linear_length & 0x01FF;
	if (linear_length < 64) {
		/* tiny transfer: always use the preallocated padded buffer */
		int result;
		int actual_length;
		sg_copy_to_buffer(data->sg, data->sg_len,
				  vub300->padded_buffer,
				  sizeof(vub300->padded_buffer));
		memset(vub300->padded_buffer + linear_length, 0,
		       sizeof(vub300->padded_buffer) - linear_length);
		result = vub300_usb_bulk_msg(vub300, pipe,
					     vub300->padded_buffer,
					     sizeof(vub300->padded_buffer),
					     &actual_length, 2000 +
					     (sizeof(vub300->padded_buffer) /
					      16384));
		if (result < 0) {
			cmd->error = result;
			data->bytes_xfered = 0;
		} else {
			data->bytes_xfered = vub300->datasize;
		}
	} else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
		   (vub300->large_usb_packets && (64 > modulo_512_length))
	    ) {			/* don't you just love these work-rounds */
		int padded_length = ((63 + linear_length) >> 6) << 6;
		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
		if (buf) {
			int result;
			int actual_length;
			sg_copy_to_buffer(data->sg, data->sg_len, buf,
					  padded_length);
			memset(buf + linear_length, 0,
			       padded_length - linear_length);
			result = vub300_usb_bulk_msg(vub300, pipe, buf,
						     padded_length,
						     &actual_length,
						     2000 + padded_length /
						     16384);
			kfree(buf);
			if (result < 0) {
				cmd->error = result;
				data->bytes_xfered = 0;
			} else {
				data->bytes_xfered = vub300->datasize;
			}
		} else {
			cmd->error = -ENOMEM;
			data->bytes_xfered = 0;
		}
	} else {		/* no data padding required */
		int result;
		unsigned char buf[64 * 4];
		/* NOTE(review): this copy into a local buf appears unused
		 * by the sg path below - confirm intent */
		sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
		result = usb_sg_init(&vub300->sg_request, vub300->udev,
				     pipe, 0, data->sg, data->sg_len, 0,
				     GFP_KERNEL);
		if (result < 0) {
			usb_unlink_urb(vub300->command_out_urb);
			usb_unlink_urb(vub300->command_res_urb);
			cmd->error = result;
			data->bytes_xfered = 0;
		} else {
			vub300->sg_transfer_timer.expires =
				jiffies + msecs_to_jiffies(2000 +
						linear_length / 16384);
			add_timer(&vub300->sg_transfer_timer);
			usb_sg_wait(&vub300->sg_request);
			if (cmd->error) {
				data->bytes_xfered = 0;
			} else {
				del_timer(&vub300->sg_transfer_timer);
				if (vub300->sg_request.status < 0) {
					cmd->error = vub300->sg_request.status;
					data->bytes_xfered = 0;
				} else {
					data->bytes_xfered = vub300->datasize;
				}
			}
		}
	}
	return linear_length;
}

/*
 * wait for the command/response URB pair to complete and decode the
 * response header, dispatching on header_type; piggybacked register
 * reads are stashed via add_offloaded_reg() for later CMD52 short-cuts
 */
static void __vub300_command_response(struct vub300_mmc_host *vub300,
				      struct mmc_command *cmd,
				      struct mmc_data *data, int data_length)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	long respretval;
	int msec_timeout = 1000 + data_length / 4;
	respretval =
		wait_for_completion_timeout(&vub300->command_complete,
					    msecs_to_jiffies(msec_timeout));
	if (respretval == 0) { /* TIMED OUT */
		/* we don't know which of "out" and "res" if any failed */
		int result;
		vub300->usb_timed_out = 1;
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = -ETIMEDOUT;
		result = usb_lock_device_for_reset(vub300->udev,
						   vub300->interface);
		if (result == 0) {
			result = usb_reset_device(vub300->udev);
			usb_unlock_device(vub300->udev);
		}
	} else if (respretval < 0) {
		/* we don't know which of "out" and "res" if any failed */
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = respretval;
	} else if (cmd->error) {
		/*
		 * the error occurred sending the command
		 * or receiving the response
		 */
	} else if (vub300->command_out_urb->status) {
		vub300->usb_transport_fail = vub300->command_out_urb->status;
		cmd->error = -EPROTO == vub300->command_out_urb->status ?
			-ESHUTDOWN : vub300->command_out_urb->status;
	} else if (vub300->command_res_urb->status) {
		vub300->usb_transport_fail = vub300->command_res_urb->status;
		cmd->error = -EPROTO == vub300->command_res_urb->status ?
			-ESHUTDOWN : vub300->command_res_urb->status;
	} else if (vub300->resp.common.header_type == 0x00) {
		/*
		 * the command completed successfully
		 * and there was no piggybacked data
		 */
	} else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
		cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		if (vub300->data)
			usb_sg_cancel(&vub300->sg_request);
	} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
		/* response carries extra offloaded register reads */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
		/* piggybacked data plus "interrupt disabled" indication */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 1;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
		/* piggybacked data plus "interrupt enabled" indication */
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 0;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else {
		cmd->error = -EINVAL;
	}
}

/*
 * repack the byte-wise device response into the 32-bit words of
 * cmd->resp[]; trailing partial words are handled first
 */
static void construct_request_response(struct vub300_mmc_host *vub300,
				       struct mmc_command *cmd)
{
	int resp_len = vub300->resp_len;
	int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
	int bytes = 3 & less_cmd;
	int words = less_cmd >> 2;
	u8 *r = vub300->resp.response.command_response;
	if (bytes == 3) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16)
			| (r[3 + (words << 2)] << 8);
	} else if (bytes == 2) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16);
	} else if (bytes == 1) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24);
	}
	while (words-- > 0) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16)
			| (r[3 + (words << 2)] << 8)
			| (r[4 + (words << 2)] << 0);
	}
	/* mask the low byte of a CMD53 response */
	if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
		cmd->resp[0] &= 0xFFFFFF00;
}

/* this thread runs only when there is an upper level command req outstanding */
static void vub300_cmndwork_thread(struct work_struct *work)
{
	struct vub300_mmc_host *vub300 =
		container_of(work, struct vub300_mmc_host, cmndwork);
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
		return;
	} else {
		struct mmc_request *req = vub300->req;
		struct mmc_command *cmd = vub300->cmd;
		struct mmc_data *data = vub300->data;
		int data_length;
		mutex_lock(&vub300->cmd_mutex);
		init_completion(&vub300->command_complete);
		if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
		    !mmc_card_present(vub300->mmc->card)) {
			/*
			 * the name of the EMPTY Pseudo firmware file
			 * is used as a flag to indicate that the file
			 * has been already downloaded to the VUB300 chip
			 */
		} else if (0 == vub300->mmc->card->sdio_funcs) {
			strncpy(vub300->vub_name, "SD memory device",
				sizeof(vub300->vub_name));
		} else {
			download_offload_pseudocode(vub300);
		}
		send_command(vub300);
		if (!data)
			data_length = 0;
		else if (MMC_DATA_READ & data->flags)
			data_length = __command_read_data(vub300, cmd, data);
		else
			data_length = __command_write_data(vub300, cmd, data);
		__vub300_command_response(vub300, cmd, data, data_length);
		vub300->req = NULL;
		vub300->cmd = NULL;
		vub300->data = NULL;
		if (cmd->error) {
			if (cmd->error == -ENOMEDIUM)
				check_vub300_port_status(vub300);
			mutex_unlock(&vub300->cmd_mutex);
			mmc_request_done(vub300->mmc, req);
			kref_put(&vub300->kref, vub300_delete);
			return;
		} else {
			construct_request_response(vub300, cmd);
			vub300->resp_len = 0;
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			mmc_request_done(vub300->mmc, req);
			return;
		}
	}
}

/*
 * search the per-function cyclic buffer of offloaded register reads for
 * an entry matching this CMD52's argument bytes; on a hit, synthesize
 * the response from the stored bytes and consume entries up to and
 * including the match.  Returns 1 on a hit, 0 otherwise.
 */
static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
				 struct mmc_command *cmd, u8 Function)
{
	/* cmd_mutex is held by vub300_mmc_request */
	u8 cmd0 = 0xFF & (cmd->arg >> 24);
	u8 cmd1 = 0xFF & (cmd->arg >> 16);
	u8 cmd2 = 0xFF & (cmd->arg >> 8);
	u8 cmd3 = 0xFF & (cmd->arg >> 0);
	int first = MAXREGMASK & vub300->fn[Function].offload_point;
	struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
	if (cmd0 == rf->command_byte[0] &&
	    cmd1 == rf->command_byte[1] &&
	    cmd2 == rf->command_byte[2] &&
	    cmd3 == rf->command_byte[3]) {
		u8 checksum = 0x00;
		cmd->resp[1] = checksum << 24;
		cmd->resp[0] = (rf->Respond_Byte[0] << 24)
			| (rf->Respond_Byte[1] << 16)
			| (rf->Respond_Byte[2] << 8)
			| (rf->Respond_Byte[3] << 0);
		vub300->fn[Function].offload_point += 1;
		vub300->fn[Function].offload_count -= 1;
		vub300->total_offload_count -= 1;
		return 1;
	} else {
		int delta = 1;	/* because it does not match the first one */
		u8 register_count = vub300->fn[Function].offload_count - 1;
		u32 register_point = vub300->fn[Function].offload_point + 1;
		while (0 < register_count) {
			int point = MAXREGMASK & register_point;
			struct offload_registers_access *r =
				&vub300->fn[Function].reg[point];
			if (cmd0 == r->command_byte[0] &&
			    cmd1 == r->command_byte[1] &&
			    cmd2 == r->command_byte[2] &&
			    cmd3 == r->command_byte[3]) {
				u8 checksum = 0x00;
				cmd->resp[1] = checksum << 24;
				cmd->resp[0] = (r->Respond_Byte[0] << 24)
					| (r->Respond_Byte[1] << 16)
					| (r->Respond_Byte[2] << 8)
					| (r->Respond_Byte[3] << 0);
				/* skip past everything up to the match */
				vub300->fn[Function].offload_point += delta;
				vub300->fn[Function].offload_count -= delta;
				vub300->total_offload_count -= delta;
				return 1;
			} else {
				register_point += 1;
				register_count -= 1;
				delta += 1;
				continue;
			}
		}
		return 0;
	}
}

/*
 * try to answer a CMD52 read from locally cached dynamic-register data
 * or from the offloaded cyclic buffer; returns 1 if cmd->resp was filled
 */
static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
					       struct mmc_command *cmd)
{
	/* cmd_mutex is held by vub300_mmc_request */
	u8 regs = vub300->dynamic_register_count;
	u8 i = 0;
	u8 func = FUN(cmd);
	u32 reg = REG(cmd);
	while (0 < regs--) {
		if ((vub300->sdio_register[i].func_num == func) &&
		    (vub300->sdio_register[i].sdio_reg == reg)) {
			if (!vub300->sdio_register[i].prepared) {
				return 0;
			} else if ((0x80000000 & cmd->arg) == 0x80000000) {
				/*
				 * a write to a dynamic register
				 * nullifies our offloaded value
				 */
				vub300->sdio_register[i].prepared = 0;
				return 0;
			} else {
				u8 checksum = 0x00;
				u8 rsp0 = 0x00;
				u8 rsp1 = 0x00;
				u8 rsp2 = vub300->sdio_register[i].response;
				u8 rsp3 = vub300->sdio_register[i].regvalue;
				vub300->sdio_register[i].prepared = 0;
				cmd->resp[1] = checksum << 24;
				cmd->resp[0] = (rsp0 << 24) | (rsp1 << 16)
					| (rsp2 << 8) | (rsp3 << 0);
				return 1;
			}
		} else {
			i += 1;
			continue;
		}
	};
	if (vub300->total_offload_count == 0)
		return 0;
	else if (vub300->fn[func].offload_count == 0)
		return 0;
	else
		return examine_cyclic_buffer(vub300, cmd, func);
}

/*
 * mmc_host_ops.request: validate host state, answer CMD52 from offloaded
 * data when possible, otherwise queue the request to the cmndwork thread
 */
static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{				/* NOT irq */
	struct mmc_command *cmd = req->cmd;
	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
	if (!vub300->interface) {
		cmd->error = -ESHUTDOWN;
		mmc_request_done(mmc, req);
		return;
	} else {
		struct mmc_data *data = req->data;
		if (!vub300->card_powered) {
			cmd->error = -ENOMEDIUM;
			mmc_request_done(mmc, req);
			return;
		}
		if (!vub300->card_present) {
			cmd->error = -ENOMEDIUM;
			mmc_request_done(mmc, req);
			return;
		}
		if (vub300->usb_transport_fail) {
			cmd->error = vub300->usb_transport_fail;
			mmc_request_done(mmc, req);
			return;
		}
		if (!vub300->interface) {
			cmd->error = -ENODEV;
			mmc_request_done(mmc, req);
			return;
		}
		kref_get(&vub300->kref);
		mutex_lock(&vub300->cmd_mutex);
		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
		/*
		 * for performance we have to return immediately
		 * if the requested data has been offloaded
		 */
		if (cmd->opcode == 52 &&
		    satisfy_request_from_offloaded_data(vub300, cmd)) {
			cmd->error = 0;
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			mmc_request_done(mmc, req);
			return;
		} else {
			vub300->cmd = cmd;
			vub300->req = req;
			vub300->data = data;
			if (data)
				vub300->datasize = data->blksz * data->blocks;
			else
				vub300->datasize = 0;
			vub300_queue_cmnd_work(vub300);
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			/*
			 * the kernel lock diagnostics complain
			 * if the cmd_mutex * is "passed on"
			 * to the cmndwork thread,
			 * so we must release it now
			 * and re-acquire it in the cmndwork thread
			 */
		}
	}
}

/*
 * quantize ios->clock to one of the rates the VUB300 supports and send
 * it as an 8-byte little-endian value via a vendor control request
 */
static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
			      struct mmc_ios *ios)
{
	int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */
	int retval;
	u32 kHzClock;
	if (ios->clock >= 48000000)
		kHzClock = 48000;
	else if (ios->clock >= 24000000)
		kHzClock = 24000;
	else if (ios->clock >= 20000000)
		kHzClock = 20000;
	else if (ios->clock >= 15000000)
		kHzClock = 15000;
	else if (ios->clock >= 200000)
		kHzClock = 200;
	else
		kHzClock = 0;
	{
		int i;
		u64 c = kHzClock;
		/* serialize the rate least-significant byte first */
		for (i = 0; i < buf_array_size; i++) {
			buf[i] = c;
			c >>= 8;
		}
	}
	retval =
		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
				SET_CLOCK_SPEED,
				USB_DIR_OUT | USB_TYPE_VENDOR |
				USB_RECIP_DEVICE,
				0x00, 0x00, buf, buf_array_size, HZ);
	if (retval != 8) {
		dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
			" %dkHz failed with retval=%d\n", kHzClock, retval);
	} else {
		dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
			" %dkHz\n", kHzClock);
	}
}

/*
 * mmc_host_ops.set_ios: handle card power transitions (with the delay
 * the VUB300's microprocessor needs to reboot) and clock changes
 */
static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
	if (!vub300->interface)
		return;
	kref_get(&vub300->kref);
	mutex_lock(&vub300->cmd_mutex);
	if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
		vub300->card_powered = 0;
		usb_control_msg(vub300->udev,
				usb_sndctrlpipe(vub300->udev, 0),
				SET_SD_POWER,
				USB_DIR_OUT | USB_TYPE_VENDOR |
				USB_RECIP_DEVICE,
				0x0000, 0x0000, NULL, 0, HZ);
		/* must wait for the VUB300 u-proc to boot up */
		msleep(600);
	} else if ((ios->power_mode == MMC_POWER_UP) &&
		   !vub300->card_powered) {
		usb_control_msg(vub300->udev,
				usb_sndctrlpipe(vub300->udev, 0),
				SET_SD_POWER,
				USB_DIR_OUT | USB_TYPE_VENDOR |
				USB_RECIP_DEVICE,
				0x0001, 0x0000, NULL, 0, HZ);
		msleep(600);
		vub300->card_powered = 1;
	} else if (ios->power_mode == MMC_POWER_ON) {
		u8 *buf = kmalloc(8, GFP_KERNEL);
		if (buf) {
			__set_clock_speed(vub300, buf, ios);
			kfree(buf);
		}
	} else {
		/* this should mean no change of state */
	}
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}

/* mmc_host_ops.get_ro: report the cached write-protect state */
static int vub300_mmc_get_ro(struct mmc_host *mmc)
{
	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
	return vub300->read_only;
}

static void
vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); if (!vub300->interface) return; kref_get(&vub300->kref); if (enable) { mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { vub300->irqs_queued -= 1; mmc_signal_sdio_irq(vub300->mmc); } else if (vub300->irq_disabled) { vub300->irq_disabled = 0; vub300->irq_enabled = 1; vub300_queue_poll_work(vub300, 0); } else if (vub300->irq_enabled) { /* this should not happen, so we will just ignore it */ } else { vub300->irq_enabled = 1; vub300_queue_poll_work(vub300, 0); } mutex_unlock(&vub300->irq_mutex); } else { vub300->irq_enabled = 0; } kref_put(&vub300->kref, vub300_delete); } void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); } static struct mmc_host_ops vub300_mmc_ops = { .request = vub300_mmc_request, .set_ios = vub300_mmc_set_ios, .get_ro = vub300_mmc_get_ro, .enable_sdio_irq = vub300_enable_sdio_irq, .init_card = vub300_init_card, }; static int vub300_probe(struct usb_interface *interface, const struct usb_device_id *id) { /* NOT irq */ struct vub300_mmc_host *vub300; struct usb_host_interface *iface_desc; struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); int i; int retval = -ENOMEM; struct urb *command_out_urb; struct urb *command_res_urb; struct mmc_host *mmc; char manufacturer[48]; char product[32]; char serial_number[32]; usb_string(udev, udev->descriptor.iManufacturer, manufacturer, sizeof(manufacturer)); usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); usb_string(udev, udev->descriptor.iSerialNumber, serial_number, sizeof(serial_number)); dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", udev->descriptor.idVendor, udev->descriptor.idProduct, manufacturer, product, serial_number); command_out_urb = usb_alloc_urb(0, GFP_KERNEL); if 
(!command_out_urb) { retval = -ENOMEM; dev_err(&udev->dev, "not enough memory for command_out_urb\n"); goto error0; } command_res_urb = usb_alloc_urb(0, GFP_KERNEL); if (!command_res_urb) { retval = -ENOMEM; dev_err(&udev->dev, "not enough memory for command_res_urb\n"); goto error1; } /* this also allocates memory for our VUB300 mmc host device */ mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); if (!mmc) { retval = -ENOMEM; dev_err(&udev->dev, "not enough memory for the mmc_host\n"); goto error4; } /* MMC core transfer sizes tunable parameters */ mmc->caps = 0; if (!force_1_bit_data_xfers) mmc->caps |= MMC_CAP_4_BIT_DATA; if (!force_polling_for_irqs) mmc->caps |= MMC_CAP_SDIO_IRQ; mmc->caps &= ~MMC_CAP_NEEDS_POLL; /* * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll * for devices which results in spurious CMD7's being * issued which stops some SDIO cards from working */ if (limit_speed_to_24_MHz) { mmc->caps |= MMC_CAP_MMC_HIGHSPEED; mmc->caps |= MMC_CAP_SD_HIGHSPEED; mmc->f_max = 24000000; dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); } else { mmc->caps |= MMC_CAP_MMC_HIGHSPEED; mmc->caps |= MMC_CAP_SD_HIGHSPEED; mmc->f_max = 48000000; } mmc->f_min = 200000; mmc->max_blk_count = 511; mmc->max_blk_size = 512; mmc->max_segs = 128; if (force_max_req_size) mmc->max_req_size = force_max_req_size * 1024; else mmc->max_req_size = 64 * 1024; mmc->max_seg_size = mmc->max_req_size; mmc->ocr_avail = 0; mmc->ocr_avail |= MMC_VDD_165_195; mmc->ocr_avail |= MMC_VDD_20_21; mmc->ocr_avail |= MMC_VDD_21_22; mmc->ocr_avail |= MMC_VDD_22_23; mmc->ocr_avail |= MMC_VDD_23_24; mmc->ocr_avail |= MMC_VDD_24_25; mmc->ocr_avail |= MMC_VDD_25_26; mmc->ocr_avail |= MMC_VDD_26_27; mmc->ocr_avail |= MMC_VDD_27_28; mmc->ocr_avail |= MMC_VDD_28_29; mmc->ocr_avail |= MMC_VDD_29_30; mmc->ocr_avail |= MMC_VDD_30_31; mmc->ocr_avail |= MMC_VDD_31_32; mmc->ocr_avail |= MMC_VDD_32_33; mmc->ocr_avail |= MMC_VDD_33_34; mmc->ocr_avail |= MMC_VDD_34_35; mmc->ocr_avail |= 
MMC_VDD_35_36; mmc->ops = &vub300_mmc_ops; vub300 = mmc_priv(mmc); vub300->mmc = mmc; vub300->card_powered = 0; vub300->bus_width = 0; vub300->cmnd.head.block_size[0] = 0x00; vub300->cmnd.head.block_size[1] = 0x00; vub300->app_spec = 0; mutex_init(&vub300->cmd_mutex); mutex_init(&vub300->irq_mutex); vub300->command_out_urb = command_out_urb; vub300->command_res_urb = command_res_urb; vub300->usb_timed_out = 0; vub300->dynamic_register_count = 0; for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { vub300->fn[i].offload_point = 0; vub300->fn[i].offload_count = 0; } vub300->total_offload_count = 0; vub300->irq_enabled = 0; vub300->irq_disabled = 0; vub300->irqs_queued = 0; for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) vub300->sdio_register[i++].activate = 0; vub300->udev = udev; vub300->interface = interface; vub300->cmnd_res_ep = 0; vub300->cmnd_out_ep = 0; vub300->data_inp_ep = 0; vub300->data_out_ep = 0; for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) vub300->fbs[i] = 512; /* * set up the endpoint information * * use the first pair of bulk-in and bulk-out * endpoints for Command/Response+Interrupt * * use the second pair of bulk-in and bulk-out * endpoints for Data In/Out */ vub300->large_usb_packets = 0; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint = &iface_desc->endpoint[i].desc; dev_info(&vub300->udev->dev, "vub300 testing %s EndPoint(%d) %02X\n", usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : usb_endpoint_is_bulk_out(endpoint) ? 
"BULK OUT" : "UNKNOWN", i, endpoint->bEndpointAddress); if (endpoint->wMaxPacketSize > 64) vub300->large_usb_packets = 1; if (usb_endpoint_is_bulk_in(endpoint)) { if (!vub300->cmnd_res_ep) { vub300->cmnd_res_ep = endpoint->bEndpointAddress; } else if (!vub300->data_inp_ep) { vub300->data_inp_ep = endpoint->bEndpointAddress; } else { dev_warn(&vub300->udev->dev, "ignoring" " unexpected bulk_in endpoint"); } } else if (usb_endpoint_is_bulk_out(endpoint)) { if (!vub300->cmnd_out_ep) { vub300->cmnd_out_ep = endpoint->bEndpointAddress; } else if (!vub300->data_out_ep) { vub300->data_out_ep = endpoint->bEndpointAddress; } else { dev_warn(&vub300->udev->dev, "ignoring" " unexpected bulk_out endpoint"); } } else { dev_warn(&vub300->udev->dev, "vub300 ignoring EndPoint(%d) %02X", i, endpoint->bEndpointAddress); } } if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && vub300->data_inp_ep && vub300->data_out_ep) { dev_info(&vub300->udev->dev, "vub300 %s packets" " using EndPoints %02X %02X %02X %02X\n", vub300->large_usb_packets ? "LARGE" : "SMALL", vub300->cmnd_out_ep, vub300->cmnd_res_ep, vub300->data_out_ep, vub300->data_inp_ep); /* we have the expected EndPoints */ } else { dev_err(&vub300->udev->dev, "Could not find two sets of bulk-in/out endpoint pairs\n"); retval = -EINVAL; goto error5; } retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_HC_INF0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->hc_info, sizeof(vub300->hc_info), HZ); if (retval < 0) goto error5; retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), SET_ROM_WAIT_STATES, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, firmware_rom_wait_states, 0x0000, NULL, 0, HZ); if (retval < 0) goto error5; dev_info(&vub300->udev->dev, "operating_mode = %s %s %d MHz %s %d byte USB packets\n", (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", mmc->f_max / 1000000, pad_input_to_usb_pkt ? 
"padding input data to" : "with", vub300->large_usb_packets ? 512 : 64); retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), HZ); if (retval < 0) { goto error4; } else if (sizeof(vub300->system_port_status) == retval) { vub300->card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; } else { goto error4; } usb_set_intfdata(interface, vub300); INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); kref_init(&vub300->kref); init_timer(&vub300->sg_transfer_timer); vub300->sg_transfer_timer.data = (unsigned long)vub300; vub300->sg_transfer_timer.function = vub300_sg_timed_out; kref_get(&vub300->kref); init_timer(&vub300->inactivity_timer); vub300->inactivity_timer.data = (unsigned long)vub300; vub300->inactivity_timer.function = vub300_inactivity_timer_expired; vub300->inactivity_timer.expires = jiffies + HZ; add_timer(&vub300->inactivity_timer); if (vub300->card_present) dev_info(&vub300->udev->dev, "USB vub300 remote SDIO host controller[%d]" "connected with SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); else dev_info(&vub300->udev->dev, "USB vub300 remote SDIO host controller[%d]" "connected with no SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); mmc_add_host(mmc); return 0; error5: mmc_free_host(mmc); /* * and hence also frees vub300 * which is contained at the end of struct mmc */ error4: usb_free_urb(command_res_urb); error1: usb_free_urb(command_out_urb); error0: usb_put_dev(udev); return retval; } static void vub300_disconnect(struct usb_interface *interface) { /* NOT irq */ struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); if 
(!vub300 || !vub300->mmc) { return; } else { struct mmc_host *mmc = vub300->mmc; if (!vub300->mmc) { return; } else { int ifnum = interface_to_InterfaceNumber(interface); usb_set_intfdata(interface, NULL); /* prevent more I/O from starting */ vub300->interface = NULL; kref_put(&vub300->kref, vub300_delete); mmc_remove_host(mmc); pr_info("USB vub300 remote SDIO host controller[%d]" " now disconnected", ifnum); return; } } } #ifdef CONFIG_PM static int vub300_suspend(struct usb_interface *intf, pm_message_t message) { struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); if (!vub300 || !vub300->mmc) { return 0; } else { struct mmc_host *mmc = vub300->mmc; mmc_suspend_host(mmc); return 0; } } static int vub300_resume(struct usb_interface *intf) { struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); if (!vub300 || !vub300->mmc) { return 0; } else { struct mmc_host *mmc = vub300->mmc; mmc_resume_host(mmc); return 0; } } #else #define vub300_suspend NULL #define vub300_resume NULL #endif static int vub300_pre_reset(struct usb_interface *intf) { /* NOT irq */ struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); mutex_lock(&vub300->cmd_mutex); return 0; } static int vub300_post_reset(struct usb_interface *intf) { /* NOT irq */ struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); /* we are sure no URBs are active - no locking needed */ vub300->errors = -EPIPE; mutex_unlock(&vub300->cmd_mutex); return 0; } static struct usb_driver vub300_driver = { .name = "vub300", .probe = vub300_probe, .disconnect = vub300_disconnect, .suspend = vub300_suspend, .resume = vub300_resume, .pre_reset = vub300_pre_reset, .post_reset = vub300_post_reset, .id_table = vub300_table, .supports_autosuspend = 1, }; static int __init vub300_init(void) { /* NOT irq */ int result; pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X", firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout); cmndworkqueue = create_singlethread_workqueue("kvub300c"); if 
(!cmndworkqueue) { pr_err("not enough memory for the REQUEST workqueue"); result = -ENOMEM; goto out1; } pollworkqueue = create_singlethread_workqueue("kvub300p"); if (!pollworkqueue) { pr_err("not enough memory for the IRQPOLL workqueue"); result = -ENOMEM; goto out2; } deadworkqueue = create_singlethread_workqueue("kvub300d"); if (!deadworkqueue) { pr_err("not enough memory for the EXPIRED workqueue"); result = -ENOMEM; goto out3; } result = usb_register(&vub300_driver); if (result) { pr_err("usb_register failed. Error number %d", result); goto out4; } return 0; out4: destroy_workqueue(deadworkqueue); out3: destroy_workqueue(pollworkqueue); out2: destroy_workqueue(cmndworkqueue); out1: return result; } static void __exit vub300_exit(void) { usb_deregister(&vub300_driver); flush_workqueue(cmndworkqueue); flush_workqueue(pollworkqueue); flush_workqueue(deadworkqueue); destroy_workqueue(cmndworkqueue); destroy_workqueue(pollworkqueue); destroy_workqueue(deadworkqueue); } module_init(vub300_init); module_exit(vub300_exit); MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>"); MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver"); MODULE_LICENSE("GPL");
gpl-2.0
NicholasPace/android_kernel_asus_moorefield
drivers/mtd/nand/denali_dt.c
2320
3838
/*
 * NAND Flash Controller Device Driver for DT
 *
 * Copyright © 2011, Picochip.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>

#include "denali.h"

struct denali_dt {
	struct denali_nand_info	denali;
	struct clk		*clk;
};

/*
 * Reserve a memory region and ioremap it (non-cached), using devm_* so the
 * mapping is released automatically when the device is unbound.  Returns the
 * mapped address or NULL on failure (after logging the reason).
 */
static void __iomem *request_and_map(struct device *dev,
				     const struct resource *res)
{
	void __iomem *ptr;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     "denali-dt")) {
		dev_err(dev, "unable to request %s\n", res->name);
		return NULL;
	}

	ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!ptr)
		dev_err(dev, "ioremap_nocache of %s failed!", res->name);

	return ptr;
}

static const struct of_device_id denali_nand_dt_ids[] = {
		{ .compatible = "denali,denali-nand-dt" },
		{ /* sentinel */ }
	};

MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);

static u64 denali_dma_mask;

/*
 * Probe: map the "denali_reg" and "nand_data" regions, fetch the IRQ,
 * optionally pick up a "dma-mask" property, enable the controller clock and
 * hand off to the common denali_init() core.  The clock is fully disabled
 * and put again on any failure after it was enabled.
 */
static int denali_dt_probe(struct platform_device *ofdev)
{
	struct resource *denali_reg, *nand_data;
	struct denali_dt *dt;
	struct denali_nand_info *denali;
	int ret;
	const struct of_device_id *of_id;

	of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
	if (of_id) {
		ofdev->id_entry = of_id->data;
	} else {
		pr_err("Failed to find the right device id.\n");
		return -ENOMEM;
	}

	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;
	denali = &dt->denali;

	denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
	nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
	if (!denali_reg || !nand_data) {
		dev_err(&ofdev->dev, "resources not completely defined\n");
		return -EINVAL;
	}

	denali->platform = DT;
	denali->dev = &ofdev->dev;
	denali->irq = platform_get_irq(ofdev, 0);
	if (denali->irq < 0) {
		dev_err(&ofdev->dev, "no irq defined\n");
		return denali->irq;
	}

	denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
	if (!denali->flash_reg)
		return -ENOMEM;

	denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
	if (!denali->flash_mem)
		return -ENOMEM;

	if (!of_property_read_u32(ofdev->dev.of_node, "dma-mask",
		(u32 *)&denali_dma_mask)) {
		denali->dev->dma_mask = &denali_dma_mask;
	} else {
		denali->dev->dma_mask = NULL;
	}

	dt->clk = clk_get(&ofdev->dev, NULL);
	if (IS_ERR(dt->clk)) {
		dev_err(&ofdev->dev, "no clk available\n");
		return PTR_ERR(dt->clk);
	}
	clk_prepare_enable(dt->clk);

	ret = denali_init(denali);
	if (ret)
		goto out_disable_clk;

	platform_set_drvdata(ofdev, dt);
	return 0;

out_disable_clk:
	clk_disable_unprepare(dt->clk);
	clk_put(dt->clk);

	return ret;
}

/*
 * Remove: tear down the controller, then release the clock.
 *
 * Fix: probe enables the clock with clk_prepare_enable(), so remove must
 * use clk_disable_unprepare() — the original clk_disable() left the
 * clock's prepare count unbalanced.
 */
static int denali_dt_remove(struct platform_device *ofdev)
{
	struct denali_dt *dt = platform_get_drvdata(ofdev);

	denali_remove(&dt->denali);
	clk_disable_unprepare(dt->clk);
	clk_put(dt->clk);

	return 0;
}

static struct platform_driver denali_dt_driver = {
	.probe		= denali_dt_probe,
	.remove		= denali_dt_remove,
	.driver		= {
		.name	= "denali-nand-dt",
		.owner	= THIS_MODULE,
		.of_match_table	= denali_nand_dt_ids,
	},
};

module_platform_driver(denali_dt_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("DT driver for Denali NAND controller");
gpl-2.0
goodwinos/linux-dm
drivers/mmc/host/vub300.c
2320
74256
/* * Remote VUB300 SDIO/SDmem Host Controller Driver * * Copyright (C) 2010 Elan Digital Systems Limited * * based on USB Skeleton driver - 2.2 * * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 * * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, * by virtue of this driver, to have been plugged into a local * SDIO host controller, similar to, say, a PCI Ricoh controller * This is because this kernel device driver is both a USB 2.0 * client device driver AND an MMC host controller driver. Thus * if there is an existing driver for the inserted SDIO/SDmem/MMC * device then that driver will be used by the kernel to manage * the device in exactly the same fashion as if it had been * directly plugged into, say, a local pci bus Ricoh controller * * RANT: this driver was written using a display 128x48 - converting it * to a line width of 80 makes it very difficult to support. In * particular functions have been broken down into sub functions * and the original meaningful names have been shortened into * cryptic ones. * The problem is that executing a fragment of code subject to * two conditions means an indentation of 24, thus leaving only * 56 characters for a C statement. And that is quite ridiculous! 
* * Data types: data passed to/from the VUB300 is fixed to a number of * bits and driver data fields reflect that limit by using * u8, u16, u32 */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/workqueue.h> #include <linux/ctype.h> #include <linux/firmware.h> #include <linux/scatterlist.h> struct host_controller_info { u8 info_size; u16 firmware_version; u8 number_of_ports; } __packed; #define FIRMWARE_BLOCK_BOUNDARY 1024 struct sd_command_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; /* Bit7 - Rd/Wr */ u8 command_index; u8 transfer_size[4]; /* ReadSize + ReadSize */ u8 response_type; u8 arguments[4]; u8 block_count[2]; u8 block_size[2]; u8 block_boundary[2]; u8 reserved[44]; /* to pad out to 64 bytes */ } __packed; struct sd_irqpoll_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; /* Bit7 - Rd/Wr */ u8 padding[16]; /* don't ask why !! 
*/ u8 poll_timeout_msb; u8 poll_timeout_lsb; u8 reserved[42]; /* to pad out to 64 bytes */ } __packed; struct sd_common_header { u8 header_size; u8 header_type; u8 port_number; } __packed; struct sd_response_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; u8 command_index; u8 command_response[0]; } __packed; struct sd_status_header { u8 header_size; u8 header_type; u8 port_number; u16 port_flags; u32 sdio_clock; u16 host_header_size; u16 func_header_size; u16 ctrl_header_size; } __packed; struct sd_error_header { u8 header_size; u8 header_type; u8 port_number; u8 error_code; } __packed; struct sd_interrupt_header { u8 header_size; u8 header_type; u8 port_number; } __packed; struct offload_registers_access { u8 command_byte[4]; u8 Respond_Byte[4]; } __packed; #define INTERRUPT_REGISTER_ACCESSES 15 struct sd_offloaded_interrupt { u8 header_size; u8 header_type; u8 port_number; struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; } __packed; struct sd_register_header { u8 header_size; u8 header_type; u8 port_number; u8 command_type; u8 command_index; u8 command_response[6]; } __packed; #define PIGGYBACK_REGISTER_ACCESSES 14 struct sd_offloaded_piggyback { struct sd_register_header sdio; struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; } __packed; union sd_response { struct sd_common_header common; struct sd_status_header status; struct sd_error_header error; struct sd_interrupt_header interrupt; struct sd_response_header response; struct sd_offloaded_interrupt irq; struct sd_offloaded_piggyback pig; } __packed; union sd_command { struct sd_command_header head; struct sd_irqpoll_header poll; } __packed; enum SD_RESPONSE_TYPE { SDRT_UNSPECIFIED = 0, SDRT_NONE, SDRT_1, SDRT_1B, SDRT_2, SDRT_3, SDRT_4, SDRT_5, SDRT_5B, SDRT_6, SDRT_7, }; #define RESPONSE_INTERRUPT 0x01 #define RESPONSE_ERROR 0x02 #define RESPONSE_STATUS 0x03 #define RESPONSE_IRQ_DISABLED 0x05 #define RESPONSE_IRQ_ENABLED 0x06 #define 
RESPONSE_PIGGYBACKED 0x07 #define RESPONSE_NO_INTERRUPT 0x08 #define RESPONSE_PIG_DISABLED 0x09 #define RESPONSE_PIG_ENABLED 0x0A #define SD_ERROR_1BIT_TIMEOUT 0x01 #define SD_ERROR_4BIT_TIMEOUT 0x02 #define SD_ERROR_1BIT_CRC_WRONG 0x03 #define SD_ERROR_4BIT_CRC_WRONG 0x04 #define SD_ERROR_1BIT_CRC_ERROR 0x05 #define SD_ERROR_4BIT_CRC_ERROR 0x06 #define SD_ERROR_NO_CMD_ENDBIT 0x07 #define SD_ERROR_NO_1BIT_DATEND 0x08 #define SD_ERROR_NO_4BIT_DATEND 0x09 #define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A #define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B #define SD_ERROR_ILLEGAL_COMMAND 0x0C #define SD_ERROR_NO_DEVICE 0x0D #define SD_ERROR_TRANSFER_LENGTH 0x0E #define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F #define SD_ERROR_4BIT_DATA_TIMEOUT 0x10 #define SD_ERROR_ILLEGAL_STATE 0x11 #define SD_ERROR_UNKNOWN_ERROR 0x12 #define SD_ERROR_RESERVED_ERROR 0x13 #define SD_ERROR_INVALID_FUNCTION 0x14 #define SD_ERROR_OUT_OF_RANGE 0x15 #define SD_ERROR_STAT_CMD 0x16 #define SD_ERROR_STAT_DATA 0x17 #define SD_ERROR_STAT_CMD_TIMEOUT 0x18 #define SD_ERROR_SDCRDY_STUCK 0x19 #define SD_ERROR_UNHANDLED 0x1A #define SD_ERROR_OVERRUN 0x1B #define SD_ERROR_PIO_TIMEOUT 0x1C #define FUN(c) (0x000007 & (c->arg>>28)) #define REG(c) (0x01FFFF & (c->arg>>9)) static bool limit_speed_to_24_MHz; module_param(limit_speed_to_24_MHz, bool, 0644); MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); static bool pad_input_to_usb_pkt; module_param(pad_input_to_usb_pkt, bool, 0644); MODULE_PARM_DESC(pad_input_to_usb_pkt, "Pad USB data input transfers to whole USB Packet"); static bool disable_offload_processing; module_param(disable_offload_processing, bool, 0644); MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); static bool force_1_bit_data_xfers; module_param(force_1_bit_data_xfers, bool, 0644); MODULE_PARM_DESC(force_1_bit_data_xfers, "Force SDIO Data Transfers to 1-bit Mode"); static bool force_polling_for_irqs; module_param(force_polling_for_irqs, bool, 
0644); MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); static int firmware_irqpoll_timeout = 1024; module_param(firmware_irqpoll_timeout, int, 0644); MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); static int force_max_req_size = 128; module_param(force_max_req_size, int, 0644); MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); #ifdef SMSC_DEVELOPMENT_BOARD static int firmware_rom_wait_states = 0x04; #else static int firmware_rom_wait_states = 0x1C; #endif module_param(firmware_rom_wait_states, int, 0644); MODULE_PARM_DESC(firmware_rom_wait_states, "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); #define ELAN_VENDOR_ID 0x2201 #define VUB300_VENDOR_ID 0x0424 #define VUB300_PRODUCT_ID 0x012C static struct usb_device_id vub300_table[] = { {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, vub300_table); static struct workqueue_struct *cmndworkqueue; static struct workqueue_struct *pollworkqueue; static struct workqueue_struct *deadworkqueue; static inline int interface_to_InterfaceNumber(struct usb_interface *interface) { if (!interface) return -1; if (!interface->cur_altsetting) return -1; return interface->cur_altsetting->desc.bInterfaceNumber; } struct sdio_register { unsigned func_num:3; unsigned sdio_reg:17; unsigned activate:1; unsigned prepared:1; unsigned regvalue:8; unsigned response:8; unsigned sparebit:26; }; struct vub300_mmc_host { struct usb_device *udev; struct usb_interface *interface; struct kref kref; struct mutex cmd_mutex; struct mutex irq_mutex; char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ u8 cmnd_out_ep; /* EndPoint for commands */ u8 cmnd_res_ep; /* EndPoint for responses */ u8 data_out_ep; /* EndPoint for out data */ u8 data_inp_ep; /* EndPoint for inp data */ bool card_powered; bool card_present; bool read_only; bool 
large_usb_packets; bool app_spec; /* ApplicationSpecific */ bool irq_enabled; /* by the MMC CORE */ bool irq_disabled; /* in the firmware */ unsigned bus_width:4; u8 total_offload_count; u8 dynamic_register_count; u8 resp_len; u32 datasize; int errors; int usb_transport_fail; int usb_timed_out; int irqs_queued; struct sdio_register sdio_register[16]; struct offload_interrupt_function_register { #define MAXREGBITS 4 #define MAXREGS (1<<MAXREGBITS) #define MAXREGMASK (MAXREGS-1) u8 offload_count; u32 offload_point; struct offload_registers_access reg[MAXREGS]; } fn[8]; u16 fbs[8]; /* Function Block Size */ struct mmc_command *cmd; struct mmc_request *req; struct mmc_data *data; struct mmc_host *mmc; struct urb *urb; struct urb *command_out_urb; struct urb *command_res_urb; struct completion command_complete; struct completion irqpoll_complete; union sd_command cmnd; union sd_response resp; struct timer_list sg_transfer_timer; struct usb_sg_request sg_request; struct timer_list inactivity_timer; struct work_struct deadwork; struct work_struct cmndwork; struct delayed_work pollwork; struct host_controller_info hc_info; struct sd_status_header system_port_status; u8 padded_buffer[64]; }; #define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) #define SET_TRANSFER_PSEUDOCODE 21 #define SET_INTERRUPT_PSEUDOCODE 20 #define SET_FAILURE_MODE 18 #define SET_ROM_WAIT_STATES 16 #define SET_IRQ_ENABLE 13 #define SET_CLOCK_SPEED 11 #define SET_FUNCTION_BLOCK_SIZE 9 #define SET_SD_DATA_MODE 6 #define SET_SD_POWER 4 #define ENTER_DFU_MODE 3 #define GET_HC_INF0 1 #define GET_SYSTEM_PORT_STATUS 0 static void vub300_delete(struct kref *kref) { /* kref callback - softirq */ struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); struct mmc_host *mmc = vub300->mmc; usb_free_urb(vub300->command_out_urb); vub300->command_out_urb = NULL; usb_free_urb(vub300->command_res_urb); vub300->command_res_urb = NULL; usb_put_dev(vub300->udev); mmc_free_host(mmc); /* 
* and hence also frees vub300 * which is contained at the end of struct mmc */ } static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) { kref_get(&vub300->kref); if (queue_work(cmndworkqueue, &vub300->cmndwork)) { /* * then the cmndworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the cmndworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) { kref_get(&vub300->kref); if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { /* * then the pollworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the pollworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) { kref_get(&vub300->kref); if (queue_work(deadworkqueue, &vub300->deadwork)) { /* * then the deadworkqueue was not previously * running and the above get ref is obvious * required and will be put when the thread * terminates by a specific call */ } else { /* * the deadworkqueue was already running from * a previous invocation and thus to keep the * kref counts correct we must undo the get */ kref_put(&vub300->kref, vub300_delete); } } static void irqpoll_res_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) vub300->usb_transport_fail = urb->status; complete(&vub300->irqpoll_complete); } static void irqpoll_out_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct 
vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) { vub300->usb_transport_fail = urb->status; complete(&vub300->irqpoll_complete); return; } else { int ret; unsigned int pipe = usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, &vub300->resp, sizeof(vub300->resp), irqpoll_res_completed, vub300); vub300->command_res_urb->actual_length = 0; ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); if (ret) { vub300->usb_transport_fail = ret; complete(&vub300->irqpoll_complete); } return; } } static void send_irqpoll(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ int retval; int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); vub300->cmnd.poll.header_size = 22; vub300->cmnd.poll.header_type = 1; vub300->cmnd.poll.port_number = 0; vub300->cmnd.poll.command_type = 2; vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) , &vub300->cmnd, sizeof(vub300->cmnd) , irqpoll_out_completed, vub300); retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); if (0 > retval) { vub300->usb_transport_fail = retval; vub300_queue_poll_work(vub300, 1); complete(&vub300->irqpoll_complete); return; } else { return; } } static void new_system_port_status(struct vub300_mmc_host *vub300) { int old_card_present = vub300->card_present; int new_card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; if (new_card_present && !old_card_present) { dev_info(&vub300->udev->dev, "card just inserted\n"); vub300->card_present = 1; vub300->bus_width = 0; if (disable_offload_processing) strncpy(vub300->vub_name, "EMPTY Processing Disabled", sizeof(vub300->vub_name)); else vub300->vub_name[0] = 0; mmc_detect_change(vub300->mmc, 1); } else if (!new_card_present && old_card_present) { dev_info(&vub300->udev->dev, "card just ejected\n"); vub300->card_present = 0; mmc_detect_change(vub300->mmc, 0); } else { /* no change */ } } static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, struct offload_registers_access *register_access, u8 func) { u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, sizeof(struct offload_registers_access)); vub300->fn[func].offload_count += 1; vub300->total_offload_count += 1; } static void add_offloaded_reg(struct vub300_mmc_host *vub300, struct offload_registers_access *register_access) { u32 Register = ((0x03 & register_access->command_byte[0]) << 15) | ((0xFF & register_access->command_byte[1]) << 7) | ((0xFE & register_access->command_byte[2]) >> 1); u8 func = ((0x70 & register_access->command_byte[0]) >> 4); u8 regs = vub300->dynamic_register_count; u8 i = 0; while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { if (vub300->sdio_register[i].func_num == func && vub300->sdio_register[i].sdio_reg == Register) { if (vub300->sdio_register[i].prepared == 0) vub300->sdio_register[i].prepared = 1; vub300->sdio_register[i].response = register_access->Respond_Byte[2]; vub300->sdio_register[i].regvalue = register_access->Respond_Byte[3]; return; } else { i += 1; continue; } }; __add_offloaded_reg_to_fifo(vub300, register_access, func); } static void check_vub300_port_status(struct vub300_mmc_host *vub300) { /* * cmd_mutex is held by vub300_pollwork_thread, * vub300_deadwork_thread or vub300_cmndwork_thread */ int retval; retval = 
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), HZ); if (sizeof(vub300->system_port_status) == retval) new_system_port_status(vub300); } static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ if (vub300->command_res_urb->actual_length == 0) return; switch (vub300->resp.common.header_type) { case RESPONSE_INTERRUPT: mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else vub300->irqs_queued += 1; vub300->irq_disabled = 1; mutex_unlock(&vub300->irq_mutex); break; case RESPONSE_ERROR: if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) check_vub300_port_status(vub300); break; case RESPONSE_STATUS: vub300->system_port_status = vub300->resp.status; new_system_port_status(vub300); if (!vub300->card_present) vub300_queue_poll_work(vub300, HZ / 5); break; case RESPONSE_IRQ_DISABLED: { int offloaded_data_length = vub300->resp.common.header_size - 3; int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else vub300->irqs_queued += 1; vub300->irq_disabled = 1; mutex_unlock(&vub300->irq_mutex); break; } case RESPONSE_IRQ_ENABLED: { int offloaded_data_length = vub300->resp.common.header_size - 3; int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irq_enabled) mmc_signal_sdio_irq(vub300->mmc); else if (vub300->irqs_queued) vub300->irqs_queued += 1; else vub300->irqs_queued += 1; vub300->irq_disabled = 0; mutex_unlock(&vub300->irq_mutex); break; } case 
RESPONSE_NO_INTERRUPT: vub300_queue_poll_work(vub300, 1); break; default: break; } } static void __do_poll(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_pollwork_thread */ long commretval; mod_timer(&vub300->inactivity_timer, jiffies + HZ); init_completion(&vub300->irqpoll_complete); send_irqpoll(vub300); commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, msecs_to_jiffies(500)); if (vub300->usb_transport_fail) { /* no need to do anything */ } else if (commretval == 0) { vub300->usb_timed_out = 1; usb_kill_urb(vub300->command_out_urb); usb_kill_urb(vub300->command_res_urb); } else if (commretval < 0) { vub300_queue_poll_work(vub300, 1); } else { /* commretval > 0 */ __vub300_irqpoll_response(vub300); } } /* this thread runs only when the driver * is trying to poll the device for an IRQ */ static void vub300_pollwork_thread(struct work_struct *work) { /* NOT irq */ struct vub300_mmc_host *vub300 = container_of(work, struct vub300_mmc_host, pollwork.work); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); return; } mutex_lock(&vub300->cmd_mutex); if (vub300->cmd) { vub300_queue_poll_work(vub300, 1); } else if (!vub300->card_present) { /* no need to do anything */ } else { /* vub300->card_present */ mutex_lock(&vub300->irq_mutex); if (!vub300->irq_enabled) { mutex_unlock(&vub300->irq_mutex); } else if (vub300->irqs_queued) { vub300->irqs_queued -= 1; mmc_signal_sdio_irq(vub300->mmc); mod_timer(&vub300->inactivity_timer, jiffies + HZ); mutex_unlock(&vub300->irq_mutex); } else { /* NOT vub300->irqs_queued */ mutex_unlock(&vub300->irq_mutex); __do_poll(vub300); } } mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); } static void vub300_deadwork_thread(struct work_struct *work) { /* NOT irq */ struct vub300_mmc_host *vub300 = container_of(work, struct vub300_mmc_host, deadwork); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); return; } mutex_lock(&vub300->cmd_mutex); if 
(vub300->cmd) { /* * a command got in as the inactivity * timer expired - so we just let the * processing of the command show if * the device is dead */ } else if (vub300->card_present) { check_vub300_port_status(vub300); } else if (vub300->mmc && vub300->mmc->card && mmc_card_present(vub300->mmc->card)) { /* * the MMC core must not have responded * to the previous indication - lets * hope that it eventually does so we * will just ignore this for now */ } else { check_vub300_port_status(vub300); } mod_timer(&vub300->inactivity_timer, jiffies + HZ); mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); } static void vub300_inactivity_timer_expired(unsigned long data) { /* softirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); } else if (vub300->cmd) { mod_timer(&vub300->inactivity_timer, jiffies + HZ); } else { vub300_queue_dead_work(vub300); mod_timer(&vub300->inactivity_timer, jiffies + HZ); } } static int vub300_response_error(u8 error_code) { switch (error_code) { case SD_ERROR_PIO_TIMEOUT: case SD_ERROR_1BIT_TIMEOUT: case SD_ERROR_4BIT_TIMEOUT: return -ETIMEDOUT; case SD_ERROR_STAT_DATA: case SD_ERROR_OVERRUN: case SD_ERROR_STAT_CMD: case SD_ERROR_STAT_CMD_TIMEOUT: case SD_ERROR_SDCRDY_STUCK: case SD_ERROR_UNHANDLED: case SD_ERROR_1BIT_CRC_WRONG: case SD_ERROR_4BIT_CRC_WRONG: case SD_ERROR_1BIT_CRC_ERROR: case SD_ERROR_4BIT_CRC_ERROR: case SD_ERROR_NO_CMD_ENDBIT: case SD_ERROR_NO_1BIT_DATEND: case SD_ERROR_NO_4BIT_DATEND: case SD_ERROR_1BIT_DATA_TIMEOUT: case SD_ERROR_4BIT_DATA_TIMEOUT: case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: return -EILSEQ; case 33: return -EILSEQ; case SD_ERROR_ILLEGAL_COMMAND: return -EINVAL; case SD_ERROR_NO_DEVICE: return -ENOMEDIUM; default: return -ENODEV; } } static void command_res_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct 
vub300_mmc_host *)urb->context; if (urb->status) { /* we have to let the initiator handle the error */ } else if (vub300->command_res_urb->actual_length == 0) { /* * we have seen this happen once or twice and * we suspect a buggy USB host controller */ } else if (!vub300->data) { /* this means that the command (typically CMD52) succeeded */ } else if (vub300->resp.common.header_type != 0x02) { /* * this is an error response from the VUB300 chip * and we let the initiator handle it */ } else if (vub300->urb) { vub300->cmd->error = vub300_response_error(vub300->resp.error.error_code); usb_unlink_urb(vub300->urb); } else { vub300->cmd->error = vub300_response_error(vub300->resp.error.error_code); usb_sg_cancel(&vub300->sg_request); } complete(&vub300->command_complete); /* got_response_in */ } static void command_out_completed(struct urb *urb) { /* urb completion handler - hardirq */ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; if (urb->status) { complete(&vub300->command_complete); } else { int ret; unsigned int pipe = usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, &vub300->resp, sizeof(vub300->resp), command_res_completed, vub300); vub300->command_res_urb->actual_length = 0; ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); if (ret == 0) { /* * the urb completion handler will call * our completion handler */ } else { /* * and thus we only call it directly * when it will not be called */ complete(&vub300->command_complete); } } } /* * the STUFF bits are masked out for the comparisons */ static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, u32 cmd_arg) { if ((0xFBFFFE00 & cmd_arg) == 0x80022200) vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) vub300->fbs[2] = (cmd_arg << 8) | 
(0x00FF & vub300->fbs[2]); else if ((0xFBFFFE00 & cmd_arg) == 0x80042000) vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); else if ((0xFBFFFE00 & cmd_arg) == 0x80062200) vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) vub300->bus_width = 1; else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) vub300->bus_width = 4; } static void send_command(struct vub300_mmc_host *vub300) { /* cmd_mutex is held by vub300_cmndwork_thread */ struct mmc_command *cmd = vub300->cmd; struct mmc_data *data = vub300->data; int retval; int i; u8 response_type; if (vub300->app_spec) { switch (cmd->opcode) { case 6: response_type = SDRT_1; vub300->resp_len = 6; if (0x00000000 == (0x00000003 & cmd->arg)) vub300->bus_width = 1; else if (0x00000002 == (0x00000003 & cmd->arg)) vub300->bus_width = 4; else dev_err(&vub300->udev->dev, "unexpected ACMD6 bus_width=%d\n", 0x00000003 & cmd->arg); break; case 13: response_type = SDRT_1; vub300->resp_len = 6; break; case 22: 
response_type = SDRT_1; vub300->resp_len = 6; break; case 23: response_type = SDRT_1; vub300->resp_len = 6; break; case 41: response_type = SDRT_3; vub300->resp_len = 6; break; case 42: response_type = SDRT_1; vub300->resp_len = 6; break; case 51: response_type = SDRT_1; vub300->resp_len = 6; break; case 55: response_type = SDRT_1; vub300->resp_len = 6; break; default: vub300->resp_len = 0; cmd->error = -EINVAL; complete(&vub300->command_complete); return; } vub300->app_spec = 0; } else { switch (cmd->opcode) { case 0: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 1: response_type = SDRT_3; vub300->resp_len = 6; break; case 2: response_type = SDRT_2; vub300->resp_len = 17; break; case 3: response_type = SDRT_6; vub300->resp_len = 6; break; case 4: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 5: response_type = SDRT_4; vub300->resp_len = 6; break; case 6: response_type = SDRT_1; vub300->resp_len = 6; break; case 7: response_type = SDRT_1B; vub300->resp_len = 6; break; case 8: response_type = SDRT_7; vub300->resp_len = 6; break; case 9: response_type = SDRT_2; vub300->resp_len = 17; break; case 10: response_type = SDRT_2; vub300->resp_len = 17; break; case 12: response_type = SDRT_1B; vub300->resp_len = 6; break; case 13: response_type = SDRT_1; vub300->resp_len = 6; break; case 15: response_type = SDRT_NONE; vub300->resp_len = 0; break; case 16: for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) vub300->fbs[i] = 0xFFFF & cmd->arg; response_type = SDRT_1; vub300->resp_len = 6; break; case 17: case 18: case 24: case 25: case 27: response_type = SDRT_1; vub300->resp_len = 6; break; case 28: case 29: response_type = SDRT_1B; vub300->resp_len = 6; break; case 30: case 32: case 33: response_type = SDRT_1; vub300->resp_len = 6; break; case 38: response_type = SDRT_1B; vub300->resp_len = 6; break; case 42: response_type = SDRT_1; vub300->resp_len = 6; break; case 52: response_type = SDRT_5; vub300->resp_len = 6; 
snoop_block_size_and_bus_width(vub300, cmd->arg); break; case 53: response_type = SDRT_5; vub300->resp_len = 6; break; case 55: response_type = SDRT_1; vub300->resp_len = 6; vub300->app_spec = 1; break; case 56: response_type = SDRT_1; vub300->resp_len = 6; break; default: vub300->resp_len = 0; cmd->error = -EINVAL; complete(&vub300->command_complete); return; } } /* * it is a shame that we can not use "sizeof(struct sd_command_header)" * this is because the packet _must_ be padded to 64 bytes */ vub300->cmnd.head.header_size = 20; vub300->cmnd.head.header_type = 0x00; vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ vub300->cmnd.head.command_type = 0x00; /* standard read command */ vub300->cmnd.head.response_type = response_type; vub300->cmnd.head.command_index = cmd->opcode; vub300->cmnd.head.arguments[0] = cmd->arg >> 24; vub300->cmnd.head.arguments[1] = cmd->arg >> 16; vub300->cmnd.head.arguments[2] = cmd->arg >> 8; vub300->cmnd.head.arguments[3] = cmd->arg >> 0; if (cmd->opcode == 52) { int fn = 0x7 & (cmd->arg >> 28); vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; vub300->cmnd.head.command_type = 0x00; vub300->cmnd.head.transfer_size[0] = 0; vub300->cmnd.head.transfer_size[1] = 0; vub300->cmnd.head.transfer_size[2] = 0; vub300->cmnd.head.transfer_size[3] = 0; } else if (!data) { vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; vub300->cmnd.head.command_type = 0x00; vub300->cmnd.head.transfer_size[0] = 0; vub300->cmnd.head.transfer_size[1] = 0; vub300->cmnd.head.transfer_size[2] = 0; vub300->cmnd.head.transfer_size[3] = 0; } else if (cmd->opcode == 53) { int fn = 0x7 & (cmd->arg >> 28); if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK 
MODE */ vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; } else { /* BYTE MODE */ vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; vub300->cmnd.head.block_size[0] = (vub300->datasize >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (vub300->datasize >> 0) & 0xFF; } vub300->cmnd.head.command_type = (MMC_DATA_READ & data->flags) ? 0x00 : 0x80; vub300->cmnd.head.transfer_size[0] = (vub300->datasize >> 24) & 0xFF; vub300->cmnd.head.transfer_size[1] = (vub300->datasize >> 16) & 0xFF; vub300->cmnd.head.transfer_size[2] = (vub300->datasize >> 8) & 0xFF; vub300->cmnd.head.transfer_size[3] = (vub300->datasize >> 0) & 0xFF; if (vub300->datasize < vub300->fbs[fn]) { vub300->cmnd.head.block_count[0] = 0; vub300->cmnd.head.block_count[1] = 0; } } else { vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; vub300->cmnd.head.command_type = (MMC_DATA_READ & data->flags) ? 
0x00 : 0x80;
		/* total transfer size in bytes, big-endian */
		vub300->cmnd.head.transfer_size[0] =
			(vub300->datasize >> 24) & 0xFF;
		vub300->cmnd.head.transfer_size[1] =
			(vub300->datasize >> 16) & 0xFF;
		vub300->cmnd.head.transfer_size[2] =
			(vub300->datasize >> 8) & 0xFF;
		vub300->cmnd.head.transfer_size[3] =
			(vub300->datasize >> 0) & 0xFF;
		/* transfers below the function block size go as one chunk */
		if (vub300->datasize < vub300->fbs[0]) {
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
		}
	}
	if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
		u16 block_size = vub300->cmnd.head.block_size[1] |
			(vub300->cmnd.head.block_size[0] << 8);
		/*
		 * largest multiple of the block size that still fits below
		 * the firmware's internal buffer boundary
		 */
		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
			(FIRMWARE_BLOCK_BOUNDARY % block_size);
		vub300->cmnd.head.block_boundary[0] =
			(block_boundary >> 8) & 0xFF;
		vub300->cmnd.head.block_boundary[1] =
			(block_boundary >> 0) & 0xFF;
	} else {
		vub300->cmnd.head.block_boundary[0] = 0;
		vub300->cmnd.head.block_boundary[1] = 0;
	}
	/* queue the fully-built command packet to the VUB300 */
	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
			  &vub300->cmnd, sizeof(vub300->cmnd),
			  command_out_completed, vub300);
	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
	if (retval < 0) {
		cmd->error = retval;
		complete(&vub300->command_complete);
		return;
	} else {
		return;
	}
}

/*
 * timer callback runs in atomic mode
 * so it cannot call usb_kill_urb()
 */
static void vub300_sg_timed_out(unsigned long data)
{
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
	/* flag the timeout and abort the in-flight transfers asynchronously */
	vub300->usb_timed_out = 1;
	usb_sg_cancel(&vub300->sg_request);
	usb_unlink_urb(vub300->command_out_urb);
	usb_unlink_urb(vub300->command_res_urb);
}

/* round up to the next multiple of 64 - the USB transfer granularity */
static u16 roundup_to_multiple_of_64(u16 number)
{
	return 0xFFC0 & (0x3F + number);
}

/*
 * this is a separate function to solve the 80 column width restriction
 */
static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
					  const struct firmware *fw)
{
	u8 register_count = 0;
	u16 ts = 0;
	u16 interrupt_size = 0;
	const u8 *data = fw->data;
	int size = fw->size;
	u8 c;
	dev_info(&vub300->udev->dev, "using %s 
for SDIO offload processing\n", vub300->vub_name); do { c = *data++; } while (size-- && c); /* skip comment */ dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data, vub300->vub_name); if (size < 4) { dev_err(&vub300->udev->dev, "corrupt offload pseudocode in firmware %s\n", vub300->vub_name); strncpy(vub300->vub_name, "corrupt offload pseudocode", sizeof(vub300->vub_name)); return; } interrupt_size += *data++; size -= 1; interrupt_size <<= 8; interrupt_size += *data++; size -= 1; if (interrupt_size < size) { u16 xfer_length = roundup_to_multiple_of_64(interrupt_size); u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); if (xfer_buffer) { int retval; memcpy(xfer_buffer, data, interrupt_size); memset(xfer_buffer + interrupt_size, 0, xfer_length - interrupt_size); size -= interrupt_size; data += interrupt_size; retval = usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_INTERRUPT_PSEUDOCODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); if (retval < 0) { strncpy(vub300->vub_name, "SDIO pseudocode download failed", sizeof(vub300->vub_name)); return; } } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data, vub300->vub_name); strncpy(vub300->vub_name, "SDIO interrupt pseudocode download failed", sizeof(vub300->vub_name)); return; } } else { dev_err(&vub300->udev->dev, "corrupt interrupt pseudocode in firmware %s %s\n", fw->data, vub300->vub_name); strncpy(vub300->vub_name, "corrupt interrupt pseudocode", sizeof(vub300->vub_name)); return; } ts += *data++; size -= 1; ts <<= 8; ts += *data++; size -= 1; if (ts < size) { u16 xfer_length = roundup_to_multiple_of_64(ts); u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); if (xfer_buffer) { int retval; memcpy(xfer_buffer, data, ts); memset(xfer_buffer + ts, 0, xfer_length - ts); size -= ts; data += ts; retval = usb_control_msg(vub300->udev, 
usb_sndctrlpipe(vub300->udev, 0), SET_TRANSFER_PSEUDOCODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); if (retval < 0) { strncpy(vub300->vub_name, "SDIO pseudocode download failed", sizeof(vub300->vub_name)); return; } } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" " TRANSFER_PSEUDOCODE for %s %s\n", fw->data, vub300->vub_name); strncpy(vub300->vub_name, "SDIO transfer pseudocode download failed", sizeof(vub300->vub_name)); return; } } else { dev_err(&vub300->udev->dev, "corrupt transfer pseudocode in firmware %s %s\n", fw->data, vub300->vub_name); strncpy(vub300->vub_name, "corrupt transfer pseudocode", sizeof(vub300->vub_name)); return; } register_count += *data++; size -= 1; if (register_count * 4 == size) { int I = vub300->dynamic_register_count = register_count; int i = 0; while (I--) { unsigned int func_num = 0; vub300->sdio_register[i].func_num = *data++; size -= 1; func_num += *data++; size -= 1; func_num <<= 8; func_num += *data++; size -= 1; func_num <<= 8; func_num += *data++; size -= 1; vub300->sdio_register[i].sdio_reg = func_num; vub300->sdio_register[i].activate = 1; vub300->sdio_register[i].prepared = 0; i += 1; } dev_info(&vub300->udev->dev, "initialized %d dynamic pseudocode registers\n", vub300->dynamic_register_count); return; } else { dev_err(&vub300->udev->dev, "corrupt dynamic registers in firmware %s\n", vub300->vub_name); strncpy(vub300->vub_name, "corrupt dynamic registers", sizeof(vub300->vub_name)); return; } } /* * if the binary containing the EMPTY PseudoCode can not be found * vub300->vub_name is set anyway in order to prevent an automatic retry */ static void download_offload_pseudocode(struct vub300_mmc_host *vub300) { struct mmc_card *card = vub300->mmc->card; int sdio_funcs = card->sdio_funcs; const struct firmware *fw = NULL; int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name), "vub_%04X%04X", card->cis.vendor, 
card->cis.device); int n = 0; int retval; for (n = 0; n < sdio_funcs; n++) { struct sdio_func *sf = card->sdio_func[n]; l += snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, "_%04X%04X", sf->vendor, sf->device); }; snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", vub300->vub_name); retval = request_firmware(&fw, vub300->vub_name, &card->dev); if (retval < 0) { strncpy(vub300->vub_name, "vub_default.bin", sizeof(vub300->vub_name)); retval = request_firmware(&fw, vub300->vub_name, &card->dev); if (retval < 0) { strncpy(vub300->vub_name, "no SDIO offload firmware found", sizeof(vub300->vub_name)); } else { __download_offload_pseudocode(vub300, fw); release_firmware(fw); } } else { __download_offload_pseudocode(vub300, fw); release_firmware(fw); } } static void vub300_usb_bulk_msg_completion(struct urb *urb) { /* urb completion handler - hardirq */ complete((struct completion *)urb->context); } static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300, unsigned int pipe, void *data, int len, int *actual_length, int timeout_msecs) { /* cmd_mutex is held by vub300_cmndwork_thread */ struct usb_device *usb_dev = vub300->udev; struct completion done; int retval; vub300->urb = usb_alloc_urb(0, GFP_KERNEL); if (!vub300->urb) return -ENOMEM; usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len, vub300_usb_bulk_msg_completion, NULL); init_completion(&done); vub300->urb->context = &done; vub300->urb->actual_length = 0; retval = usb_submit_urb(vub300->urb, GFP_KERNEL); if (unlikely(retval)) goto out; if (!wait_for_completion_timeout (&done, msecs_to_jiffies(timeout_msecs))) { retval = -ETIMEDOUT; usb_kill_urb(vub300->urb); } else { retval = vub300->urb->status; } out: *actual_length = vub300->urb->actual_length; usb_free_urb(vub300->urb); vub300->urb = NULL; return retval; } static int __command_read_data(struct vub300_mmc_host *vub300, struct mmc_command *cmd, struct 
mmc_data *data) { /* cmd_mutex is held by vub300_cmndwork_thread */ int linear_length = vub300->datasize; int padded_length = vub300->large_usb_packets ? ((511 + linear_length) >> 9) << 9 : ((63 + linear_length) >> 6) << 6; if ((padded_length == linear_length) || !pad_input_to_usb_pkt) { int result; unsigned pipe; pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); result = usb_sg_init(&vub300->sg_request, vub300->udev, pipe, 0, data->sg, data->sg_len, 0, GFP_KERNEL); if (result < 0) { usb_unlink_urb(vub300->command_out_urb); usb_unlink_urb(vub300->command_res_urb); cmd->error = result; data->bytes_xfered = 0; return 0; } else { vub300->sg_transfer_timer.expires = jiffies + msecs_to_jiffies(2000 + (linear_length / 16384)); add_timer(&vub300->sg_transfer_timer); usb_sg_wait(&vub300->sg_request); del_timer(&vub300->sg_transfer_timer); if (vub300->sg_request.status < 0) { cmd->error = vub300->sg_request.status; data->bytes_xfered = 0; return 0; } else { data->bytes_xfered = vub300->datasize; return linear_length; } } } else { u8 *buf = kmalloc(padded_length, GFP_KERNEL); if (buf) { int result; unsigned pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); int actual_length = 0; result = vub300_usb_bulk_msg(vub300, pipe, buf, padded_length, &actual_length, 2000 + (padded_length / 16384)); if (result < 0) { cmd->error = result; data->bytes_xfered = 0; kfree(buf); return 0; } else if (actual_length < linear_length) { cmd->error = -EREMOTEIO; data->bytes_xfered = 0; kfree(buf); return 0; } else { sg_copy_from_buffer(data->sg, data->sg_len, buf, linear_length); kfree(buf); data->bytes_xfered = vub300->datasize; return linear_length; } } else { cmd->error = -ENOMEM; data->bytes_xfered = 0; return 0; } } } static int __command_write_data(struct vub300_mmc_host *vub300, struct mmc_command *cmd, struct mmc_data *data) { /* cmd_mutex is held by vub300_cmndwork_thread */ unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep); int linear_length = 
vub300->datasize; int modulo_64_length = linear_length & 0x003F; int modulo_512_length = linear_length & 0x01FF; if (linear_length < 64) { int result; int actual_length; sg_copy_to_buffer(data->sg, data->sg_len, vub300->padded_buffer, sizeof(vub300->padded_buffer)); memset(vub300->padded_buffer + linear_length, 0, sizeof(vub300->padded_buffer) - linear_length); result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer, sizeof(vub300->padded_buffer), &actual_length, 2000 + (sizeof(vub300->padded_buffer) / 16384)); if (result < 0) { cmd->error = result; data->bytes_xfered = 0; } else { data->bytes_xfered = vub300->datasize; } } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) || (vub300->large_usb_packets && (64 > modulo_512_length)) ) { /* don't you just love these work-rounds */ int padded_length = ((63 + linear_length) >> 6) << 6; u8 *buf = kmalloc(padded_length, GFP_KERNEL); if (buf) { int result; int actual_length; sg_copy_to_buffer(data->sg, data->sg_len, buf, padded_length); memset(buf + linear_length, 0, padded_length - linear_length); result = vub300_usb_bulk_msg(vub300, pipe, buf, padded_length, &actual_length, 2000 + padded_length / 16384); kfree(buf); if (result < 0) { cmd->error = result; data->bytes_xfered = 0; } else { data->bytes_xfered = vub300->datasize; } } else { cmd->error = -ENOMEM; data->bytes_xfered = 0; } } else { /* no data padding required */ int result; unsigned char buf[64 * 4]; sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf)); result = usb_sg_init(&vub300->sg_request, vub300->udev, pipe, 0, data->sg, data->sg_len, 0, GFP_KERNEL); if (result < 0) { usb_unlink_urb(vub300->command_out_urb); usb_unlink_urb(vub300->command_res_urb); cmd->error = result; data->bytes_xfered = 0; } else { vub300->sg_transfer_timer.expires = jiffies + msecs_to_jiffies(2000 + linear_length / 16384); add_timer(&vub300->sg_transfer_timer); usb_sg_wait(&vub300->sg_request); if (cmd->error) { data->bytes_xfered = 0; } else { 
del_timer(&vub300->sg_transfer_timer); if (vub300->sg_request.status < 0) { cmd->error = vub300->sg_request.status; data->bytes_xfered = 0; } else { data->bytes_xfered = vub300->datasize; } } } } return linear_length; } static void __vub300_command_response(struct vub300_mmc_host *vub300, struct mmc_command *cmd, struct mmc_data *data, int data_length) { /* cmd_mutex is held by vub300_cmndwork_thread */ long respretval; int msec_timeout = 1000 + data_length / 4; respretval = wait_for_completion_timeout(&vub300->command_complete, msecs_to_jiffies(msec_timeout)); if (respretval == 0) { /* TIMED OUT */ /* we don't know which of "out" and "res" if any failed */ int result; vub300->usb_timed_out = 1; usb_kill_urb(vub300->command_out_urb); usb_kill_urb(vub300->command_res_urb); cmd->error = -ETIMEDOUT; result = usb_lock_device_for_reset(vub300->udev, vub300->interface); if (result == 0) { result = usb_reset_device(vub300->udev); usb_unlock_device(vub300->udev); } } else if (respretval < 0) { /* we don't know which of "out" and "res" if any failed */ usb_kill_urb(vub300->command_out_urb); usb_kill_urb(vub300->command_res_urb); cmd->error = respretval; } else if (cmd->error) { /* * the error occurred sending the command * or receiving the response */ } else if (vub300->command_out_urb->status) { vub300->usb_transport_fail = vub300->command_out_urb->status; cmd->error = -EPROTO == vub300->command_out_urb->status ? -ESHUTDOWN : vub300->command_out_urb->status; } else if (vub300->command_res_urb->status) { vub300->usb_transport_fail = vub300->command_res_urb->status; cmd->error = -EPROTO == vub300->command_res_urb->status ? 
-ESHUTDOWN : vub300->command_res_urb->status; } else if (vub300->resp.common.header_type == 0x00) { /* * the command completed successfully * and there was no piggybacked data */ } else if (vub300->resp.common.header_type == RESPONSE_ERROR) { cmd->error = vub300_response_error(vub300->resp.error.error_code); if (vub300->data) usb_sg_cancel(&vub300->sg_request); } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) { int offloaded_data_length = vub300->resp.common.header_size - sizeof(struct sd_register_header); int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); ri += 1; } vub300->resp.common.header_size = sizeof(struct sd_register_header); vub300->resp.common.header_type = 0x00; cmd->error = 0; } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) { int offloaded_data_length = vub300->resp.common.header_size - sizeof(struct sd_register_header); int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { vub300->irqs_queued += 1; } else if (vub300->irq_enabled) { vub300->irqs_queued += 1; vub300_queue_poll_work(vub300, 0); } else { vub300->irqs_queued += 1; } vub300->irq_disabled = 1; mutex_unlock(&vub300->irq_mutex); vub300->resp.common.header_size = sizeof(struct sd_register_header); vub300->resp.common.header_type = 0x00; cmd->error = 0; } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) { int offloaded_data_length = vub300->resp.common.header_size - sizeof(struct sd_register_header); int register_count = offloaded_data_length >> 3; int ri = 0; while (register_count--) { add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); ri += 1; } mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { vub300->irqs_queued += 1; } else if (vub300->irq_enabled) { vub300->irqs_queued += 1; 
vub300_queue_poll_work(vub300, 0); } else { vub300->irqs_queued += 1; } vub300->irq_disabled = 0; mutex_unlock(&vub300->irq_mutex); vub300->resp.common.header_size = sizeof(struct sd_register_header); vub300->resp.common.header_type = 0x00; cmd->error = 0; } else { cmd->error = -EINVAL; } } static void construct_request_response(struct vub300_mmc_host *vub300, struct mmc_command *cmd) { int resp_len = vub300->resp_len; int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1; int bytes = 3 & less_cmd; int words = less_cmd >> 2; u8 *r = vub300->resp.response.command_response; if (bytes == 3) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16) | (r[3 + (words << 2)] << 8); } else if (bytes == 2) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16); } else if (bytes == 1) { cmd->resp[words] = (r[1 + (words << 2)] << 24); } while (words-- > 0) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16) | (r[3 + (words << 2)] << 8) | (r[4 + (words << 2)] << 0); } if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0])) cmd->resp[0] &= 0xFFFFFF00; } /* this thread runs only when there is an upper level command req outstanding */ static void vub300_cmndwork_thread(struct work_struct *work) { struct vub300_mmc_host *vub300 = container_of(work, struct vub300_mmc_host, cmndwork); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); return; } else { struct mmc_request *req = vub300->req; struct mmc_command *cmd = vub300->cmd; struct mmc_data *data = vub300->data; int data_length; mutex_lock(&vub300->cmd_mutex); init_completion(&vub300->command_complete); if (likely(vub300->vub_name[0]) || !vub300->mmc->card || !mmc_card_present(vub300->mmc->card)) { /* * the name of the EMPTY Pseudo firmware file * is used as a flag to indicate that the file * has been already downloaded to the VUB300 chip */ } else if (0 == vub300->mmc->card->sdio_funcs) { strncpy(vub300->vub_name, "SD memory device", 
sizeof(vub300->vub_name)); } else { download_offload_pseudocode(vub300); } send_command(vub300); if (!data) data_length = 0; else if (MMC_DATA_READ & data->flags) data_length = __command_read_data(vub300, cmd, data); else data_length = __command_write_data(vub300, cmd, data); __vub300_command_response(vub300, cmd, data, data_length); vub300->req = NULL; vub300->cmd = NULL; vub300->data = NULL; if (cmd->error) { if (cmd->error == -ENOMEDIUM) check_vub300_port_status(vub300); mutex_unlock(&vub300->cmd_mutex); mmc_request_done(vub300->mmc, req); kref_put(&vub300->kref, vub300_delete); return; } else { construct_request_response(vub300, cmd); vub300->resp_len = 0; mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); mmc_request_done(vub300->mmc, req); return; } } } static int examine_cyclic_buffer(struct vub300_mmc_host *vub300, struct mmc_command *cmd, u8 Function) { /* cmd_mutex is held by vub300_mmc_request */ u8 cmd0 = 0xFF & (cmd->arg >> 24); u8 cmd1 = 0xFF & (cmd->arg >> 16); u8 cmd2 = 0xFF & (cmd->arg >> 8); u8 cmd3 = 0xFF & (cmd->arg >> 0); int first = MAXREGMASK & vub300->fn[Function].offload_point; struct offload_registers_access *rf = &vub300->fn[Function].reg[first]; if (cmd0 == rf->command_byte[0] && cmd1 == rf->command_byte[1] && cmd2 == rf->command_byte[2] && cmd3 == rf->command_byte[3]) { u8 checksum = 0x00; cmd->resp[1] = checksum << 24; cmd->resp[0] = (rf->Respond_Byte[0] << 24) | (rf->Respond_Byte[1] << 16) | (rf->Respond_Byte[2] << 8) | (rf->Respond_Byte[3] << 0); vub300->fn[Function].offload_point += 1; vub300->fn[Function].offload_count -= 1; vub300->total_offload_count -= 1; return 1; } else { int delta = 1; /* because it does not match the first one */ u8 register_count = vub300->fn[Function].offload_count - 1; u32 register_point = vub300->fn[Function].offload_point + 1; while (0 < register_count) { int point = MAXREGMASK & register_point; struct offload_registers_access *r = &vub300->fn[Function].reg[point]; if (cmd0 == 
r->command_byte[0] && cmd1 == r->command_byte[1] && cmd2 == r->command_byte[2] && cmd3 == r->command_byte[3]) { u8 checksum = 0x00; cmd->resp[1] = checksum << 24; cmd->resp[0] = (r->Respond_Byte[0] << 24) | (r->Respond_Byte[1] << 16) | (r->Respond_Byte[2] << 8) | (r->Respond_Byte[3] << 0); vub300->fn[Function].offload_point += delta; vub300->fn[Function].offload_count -= delta; vub300->total_offload_count -= delta; return 1; } else { register_point += 1; register_count -= 1; delta += 1; continue; } } return 0; } } static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300, struct mmc_command *cmd) { /* cmd_mutex is held by vub300_mmc_request */ u8 regs = vub300->dynamic_register_count; u8 i = 0; u8 func = FUN(cmd); u32 reg = REG(cmd); while (0 < regs--) { if ((vub300->sdio_register[i].func_num == func) && (vub300->sdio_register[i].sdio_reg == reg)) { if (!vub300->sdio_register[i].prepared) { return 0; } else if ((0x80000000 & cmd->arg) == 0x80000000) { /* * a write to a dynamic register * nullifies our offloaded value */ vub300->sdio_register[i].prepared = 0; return 0; } else { u8 checksum = 0x00; u8 rsp0 = 0x00; u8 rsp1 = 0x00; u8 rsp2 = vub300->sdio_register[i].response; u8 rsp3 = vub300->sdio_register[i].regvalue; vub300->sdio_register[i].prepared = 0; cmd->resp[1] = checksum << 24; cmd->resp[0] = (rsp0 << 24) | (rsp1 << 16) | (rsp2 << 8) | (rsp3 << 0); return 1; } } else { i += 1; continue; } }; if (vub300->total_offload_count == 0) return 0; else if (vub300->fn[func].offload_count == 0) return 0; else return examine_cyclic_buffer(vub300, cmd, func); } static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req) { /* NOT irq */ struct mmc_command *cmd = req->cmd; struct vub300_mmc_host *vub300 = mmc_priv(mmc); if (!vub300->interface) { cmd->error = -ESHUTDOWN; mmc_request_done(mmc, req); return; } else { struct mmc_data *data = req->data; if (!vub300->card_powered) { cmd->error = -ENOMEDIUM; mmc_request_done(mmc, req); 
return;
		}
		if (!vub300->card_present) {
			cmd->error = -ENOMEDIUM;
			mmc_request_done(mmc, req);
			return;
		}
		/* a previous USB transport failure poisons all later requests */
		if (vub300->usb_transport_fail) {
			cmd->error = vub300->usb_transport_fail;
			mmc_request_done(mmc, req);
			return;
		}
		if (!vub300->interface) {
			cmd->error = -ENODEV;
			mmc_request_done(mmc, req);
			return;
		}
		kref_get(&vub300->kref);
		mutex_lock(&vub300->cmd_mutex);
		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
		/*
		 * for performance we have to return immediately
		 * if the requested data has been offloaded
		 */
		if (cmd->opcode == 52 &&
		    satisfy_request_from_offloaded_data(vub300, cmd)) {
			cmd->error = 0;
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			mmc_request_done(mmc, req);
			return;
		} else {
			/* hand the request over to the cmndwork thread */
			vub300->cmd = cmd;
			vub300->req = req;
			vub300->data = data;
			if (data)
				vub300->datasize = data->blksz * data->blocks;
			else
				vub300->datasize = 0;
			vub300_queue_cmnd_work(vub300);
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			/*
			 * the kernel lock diagnostics complain
			 * if the cmd_mutex * is "passed on"
			 * to the cmndwork thread,
			 * so we must release it now
			 * and re-acquire it in the cmndwork thread
			 */
		}
	}
}

/*
 * serialize the requested SD clock rate into buf[] and send it to the
 * VUB300 via a SET_CLOCK_SPEED vendor control message
 */
static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
			      struct mmc_ios *ios)
{
	int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!!
*/ int retval; u32 kHzClock; if (ios->clock >= 48000000) kHzClock = 48000; else if (ios->clock >= 24000000) kHzClock = 24000; else if (ios->clock >= 20000000) kHzClock = 20000; else if (ios->clock >= 15000000) kHzClock = 15000; else if (ios->clock >= 200000) kHzClock = 200; else kHzClock = 0; { int i; u64 c = kHzClock; for (i = 0; i < buf_array_size; i++) { buf[i] = c; c >>= 8; } } retval = usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_CLOCK_SPEED, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, 0x00, buf, buf_array_size, HZ); if (retval != 8) { dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" " %dkHz failed with retval=%d\n", kHzClock, retval); } else { dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" " %dkHz\n", kHzClock); } } static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); if (!vub300->interface) return; kref_get(&vub300->kref); mutex_lock(&vub300->cmd_mutex); if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { vub300->card_powered = 0; usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_SD_POWER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, NULL, 0, HZ); /* must wait for the VUB300 u-proc to boot up */ msleep(600); } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_SD_POWER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0001, 0x0000, NULL, 0, HZ); msleep(600); vub300->card_powered = 1; } else if (ios->power_mode == MMC_POWER_ON) { u8 *buf = kmalloc(8, GFP_KERNEL); if (buf) { __set_clock_speed(vub300, buf, ios); kfree(buf); } } else { /* this should mean no change of state */ } mutex_unlock(&vub300->cmd_mutex); kref_put(&vub300->kref, vub300_delete); } static int vub300_mmc_get_ro(struct mmc_host *mmc) { struct vub300_mmc_host *vub300 = mmc_priv(mmc); return vub300->read_only; } static void 
vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); if (!vub300->interface) return; kref_get(&vub300->kref); if (enable) { mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { vub300->irqs_queued -= 1; mmc_signal_sdio_irq(vub300->mmc); } else if (vub300->irq_disabled) { vub300->irq_disabled = 0; vub300->irq_enabled = 1; vub300_queue_poll_work(vub300, 0); } else if (vub300->irq_enabled) { /* this should not happen, so we will just ignore it */ } else { vub300->irq_enabled = 1; vub300_queue_poll_work(vub300, 0); } mutex_unlock(&vub300->irq_mutex); } else { vub300->irq_enabled = 0; } kref_put(&vub300->kref, vub300_delete); } void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); } static struct mmc_host_ops vub300_mmc_ops = { .request = vub300_mmc_request, .set_ios = vub300_mmc_set_ios, .get_ro = vub300_mmc_get_ro, .enable_sdio_irq = vub300_enable_sdio_irq, .init_card = vub300_init_card, }; static int vub300_probe(struct usb_interface *interface, const struct usb_device_id *id) { /* NOT irq */ struct vub300_mmc_host *vub300; struct usb_host_interface *iface_desc; struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); int i; int retval = -ENOMEM; struct urb *command_out_urb; struct urb *command_res_urb; struct mmc_host *mmc; char manufacturer[48]; char product[32]; char serial_number[32]; usb_string(udev, udev->descriptor.iManufacturer, manufacturer, sizeof(manufacturer)); usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); usb_string(udev, udev->descriptor.iSerialNumber, serial_number, sizeof(serial_number)); dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", udev->descriptor.idVendor, udev->descriptor.idProduct, manufacturer, product, serial_number); command_out_urb = usb_alloc_urb(0, GFP_KERNEL); if 
(!command_out_urb) {
		retval = -ENOMEM;
		dev_err(&udev->dev, "not enough memory for command_out_urb\n");
		goto error0;
	}
	command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!command_res_urb) {
		retval = -ENOMEM;
		dev_err(&udev->dev, "not enough memory for command_res_urb\n");
		goto error1;
	}
	/* this also allocates memory for our VUB300 mmc host device */
	mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
	if (!mmc) {
		retval = -ENOMEM;
		dev_err(&udev->dev, "not enough memory for the mmc_host\n");
		goto error4;
	}
	/* MMC core transfer sizes tunable parameters */
	mmc->caps = 0;
	if (!force_1_bit_data_xfers)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	if (!force_polling_for_irqs)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	mmc->caps &= ~MMC_CAP_NEEDS_POLL;
	/*
	 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
	 * for devices which results in spurious CMD7's being
	 * issued which stops some SDIO cards from working
	 */
	if (limit_speed_to_24_MHz) {
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
		mmc->f_max = 24000000;
		dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
	} else {
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
		mmc->f_max = 48000000;
	}
	mmc->f_min = 200000;
	mmc->max_blk_count = 511;
	mmc->max_blk_size = 512;
	mmc->max_segs = 128;
	/* module parameter may override the default 64KiB request cap */
	if (force_max_req_size)
		mmc->max_req_size = force_max_req_size * 1024;
	else
		mmc->max_req_size = 64 * 1024;
	mmc->max_seg_size = mmc->max_req_size;
	/* advertise every supply voltage the adapter can pass through */
	mmc->ocr_avail = 0;
	mmc->ocr_avail |= MMC_VDD_165_195;
	mmc->ocr_avail |= MMC_VDD_20_21;
	mmc->ocr_avail |= MMC_VDD_21_22;
	mmc->ocr_avail |= MMC_VDD_22_23;
	mmc->ocr_avail |= MMC_VDD_23_24;
	mmc->ocr_avail |= MMC_VDD_24_25;
	mmc->ocr_avail |= MMC_VDD_25_26;
	mmc->ocr_avail |= MMC_VDD_26_27;
	mmc->ocr_avail |= MMC_VDD_27_28;
	mmc->ocr_avail |= MMC_VDD_28_29;
	mmc->ocr_avail |= MMC_VDD_29_30;
	mmc->ocr_avail |= MMC_VDD_30_31;
	mmc->ocr_avail |= MMC_VDD_31_32;
	mmc->ocr_avail |= MMC_VDD_32_33;
	mmc->ocr_avail |= MMC_VDD_33_34;
	mmc->ocr_avail |= MMC_VDD_34_35;
	mmc->ocr_avail |=
MMC_VDD_35_36; mmc->ops = &vub300_mmc_ops; vub300 = mmc_priv(mmc); vub300->mmc = mmc; vub300->card_powered = 0; vub300->bus_width = 0; vub300->cmnd.head.block_size[0] = 0x00; vub300->cmnd.head.block_size[1] = 0x00; vub300->app_spec = 0; mutex_init(&vub300->cmd_mutex); mutex_init(&vub300->irq_mutex); vub300->command_out_urb = command_out_urb; vub300->command_res_urb = command_res_urb; vub300->usb_timed_out = 0; vub300->dynamic_register_count = 0; for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { vub300->fn[i].offload_point = 0; vub300->fn[i].offload_count = 0; } vub300->total_offload_count = 0; vub300->irq_enabled = 0; vub300->irq_disabled = 0; vub300->irqs_queued = 0; for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) vub300->sdio_register[i++].activate = 0; vub300->udev = udev; vub300->interface = interface; vub300->cmnd_res_ep = 0; vub300->cmnd_out_ep = 0; vub300->data_inp_ep = 0; vub300->data_out_ep = 0; for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) vub300->fbs[i] = 512; /* * set up the endpoint information * * use the first pair of bulk-in and bulk-out * endpoints for Command/Response+Interrupt * * use the second pair of bulk-in and bulk-out * endpoints for Data In/Out */ vub300->large_usb_packets = 0; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint = &iface_desc->endpoint[i].desc; dev_info(&vub300->udev->dev, "vub300 testing %s EndPoint(%d) %02X\n", usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : usb_endpoint_is_bulk_out(endpoint) ? 
"BULK OUT" : "UNKNOWN", i, endpoint->bEndpointAddress); if (endpoint->wMaxPacketSize > 64) vub300->large_usb_packets = 1; if (usb_endpoint_is_bulk_in(endpoint)) { if (!vub300->cmnd_res_ep) { vub300->cmnd_res_ep = endpoint->bEndpointAddress; } else if (!vub300->data_inp_ep) { vub300->data_inp_ep = endpoint->bEndpointAddress; } else { dev_warn(&vub300->udev->dev, "ignoring" " unexpected bulk_in endpoint"); } } else if (usb_endpoint_is_bulk_out(endpoint)) { if (!vub300->cmnd_out_ep) { vub300->cmnd_out_ep = endpoint->bEndpointAddress; } else if (!vub300->data_out_ep) { vub300->data_out_ep = endpoint->bEndpointAddress; } else { dev_warn(&vub300->udev->dev, "ignoring" " unexpected bulk_out endpoint"); } } else { dev_warn(&vub300->udev->dev, "vub300 ignoring EndPoint(%d) %02X", i, endpoint->bEndpointAddress); } } if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && vub300->data_inp_ep && vub300->data_out_ep) { dev_info(&vub300->udev->dev, "vub300 %s packets" " using EndPoints %02X %02X %02X %02X\n", vub300->large_usb_packets ? "LARGE" : "SMALL", vub300->cmnd_out_ep, vub300->cmnd_res_ep, vub300->data_out_ep, vub300->data_inp_ep); /* we have the expected EndPoints */ } else { dev_err(&vub300->udev->dev, "Could not find two sets of bulk-in/out endpoint pairs\n"); retval = -EINVAL; goto error5; } retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_HC_INF0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->hc_info, sizeof(vub300->hc_info), HZ); if (retval < 0) goto error5; retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), SET_ROM_WAIT_STATES, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, firmware_rom_wait_states, 0x0000, NULL, 0, HZ); if (retval < 0) goto error5; dev_info(&vub300->udev->dev, "operating_mode = %s %s %d MHz %s %d byte USB packets\n", (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", mmc->f_max / 1000000, pad_input_to_usb_pkt ? 
"padding input data to" : "with", vub300->large_usb_packets ? 512 : 64); retval = usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), HZ); if (retval < 0) { goto error4; } else if (sizeof(vub300->system_port_status) == retval) { vub300->card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; } else { goto error4; } usb_set_intfdata(interface, vub300); INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); kref_init(&vub300->kref); init_timer(&vub300->sg_transfer_timer); vub300->sg_transfer_timer.data = (unsigned long)vub300; vub300->sg_transfer_timer.function = vub300_sg_timed_out; kref_get(&vub300->kref); init_timer(&vub300->inactivity_timer); vub300->inactivity_timer.data = (unsigned long)vub300; vub300->inactivity_timer.function = vub300_inactivity_timer_expired; vub300->inactivity_timer.expires = jiffies + HZ; add_timer(&vub300->inactivity_timer); if (vub300->card_present) dev_info(&vub300->udev->dev, "USB vub300 remote SDIO host controller[%d]" "connected with SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); else dev_info(&vub300->udev->dev, "USB vub300 remote SDIO host controller[%d]" "connected with no SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); mmc_add_host(mmc); return 0; error5: mmc_free_host(mmc); /* * and hence also frees vub300 * which is contained at the end of struct mmc */ error4: usb_free_urb(command_res_urb); error1: usb_free_urb(command_out_urb); error0: usb_put_dev(udev); return retval; } static void vub300_disconnect(struct usb_interface *interface) { /* NOT irq */ struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); if 
(!vub300 || !vub300->mmc) {
		return;
	} else {
		struct mmc_host *mmc = vub300->mmc;
		if (!vub300->mmc) {
			return;
		} else {
			int ifnum = interface_to_InterfaceNumber(interface);
			usb_set_intfdata(interface, NULL);
			/* prevent more I/O from starting */
			vub300->interface = NULL;
			kref_put(&vub300->kref, vub300_delete);
			mmc_remove_host(mmc);
			pr_info("USB vub300 remote SDIO host controller[%d]"
				" now disconnected", ifnum);
			return;
		}
	}
}

#ifdef CONFIG_PM
/* suspend/resume just delegate to the MMC core for the attached host */
static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	if (!vub300 || !vub300->mmc) {
		return 0;
	} else {
		struct mmc_host *mmc = vub300->mmc;
		mmc_suspend_host(mmc);
		return 0;
	}
}

static int vub300_resume(struct usb_interface *intf)
{
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	if (!vub300 || !vub300->mmc) {
		return 0;
	} else {
		struct mmc_host *mmc = vub300->mmc;
		mmc_resume_host(mmc);
		return 0;
	}
}
#else
#define vub300_suspend NULL
#define vub300_resume NULL
#endif
/* hold cmd_mutex across a USB reset; released in vub300_post_reset() */
static int vub300_pre_reset(struct usb_interface *intf)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	mutex_lock(&vub300->cmd_mutex);
	return 0;
}

static int vub300_post_reset(struct usb_interface *intf)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
	/* we are sure no URBs are active - no locking needed */
	vub300->errors = -EPIPE;
	mutex_unlock(&vub300->cmd_mutex);
	return 0;
}

static struct usb_driver vub300_driver = {
	.name = "vub300",
	.probe = vub300_probe,
	.disconnect = vub300_disconnect,
	.suspend = vub300_suspend,
	.resume = vub300_resume,
	.pre_reset = vub300_pre_reset,
	.post_reset = vub300_post_reset,
	.id_table = vub300_table,
	.supports_autosuspend = 1,
};

/*
 * Module init: create the three single-threaded workqueues (command,
 * irq-poll, expiry) before registering the USB driver.
 */
static int __init vub300_init(void)
{				/* NOT irq */
	int result;

	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
		firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
	cmndworkqueue = create_singlethread_workqueue("kvub300c");
	if
(!cmndworkqueue) {
		pr_err("not enough memory for the REQUEST workqueue");
		result = -ENOMEM;
		goto out1;
	}
	pollworkqueue = create_singlethread_workqueue("kvub300p");
	if (!pollworkqueue) {
		pr_err("not enough memory for the IRQPOLL workqueue");
		result = -ENOMEM;
		goto out2;
	}
	deadworkqueue = create_singlethread_workqueue("kvub300d");
	if (!deadworkqueue) {
		pr_err("not enough memory for the EXPIRED workqueue");
		result = -ENOMEM;
		goto out3;
	}
	result = usb_register(&vub300_driver);
	if (result) {
		pr_err("usb_register failed. Error number %d", result);
		goto out4;
	}
	return 0;
	/* unwind the workqueues in reverse order of creation */
out4:
	destroy_workqueue(deadworkqueue);
out3:
	destroy_workqueue(pollworkqueue);
out2:
	destroy_workqueue(cmndworkqueue);
out1:
	return result;
}

/* Module exit: deregister first, then drain and tear down workqueues */
static void __exit vub300_exit(void)
{
	usb_deregister(&vub300_driver);
	flush_workqueue(cmndworkqueue);
	flush_workqueue(pollworkqueue);
	flush_workqueue(deadworkqueue);
	destroy_workqueue(cmndworkqueue);
	destroy_workqueue(pollworkqueue);
	destroy_workqueue(deadworkqueue);
}

module_init(vub300_init);
module_exit(vub300_exit);

MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
MODULE_LICENSE("GPL");
gpl-2.0
TeamExodus/kernel_huawei_angler
crypto/cast_common.c
2320
13392
/* * Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6) * * Copyright © 1998, 1999, 2000, 2001 Free Software Foundation, Inc. * Copyright © 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com> * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify it * under the terms of GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/module.h> #include <crypto/cast_common.h> const u32 cast_s1[256] = { 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 
0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf }; EXPORT_SYMBOL_GPL(cast_s1); const u32 cast_s2[256] = { 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 
0xef944459, 0xba83ccb3, 0xe0c3cdfb, 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, 0xb8da230c, 0x80823028, 0xdcdef3c8, 
0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1 }; EXPORT_SYMBOL_GPL(cast_s2); const u32 cast_s3[256] = { 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 
0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783 }; EXPORT_SYMBOL_GPL(cast_s3); 
const u32 cast_s4[256] = { 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 
0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2 }; EXPORT_SYMBOL_GPL(cast_s4); MODULE_LICENSE("GPL");
gpl-2.0
xiaolvmu/flounder-kernel
drivers/hwmon/max197.c
2320
8940
/* * Maxim MAX197 A/D Converter driver * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * For further information, see the Documentation/hwmon/max197 file. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/platform_device.h> #include <linux/platform_data/max197.h> #define MAX199_LIMIT 4000 /* 4V */ #define MAX197_LIMIT 10000 /* 10V */ #define MAX197_NUM_CH 8 /* 8 Analog Input Channels */ /* Control byte format */ #define MAX197_BIP (1 << 3) /* Bipolarity */ #define MAX197_RNG (1 << 4) /* Full range */ #define MAX197_SCALE 12207 /* Scale coefficient for raw data */ /* List of supported chips */ enum max197_chips { max197, max199 }; /** * struct max197_data - device instance specific data * @pdata: Platform data. * @hwmon_dev: The hwmon device. * @lock: Read/Write mutex. * @limit: Max range value (10V for MAX197, 4V for MAX199). * @scale: Need to scale. * @ctrl_bytes: Channels control byte. 
*/
struct max197_data {
	struct max197_platform_data *pdata;	/* board-supplied convert() */
	struct device *hwmon_dev;
	struct mutex lock;			/* guards ctrl_bytes reads/writes */
	int limit;				/* full-scale mV (10000 or 4000) */
	bool scale;				/* true on MAX197: scale raw data */
	u8 ctrl_bytes[MAX197_NUM_CH];		/* per-channel control byte */
};

/* clear/set the BIP (bipolarity) bit in a channel's control byte */
static inline void max197_set_unipolarity(struct max197_data *data, int channel)
{
	data->ctrl_bytes[channel] &= ~MAX197_BIP;
}

static inline void max197_set_bipolarity(struct max197_data *data, int channel)
{
	data->ctrl_bytes[channel] |= MAX197_BIP;
}

/* clear/set the RNG (full range) bit in a channel's control byte */
static inline void max197_set_half_range(struct max197_data *data, int channel)
{
	data->ctrl_bytes[channel] &= ~MAX197_RNG;
}

static inline void max197_set_full_range(struct max197_data *data, int channel)
{
	data->ctrl_bytes[channel] |= MAX197_RNG;
}

static inline bool max197_is_bipolar(struct max197_data *data, int channel)
{
	return data->ctrl_bytes[channel] & MAX197_BIP;
}

static inline bool max197_is_full_range(struct max197_data *data, int channel)
{
	return data->ctrl_bytes[channel] & MAX197_RNG;
}

/* Function called on read access on in{0,1,2,3,4,5,6,7}_{min,max} */
static ssize_t max197_show_range(struct device *dev,
				 struct device_attribute *devattr, char *buf)
{
	struct max197_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int channel = attr->index;
	bool is_min = attr->nr;		/* nr distinguishes _min from _max */
	int range;

	if (mutex_lock_interruptible(&data->lock))
		return -ERESTARTSYS;

	/* upper bound follows the RNG bit; lower bound follows BIP */
	range = max197_is_full_range(data, channel) ?
		data->limit : data->limit / 2;
	if (is_min) {
		if (max197_is_bipolar(data, channel))
			range = -range;
		else
			range = 0;
	}

	mutex_unlock(&data->lock);

	return sprintf(buf, "%d\n", range);
}

/* Function called on write access on in{0,1,2,3,4,5,6,7}_{min,max} */
static ssize_t max197_store_range(struct device *dev,
				  struct device_attribute *devattr,
				  const char *buf, size_t count)
{
	struct max197_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int channel = attr->index;
	bool is_min = attr->nr;
	long value;
	int half = data->limit / 2;
	int full = data->limit;

	if (kstrtol(buf, 10, &value))
		return -EINVAL;

	/* snap the written value onto the nearest representable limit */
	if (is_min) {
		if (value <= -full)
			value = -full;
		else if (value < 0)
			value = -half;
		else
			value = 0;
	} else {
		if (value >= full)
			value = full;
		else
			value = half;
	}

	if (mutex_lock_interruptible(&data->lock))
		return -ERESTARTSYS;

	if (value == 0) {
		/* We can deduce only the polarity */
		max197_set_unipolarity(data, channel);
	} else if (value == -half) {
		max197_set_bipolarity(data, channel);
		max197_set_half_range(data, channel);
	} else if (value == -full) {
		max197_set_bipolarity(data, channel);
		max197_set_full_range(data, channel);
	} else if (value == half) {
		/* We can deduce only the range */
		max197_set_half_range(data, channel);
	} else if (value == full) {
		/* We can deduce only the range */
		max197_set_full_range(data, channel);
	}

	mutex_unlock(&data->lock);
	return count;
}

/* Function called on read access on in{0,1,2,3,4,5,6,7}_input */
static ssize_t max197_show_input(struct device *dev,
				 struct device_attribute *devattr,
				 char *buf)
{
	struct max197_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int channel = attr->index;
	s32 value;
	int ret;

	if (mutex_lock_interruptible(&data->lock))
		return -ERESTARTSYS;

	ret = data->pdata->convert(data->ctrl_bytes[channel]);
	if (ret < 0) {
		dev_err(dev, "conversion failed\n");
		goto unlock;
	}
	value = ret;
	/*
	 * Coefficient to apply on raw
value.
	 * See Table 1. Full Scale and Zero Scale in the MAX197 datasheet.
	 */
	if (data->scale) {
		value *= MAX197_SCALE;
		if (max197_is_full_range(data, channel))
			value *= 2;
		value /= 10000;
	}

	ret = sprintf(buf, "%d\n", value);

unlock:
	mutex_unlock(&data->lock);
	return ret;
}

/* sysfs "name" attribute: report the platform device name */
static ssize_t max197_show_name(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	return sprintf(buf, "%s\n", pdev->name);
}

/* declare the three attributes (input, min, max) for one channel */
#define MAX197_SENSOR_DEVICE_ATTR_CH(chan)				\
	static SENSOR_DEVICE_ATTR(in##chan##_input, S_IRUGO,		\
				  max197_show_input, NULL, chan);	\
	static SENSOR_DEVICE_ATTR_2(in##chan##_min, S_IRUGO | S_IWUSR,	\
				    max197_show_range,			\
				    max197_store_range,			\
				    true, chan);			\
	static SENSOR_DEVICE_ATTR_2(in##chan##_max, S_IRUGO | S_IWUSR,	\
				    max197_show_range,			\
				    max197_store_range,			\
				    false, chan)

/* expand to the three attribute pointers of one channel */
#define MAX197_SENSOR_DEV_ATTR_IN(chan)					\
	&sensor_dev_attr_in##chan##_input.dev_attr.attr,		\
	&sensor_dev_attr_in##chan##_max.dev_attr.attr,			\
	&sensor_dev_attr_in##chan##_min.dev_attr.attr

static DEVICE_ATTR(name, S_IRUGO, max197_show_name, NULL);

MAX197_SENSOR_DEVICE_ATTR_CH(0);
MAX197_SENSOR_DEVICE_ATTR_CH(1);
MAX197_SENSOR_DEVICE_ATTR_CH(2);
MAX197_SENSOR_DEVICE_ATTR_CH(3);
MAX197_SENSOR_DEVICE_ATTR_CH(4);
MAX197_SENSOR_DEVICE_ATTR_CH(5);
MAX197_SENSOR_DEVICE_ATTR_CH(6);
MAX197_SENSOR_DEVICE_ATTR_CH(7);

static const struct attribute_group max197_sysfs_group = {
	.attrs = (struct attribute *[]) {
		&dev_attr_name.attr,
		MAX197_SENSOR_DEV_ATTR_IN(0),
		MAX197_SENSOR_DEV_ATTR_IN(1),
		MAX197_SENSOR_DEV_ATTR_IN(2),
		MAX197_SENSOR_DEV_ATTR_IN(3),
		MAX197_SENSOR_DEV_ATTR_IN(4),
		MAX197_SENSOR_DEV_ATTR_IN(5),
		MAX197_SENSOR_DEV_ATTR_IN(6),
		MAX197_SENSOR_DEV_ATTR_IN(7),
		NULL
	},
};

/*
 * Probe: validate platform data, set per-chip limit/scaling, register
 * the sysfs group and the hwmon device.
 */
static int max197_probe(struct platform_device *pdev)
{
	int ch, ret;
	struct max197_data *data;
	struct max197_platform_data *pdata = pdev->dev.platform_data;
	enum max197_chips chip = platform_get_device_id(pdev)->driver_data;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	if (pdata->convert == NULL) {
		dev_err(&pdev->dev, "no convert function supplied\n");
		return -EINVAL;
	}

	data = devm_kzalloc(&pdev->dev, sizeof(struct max197_data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "devm_kzalloc failed\n");
		return -ENOMEM;
	}

	data->pdata = pdata;
	mutex_init(&data->lock);

	if (chip == max197) {
		data->limit = MAX197_LIMIT;
		data->scale = true;
	} else {
		data->limit = MAX199_LIMIT;
		data->scale = false;
	}

	/* default control byte: channel select bits only */
	for (ch = 0; ch < MAX197_NUM_CH; ch++)
		data->ctrl_bytes[ch] = (u8) ch;

	platform_set_drvdata(pdev, data);

	ret = sysfs_create_group(&pdev->dev.kobj, &max197_sysfs_group);
	if (ret) {
		dev_err(&pdev->dev, "sysfs create group failed\n");
		return ret;
	}

	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		dev_err(&pdev->dev, "hwmon device register failed\n");
		goto error;
	}

	return 0;

error:
	sysfs_remove_group(&pdev->dev.kobj, &max197_sysfs_group);
	return ret;
}

static int max197_remove(struct platform_device *pdev)
{
	struct max197_data *data = platform_get_drvdata(pdev);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &max197_sysfs_group);

	return 0;
}

static struct platform_device_id max197_device_ids[] = {
	{ "max197", max197 },
	{ "max199", max199 },
	{ }
};
MODULE_DEVICE_TABLE(platform, max197_device_ids);

static struct platform_driver max197_driver = {
	.driver = {
		.name = "max197",
		.owner = THIS_MODULE,
	},
	.probe = max197_probe,
	.remove = max197_remove,
	.id_table = max197_device_ids,
};
module_platform_driver(max197_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
MODULE_DESCRIPTION("Maxim MAX197 A/D Converter driver");
gpl-2.0
HRTKernel/Hacker_Kernel_SM-G928F
drivers/net/ethernet/amd/7990.c
2320
21625
/* * 7990.c -- LANCE ethernet IC generic routines. * This is an attempt to separate out the bits of various ethernet * drivers that are common because they all use the AMD 7990 LANCE * (Local Area Network Controller for Ethernet) chip. * * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk> * * Most of this stuff was obtained by looking at other LANCE drivers, * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. * NB: this was made easy by the fact that Jes Sorensen had cleaned up * most of a2025 and sunlance with the aim of merging them, so the * common code was pretty obvious. */ #include <linux/crc32.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/route.h> #include <linux/string.h> #include <linux/skbuff.h> #include <asm/irq.h> /* Used for the temporal inet entries and routing */ #include <linux/socket.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> #ifdef CONFIG_HP300 #include <asm/blinken.h> #endif #include "7990.h" #define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x)) #define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x)) #define READRDP(lp) in_be16(lp->base + LANCE_RDP) #if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) #include "hplance.h" #undef WRITERAP #undef WRITERDP #undef READRDP #if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) /* Lossage Factor Nine, Mr Sulu. 
*/ #define WRITERAP(lp,x) (lp->writerap(lp,x)) #define WRITERDP(lp,x) (lp->writerdp(lp,x)) #define READRDP(lp) (lp->readrdp(lp)) #else /* These inlines can be used if only CONFIG_HPLANCE is defined */ static inline void WRITERAP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline void WRITERDP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline __u16 READRDP(struct lance_private *lp) { __u16 value; do { value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); return value; } #endif #endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ /* debugging output macros, various flavours */ /* #define TEST_HITS */ #ifdef UNDEF #define PRINT_RINGS() \ do { \ int t; \ for (t=0; t < RX_RING_SIZE; t++) { \ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\ ib->brx_ring[t].length,\ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\ }\ for (t=0; t < TX_RING_SIZE; t++) { \ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\ ib->btx_ring[t].length,\ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\ }\ } while (0) #else #define PRINT_RINGS() #endif /* Load the CSR registers. The LANCE has to be STOPped when we do this! 
*/ static void load_csrs (struct lance_private *lp) { volatile struct lance_init_block *aib = lp->lance_init_block; int leptr; leptr = LANCE_ADDR (aib); WRITERAP(lp, LE_CSR1); /* load address of init block */ WRITERDP(lp, leptr & 0xFFFF); WRITERAP(lp, LE_CSR2); WRITERDP(lp, leptr >> 16); WRITERAP(lp, LE_CSR3); WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ /* Point back to csr0 */ WRITERAP(lp, LE_CSR0); } /* #define to 0 or 1 appropriately */ #define DEBUG_IRING 0 /* Set up the Lance Rx and Tx rings and the init block */ static void lance_init_ring (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ int leptr; int i; aib = lp->lance_init_block; lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ /* Copy the ethernet address to the lance init block * Notice that we do a byteswap if we're big endian. * [I think this is the right criterion; at least, sunlance, * a2065 and atarilance do the byteswap and lance.c (PC) doesn't. * However, the datasheet says that the BSWAP bit doesn't affect * the init block, so surely it should be low byte first for * everybody? Um.] * We could define the ib->physaddr as three 16bit values and * use (addr[1] << 8) | addr[0] & co, but this is more efficient. 
*/ #ifdef __BIG_ENDIAN ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; #else for (i=0; i<6; i++) ib->phys_addr[i] = dev->dev_addr[i]; #endif if (DEBUG_IRING) printk ("TX rings:\n"); lp->tx_full = 0; /* Setup the Tx ring entries */ for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) { leptr = LANCE_ADDR(&aib->tx_buf[i][0]); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the Rx ring entries */ if (DEBUG_IRING) printk ("RX rings:\n"); for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) { leptr = LANCE_ADDR(&aib->rx_buf[i][0]); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; /* 0xf000 == bits that must be one (reserved, presumably) */ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(&aib->brx_ring); ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); ib->rx_ptr = leptr; if (DEBUG_IRING) printk ("RX ptr: %8.8x\n", leptr); /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(&aib->btx_ring); ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); ib->tx_ptr = leptr; if (DEBUG_IRING) printk ("TX ptr: %8.8x\n", leptr); /* Clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; PRINT_RINGS(); } /* LANCE must be STOPped before we do this, too... 
*/ static int init_restart_lance (struct lance_private *lp) { int i; WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_INIT); /* Need a hook here for sunlance ledma stuff */ /* Wait for the lance to complete initialization */ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++) barrier(); if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) { printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ WRITERDP(lp, LE_C0_IDON); WRITERDP(lp, LE_C0_INEA | LE_C0_STRT); return 0; } static int lance_reset (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); load_csrs (lp); lance_init_ring (dev); dev->trans_start = jiffies; /* prevent tx timeout */ status = init_restart_lance (lp); #ifdef DEBUG_DRIVER printk ("Lance restart=%d\n", status); #endif return status; } static int lance_rx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_rx_desc *rd; unsigned char bits; #ifdef TEST_HITS int i; #endif #ifdef TEST_HITS printk ("["); for (i = 0; i < RX_RING_SIZE; i++) { if (i == lp->rx_new) printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X"); else printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1"); } printk ("]"); #endif #ifdef CONFIG_HP300 blinken_leds(0x40, 0); #endif WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */ !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [lp->rx_new]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { int len = (rd->mblength & 0xfff) - 4; struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; return 0; } skb_reserve (skb, 2); /* 16 byte align */ skb_put (skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), len); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; } return 0; } static int lance_tx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_tx_desc *td; int i, j; int status; #ifdef CONFIG_HP300 blinken_leds(0x80, 0); #endif /* csr0 is 2f3 */ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); /* csr0 is 73 */ j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { td = &ib->btx_ring [i]; /* If we hit a packet not owned by us, stop */ if (td->tmd1_bits & LE_T1_OWN) break; if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk("%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); /* Stop the lance 
*/ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits &= ~(LE_T1_POK); /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; } lp->tx_old = j; WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); return 0; } static irqreturn_t lance_interrupt (int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ csr0 = READRDP(lp); PRINT_RINGS(); if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ spin_unlock (&lp->devlock); return IRQ_NONE; /* been generated by the Lance. */ } /* Acknowledge all the interrupt sources ASAP */ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); if ((csr0 & LE_C0_ERR)) { /* Clear the error condition */ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); } if (csr0 & LE_C0_RINT) lance_rx (dev); if (csr0 & LE_C0_TINT) lance_tx (dev); /* Log misc errors. */ if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & LE_C0_MERR) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); /* Restart the chip. */ WRITERDP(lp, LE_C0_STRT); } if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { lp->tx_full = 0; netif_wake_queue (dev); } WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); spin_unlock (&lp->devlock); return IRQ_HANDLED; } int lance_open (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int res; /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) return -EAGAIN; res = lance_reset(dev); spin_lock_init(&lp->devlock); netif_start_queue (dev); return res; } EXPORT_SYMBOL_GPL(lance_open); int lance_close (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue (dev); /* Stop the LANCE */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); free_irq(lp->irq, dev); return 0; } EXPORT_SYMBOL_GPL(lance_close); void lance_tx_timeout(struct net_device *dev) { printk("lance_tx_timeout\n"); lance_reset(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue (dev); } EXPORT_SYMBOL_GPL(lance_tx_timeout); int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int entry, skblen, len; static int outs; unsigned long flags; if (!TX_BUFFS_AVAIL) return NETDEV_TX_LOCKED; netif_stop_queue (dev); skblen = skb->len; #ifdef DEBUG_DRIVER /* dump the packet */ { int i; for (i = 0; i < 64; i++) { if ((i % 16) == 0) printk ("\n"); printk ("%2.2x ", skb->data [i]); } } #endif len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; if (skb->len < ETH_ZLEN) memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; outs++; /* Kick the lance: transmit now */ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); dev_kfree_skb (skb); spin_lock_irqsave (&lp->devlock, flags); if (TX_BUFFS_AVAIL) netif_start_queue (dev); else lp->tx_full = 1; spin_unlock_irqrestore (&lp->devlock, flags); return NETDEV_TX_OK; } EXPORT_SYMBOL_GPL(lance_start_xmit); /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; struct netdev_hw_addr *ha; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI){ ib->filter [0] = 0xffffffff; ib->filter [1] = 0xffffffff; return; } /* clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } void lance_set_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int stopped; stopped = netif_queue_stopped(dev); if (!stopped) netif_stop_queue (dev); while (lp->tx_old != lp->tx_new) schedule(); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); if (dev->flags & IFF_PROMISC) { ib->mode |= LE_MO_PROM; } else { ib->mode &= ~LE_MO_PROM; lance_load_multicast (dev); } load_csrs (lp); init_restart_lance (lp); if (!stopped) netif_start_queue (dev); } EXPORT_SYMBOL_GPL(lance_set_multicast); 
#ifdef CONFIG_NET_POLL_CONTROLLER void lance_poll(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STRT); spin_unlock (&lp->devlock); lance_interrupt(dev->irq, dev); } #endif MODULE_LICENSE("GPL");
gpl-2.0
sominn/android_kernel_samsung_golden
net/rfkill/rfkill-regulator.c
2832
4051
/* * rfkill-regulator.c - Regulator consumer driver for rfkill * * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com> * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it> * * Implementation inspired by leds-regulator driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/rfkill.h> #include <linux/rfkill-regulator.h> struct rfkill_regulator_data { struct rfkill *rf_kill; bool reg_enabled; struct regulator *vcc; }; static int rfkill_regulator_set_block(void *data, bool blocked) { struct rfkill_regulator_data *rfkill_data = data; pr_debug("%s: blocked: %d\n", __func__, blocked); if (blocked) { if (rfkill_data->reg_enabled) { regulator_disable(rfkill_data->vcc); rfkill_data->reg_enabled = 0; } } else { if (!rfkill_data->reg_enabled) { regulator_enable(rfkill_data->vcc); rfkill_data->reg_enabled = 1; } } pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__, regulator_is_enabled(rfkill_data->vcc)); return 0; } struct rfkill_ops rfkill_regulator_ops = { .set_block = rfkill_regulator_set_block, }; static int __devinit rfkill_regulator_probe(struct platform_device *pdev) { struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data; struct rfkill_regulator_data *rfkill_data; struct regulator *vcc; struct rfkill *rf_kill; int ret = 0; if (pdata == NULL) { dev_err(&pdev->dev, "no platform data\n"); return -ENODEV; } if (pdata->name == NULL || pdata->type == 0) { dev_err(&pdev->dev, "invalid name or type in platform data\n"); return -EINVAL; } vcc = regulator_get_exclusive(&pdev->dev, "vrfkill"); if (IS_ERR(vcc)) { dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name); ret = PTR_ERR(vcc); goto out; } rfkill_data = 
kzalloc(sizeof(*rfkill_data), GFP_KERNEL); if (rfkill_data == NULL) { ret = -ENOMEM; goto err_data_alloc; } rf_kill = rfkill_alloc(pdata->name, &pdev->dev, pdata->type, &rfkill_regulator_ops, rfkill_data); if (rf_kill == NULL) { dev_err(&pdev->dev, "Cannot alloc rfkill device\n"); ret = -ENOMEM; goto err_rfkill_alloc; } if (regulator_is_enabled(vcc)) { dev_dbg(&pdev->dev, "Regulator already enabled\n"); rfkill_data->reg_enabled = 1; } rfkill_data->vcc = vcc; rfkill_data->rf_kill = rf_kill; ret = rfkill_register(rf_kill); if (ret) { dev_err(&pdev->dev, "Cannot register rfkill device\n"); goto err_rfkill_register; } platform_set_drvdata(pdev, rfkill_data); dev_info(&pdev->dev, "%s initialized\n", pdata->name); return 0; err_rfkill_register: rfkill_destroy(rf_kill); err_rfkill_alloc: kfree(rfkill_data); err_data_alloc: regulator_put(vcc); out: return ret; } static int __devexit rfkill_regulator_remove(struct platform_device *pdev) { struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev); struct rfkill *rf_kill = rfkill_data->rf_kill; rfkill_unregister(rf_kill); rfkill_destroy(rf_kill); regulator_put(rfkill_data->vcc); kfree(rfkill_data); return 0; } static struct platform_driver rfkill_regulator_driver = { .probe = rfkill_regulator_probe, .remove = __devexit_p(rfkill_regulator_remove), .driver = { .name = "rfkill-regulator", .owner = THIS_MODULE, }, }; static int __init rfkill_regulator_init(void) { return platform_driver_register(&rfkill_regulator_driver); } module_init(rfkill_regulator_init); static void __exit rfkill_regulator_exit(void) { platform_driver_unregister(&rfkill_regulator_driver); } module_exit(rfkill_regulator_exit); MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>"); MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("Regulator consumer driver for rfkill"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rfkill-regulator");
gpl-2.0
SlimRoms/kernel_htc_msm8994
net/bridge/netfilter/ebt_stp.c
3856
5001
/*
 *  ebt_stp
 *
 *	Authors:
 *	Bart De Schuymer <bdschuym@pandora.be>
 *	Stephen Hemminger <shemminger@osdl.org>
 *
 *  July, 2003
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_stp.h>

#define BPDU_TYPE_CONFIG 0
#define BPDU_TYPE_TCN 0x80

/* LLC header + BPDU preamble as it appears at offset 0 of the frame. */
struct stp_header {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t ctrl;
	uint8_t pid;
	uint8_t vers;
	uint8_t type;
};

/* Body of a configuration BPDU; multi-byte fields are big endian. */
struct stp_config_pdu {
	uint8_t flags;
	uint8_t root[8];	/* 2 bytes priority + 6 bytes bridge addr */
	uint8_t root_cost[4];
	uint8_t sender[8];	/* 2 bytes priority + 6 bytes bridge addr */
	uint8_t port[2];
	uint8_t msg_age[2];
	uint8_t max_age[2];
	uint8_t hello_time[2];
	uint8_t forward_delay[2];
};

/* Read big-endian 16/32-bit values from unaligned byte arrays. */
#define NR16(p) (p[0] << 8 | p[1])
#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])

/*
 * Compare a 6-byte address from the BPDU against a configured address
 * under a per-byte mask.  Returns true when all unmasked bits match.
 * (Shared by the root- and sender-address checks below, which used to
 * duplicate this loop.)
 */
static bool ebt_filter_addr_match(const uint8_t *hdr_addr,
				  const uint8_t *addr, const uint8_t *mask)
{
	uint8_t diff = 0;
	int i;

	for (i = 0; i < 6; i++)
		diff |= (hdr_addr[i] ^ addr[i]) & mask[i];
	return diff == 0;
}

/*
 * Apply the configured per-field filters to a configuration BPDU.
 * Each check honours the corresponding EBT_STP_* inversion flag via
 * FWINV.  Returns true when the packet matches all enabled filters.
 */
static bool ebt_filter_config(const struct ebt_stp_info *info,
			      const struct stp_config_pdu *stpc)
{
	const struct ebt_stp_config_info *c;
	uint16_t v16;
	uint32_t v32;

	c = &info->config;
	if ((info->bitmask & EBT_STP_FLAGS) &&
	    FWINV(c->flags != stpc->flags, EBT_STP_FLAGS))
		return false;
	if (info->bitmask & EBT_STP_ROOTPRIO) {
		v16 = NR16(stpc->root);
		if (FWINV(v16 < c->root_priol ||
		    v16 > c->root_priou, EBT_STP_ROOTPRIO))
			return false;
	}
	if (info->bitmask & EBT_STP_ROOTADDR) {
		/* skip the 2-byte priority in front of the bridge address */
		if (FWINV(!ebt_filter_addr_match(&stpc->root[2], c->root_addr,
						 c->root_addrmsk),
			  EBT_STP_ROOTADDR))
			return false;
	}
	if (info->bitmask & EBT_STP_ROOTCOST) {
		v32 = NR32(stpc->root_cost);
		if (FWINV(v32 < c->root_costl ||
		    v32 > c->root_costu, EBT_STP_ROOTCOST))
			return false;
	}
	if (info->bitmask & EBT_STP_SENDERPRIO) {
		v16 = NR16(stpc->sender);
		if (FWINV(v16 < c->sender_priol ||
		    v16 > c->sender_priou, EBT_STP_SENDERPRIO))
			return false;
	}
	if (info->bitmask & EBT_STP_SENDERADDR) {
		/* skip the 2-byte priority in front of the bridge address */
		if (FWINV(!ebt_filter_addr_match(&stpc->sender[2],
						 c->sender_addr,
						 c->sender_addrmsk),
			  EBT_STP_SENDERADDR))
			return false;
	}
	if (info->bitmask & EBT_STP_PORT) {
		v16 = NR16(stpc->port);
		if (FWINV(v16 < c->portl ||
		    v16 > c->portu, EBT_STP_PORT))
			return false;
	}
	if (info->bitmask & EBT_STP_MSGAGE) {
		v16 = NR16(stpc->msg_age);
		if (FWINV(v16 < c->msg_agel ||
		    v16 > c->msg_ageu, EBT_STP_MSGAGE))
			return false;
	}
	if (info->bitmask & EBT_STP_MAXAGE) {
		v16 = NR16(stpc->max_age);
		if (FWINV(v16 < c->max_agel ||
		    v16 > c->max_ageu, EBT_STP_MAXAGE))
			return false;
	}
	if (info->bitmask & EBT_STP_HELLOTIME) {
		v16 = NR16(stpc->hello_time);
		if (FWINV(v16 < c->hello_timel ||
		    v16 > c->hello_timeu, EBT_STP_HELLOTIME))
			return false;
	}
	if (info->bitmask & EBT_STP_FWDD) {
		v16 = NR16(stpc->forward_delay);
		if (FWINV(v16 < c->forward_delayl ||
		    v16 > c->forward_delayu, EBT_STP_FWDD))
			return false;
	}
	return true;
}

/*
 * Match callback: accept only frames carrying an STP BPDU, optionally
 * filtered by BPDU type and, for configuration BPDUs, by the per-field
 * filters in ebt_filter_config().
 */
static bool
ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ebt_stp_info *info = par->matchinfo;
	const struct stp_header *sp;
	struct stp_header _stph;
	const uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};

	sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
	if (sp == NULL)
		return false;

	/* The stp code only considers these */
	if (memcmp(sp, header, sizeof(header)))
		return false;

	if (info->bitmask & EBT_STP_TYPE &&
	    FWINV(info->type != sp->type, EBT_STP_TYPE))
		return false;

	if (sp->type == BPDU_TYPE_CONFIG &&
	    info->bitmask & EBT_STP_CONFIG_MASK) {
		const struct stp_config_pdu *st;
		struct stp_config_pdu _stpc;

		st = skb_header_pointer(skb, sizeof(_stph),
					sizeof(_stpc), &_stpc);
		if (st == NULL)
			return false;
		return ebt_filter_config(info, st);
	}
	return true;
}

/*
 * checkentry callback: reject rules with unknown flag bits, and require
 * the rule to restrict the destination MAC to the bridge group address
 * so the match only ever sees STP frames.
 */
static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
	const struct ebt_stp_info *info = par->matchinfo;
	const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
	const uint8_t msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	const struct ebt_entry *e = par->entryinfo;

	if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
	    !(info->bitmask & EBT_STP_MASK))
		return -EINVAL;
	/* Make sure the match only receives stp frames */
	if (!ether_addr_equal(e->destmac, bridge_ula) ||
	    !ether_addr_equal(e->destmsk, msk) ||
	    !(e->bitmask & EBT_DESTMAC))
		return -EINVAL;

	return 0;
}

static struct xt_match ebt_stp_mt_reg __read_mostly = {
	.name		= "stp",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.match		= ebt_stp_mt,
	.checkentry	= ebt_stp_mt_check,
	.matchsize	= sizeof(struct ebt_stp_info),
	.me		= THIS_MODULE,
};

static int __init ebt_stp_init(void)
{
	return xt_register_match(&ebt_stp_mt_reg);
}

static void __exit ebt_stp_fini(void)
{
	xt_unregister_match(&ebt_stp_mt_reg);
}

module_init(ebt_stp_init);
module_exit(ebt_stp_fini);
MODULE_DESCRIPTION("Ebtables: Spanning Tree Protocol packet match");
MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/android_kernel_samsung_smdk4412
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
4880
35615
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <net/mac80211.h> #include "rate.h" #include "scb.h" #include "phy/phy_hal.h" #include "antsel.h" #include "main.h" #include "ampdu.h" /* max number of mpdus in an ampdu */ #define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu to a legacy */ #define AMPDU_NUM_MPDU_LEGACY 16 /* max Tx ba window size (in pdu) */ #define AMPDU_TX_BA_MAX_WSIZE 64 /* default Tx ba window size (in pdu) */ #define AMPDU_TX_BA_DEF_WSIZE 64 /* default Rx ba window size (in pdu) */ #define AMPDU_RX_BA_DEF_WSIZE 64 /* max Rx ba window size (in pdu) */ #define AMPDU_RX_BA_MAX_WSIZE 64 /* max dur of tx ampdu (in msec) */ #define AMPDU_MAX_DUR 5 /* default tx retry limit */ #define AMPDU_DEF_RETRY_LIMIT 5 /* default tx retry limit at reg rate */ #define AMPDU_DEF_RR_RETRY_LIMIT 2 /* default weight of ampdu in txfifo */ #define AMPDU_DEF_TXPKT_WEIGHT 2 /* default ffpld reserved bytes */ #define AMPDU_DEF_FFPLD_RSVD 2048 /* # of inis to be freed on detach */ #define AMPDU_INI_FREE 10 /* max # of mpdus released at a time */ #define AMPDU_SCB_MAX_RELEASE 20 #define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */ #define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu * without underflows */ 
#define FFPLD_MPDU_SIZE 1800 /* estimate of maximum mpdu size */ #define FFPLD_MAX_MCS 23 /* we don't deal with mcs 32 */ #define FFPLD_PLD_INCR 1000 /* increments in bytes */ #define FFPLD_MAX_AMPDU_CNT 5000 /* maximum number of ampdu we * accumulate between resets. */ #define AMPDU_DELIMITER_LEN 4 /* max allowed number of mpdus in an ampdu (2 streams) */ #define AMPDU_NUM_MPDU 16 #define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE) /* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */ #define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\ AMPDU_DELIMITER_LEN + 3\ + DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN) /* modulo add/sub, bound = 2^k */ #define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) #define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) /* structure to hold tx fifo information and pre-loading state * counters specific to tx underflows of ampdus * some counters might be redundant with the ones in wlc or ampdu structures. * This allows to maintain a specific state independently of * how often and/or when the wlc counters are updated. 
* * ampdu_pld_size: number of bytes to be pre-loaded * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu * prev_txfunfl: num of underflows last read from the HW macstats counter * accum_txfunfl: num of underflows since we modified pld params * accum_txampdu: num of tx ampdu since we modified pld params * prev_txampdu: previous reading of tx ampdu * dmaxferrate: estimated dma avg xfer rate in kbits/sec */ struct brcms_fifo_info { u16 ampdu_pld_size; u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; u16 prev_txfunfl; u32 accum_txfunfl; u32 accum_txampdu; u32 prev_txampdu; u32 dmaxferrate; }; /* AMPDU module specific state * * wlc: pointer to main wlc structure * scb_handle: scb cubby handle to retrieve data from scb * ini_enable: per-tid initiator enable/disable of ampdu * ba_tx_wsize: Tx ba window size (in pdu) * ba_rx_wsize: Rx ba window size (in pdu) * retry_limit: mpdu transmit retry limit * rr_retry_limit: mpdu transmit retry limit at regular rate * retry_limit_tid: per-tid mpdu transmit retry limit * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec * max_pdu: max pdus allowed in ampdu * dur: max duration of an ampdu (in msec) * txpkt_weight: weight of ampdu in txfifo; reduces rate lag * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes * ffpld_rsvd: number of bytes to reserve for preload * max_txlen: max size of ampdu per mcs, bw and sgi * mfbr: enable multiple fallback rate * tx_max_funl: underflows should be kept such that * (tx_max_funfl*underflows) < tx frames * fifo_tb: table of fifo infos */ struct ampdu_info { struct brcms_c_info *wlc; int scb_handle; u8 ini_enable[AMPDU_MAX_SCB_TID]; u8 ba_tx_wsize; u8 ba_rx_wsize; u8 retry_limit; u8 rr_retry_limit; u8 retry_limit_tid[AMPDU_MAX_SCB_TID]; u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID]; u8 mpdu_density; s8 max_pdu; u8 dur; u8 txpkt_weight; u8 rx_factor; u32 ffpld_rsvd; u32 max_txlen[MCS_TABLE_SIZE][2][2]; bool mfbr; u32 tx_max_funl; 
struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO]; }; /* used for flushing ampdu packets */ struct cb_del_ampdu_pars { struct ieee80211_sta *sta; u16 tid; }; static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur) { u32 rate, mcs; for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) { /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */ /* 20MHz, No SGI */ rate = mcs_2_rate(mcs, false, false); ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3; /* 40 MHz, No SGI */ rate = mcs_2_rate(mcs, true, false); ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3; /* 20MHz, SGI */ rate = mcs_2_rate(mcs, false, true); ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3; /* 40 MHz, SGI */ rate = mcs_2_rate(mcs, true, true); ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3; } } static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu) { if (BRCMS_PHY_11N_CAP(ampdu->wlc->band)) return true; else return false; } static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on) { struct brcms_c_info *wlc = ampdu->wlc; wlc->pub->_ampdu = false; if (on) { if (!(wlc->pub->_n_enab & SUPPORT_11N)) { wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not " "nmode enabled\n", wlc->pub->unit); return -ENOTSUPP; } if (!brcms_c_ampdu_cap(ampdu)) { wiphy_err(ampdu->wlc->wiphy, "wl%d: device not " "ampdu capable\n", wlc->pub->unit); return -ENOTSUPP; } wlc->pub->_ampdu = on; } return 0; } static void brcms_c_ffpld_init(struct ampdu_info *ampdu) { int i, j; struct brcms_fifo_info *fifo; for (j = 0; j < NUM_FFPLD_FIFO; j++) { fifo = (ampdu->fifo_tb + j); fifo->ampdu_pld_size = 0; for (i = 0; i <= FFPLD_MAX_MCS; i++) fifo->mcs2ampdu_table[i] = 255; fifo->dmaxferrate = 0; fifo->accum_txampdu = 0; fifo->prev_txfunfl = 0; fifo->accum_txfunfl = 0; } } struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc) { struct ampdu_info *ampdu; int i; ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC); if (!ampdu) return NULL; ampdu->wlc = wlc; for (i = 0; i < AMPDU_MAX_SCB_TID; i++) 
ampdu->ini_enable[i] = true; /* Disable ampdu for VO by default */ ampdu->ini_enable[PRIO_8021D_VO] = false; ampdu->ini_enable[PRIO_8021D_NC] = false; /* Disable ampdu for BK by default since not enough fifo space */ ampdu->ini_enable[PRIO_8021D_NONE] = false; ampdu->ini_enable[PRIO_8021D_BK] = false; ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE; ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE; ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY; ampdu->max_pdu = AUTO; ampdu->dur = AMPDU_MAX_DUR; ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT; ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD; /* * bump max ampdu rcv size to 64k for all 11n * devices except 4321A0 and 4321A1 */ if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2)) ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K; else ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K; ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT; ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT; for (i = 0; i < AMPDU_MAX_SCB_TID; i++) { ampdu->retry_limit_tid[i] = ampdu->retry_limit; ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit; } brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur); ampdu->mfbr = false; /* try to set ampdu to the default value */ brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu); ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL; brcms_c_ffpld_init(ampdu); return ampdu; } void brcms_c_ampdu_detach(struct ampdu_info *ampdu) { kfree(ampdu); } static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb) { struct scb_ampdu *scb_ampdu = &scb->scb_ampdu; int i; scb_ampdu->max_pdu = AMPDU_NUM_MPDU; /* go back to legacy size if some preloading is occurring */ for (i = 0; i < NUM_FFPLD_FIFO; i++) { if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR) scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY; } /* apply user override */ if (ampdu->max_pdu != AUTO) scb_ampdu->max_pdu = (u8) ampdu->max_pdu; scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE); if (scb_ampdu->max_rx_ampdu_bytes) scb_ampdu->release = min_t(u8, 
scb_ampdu->release, scb_ampdu->max_rx_ampdu_bytes / 1600); scb_ampdu->release = min(scb_ampdu->release, ampdu->fifo_tb[TX_AC_BE_FIFO]. mcs2ampdu_table[FFPLD_MAX_MCS]); } static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu) { brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb); } static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f) { int i; u32 phy_rate, dma_rate, tmp; u8 max_mpdu; struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f); /* recompute the dma rate */ /* note : we divide/multiply by 100 to avoid integer overflows */ max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY); phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false); dma_rate = (((phy_rate / 100) * (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size)) / (max_mpdu * FFPLD_MPDU_SIZE)) * 100; fifo->dmaxferrate = dma_rate; /* fill up the mcs2ampdu table; do not recalc the last mcs */ dma_rate = dma_rate >> 7; for (i = 0; i < FFPLD_MAX_MCS; i++) { /* shifting to keep it within integer range */ phy_rate = mcs_2_rate(i, true, false) >> 7; if (phy_rate > dma_rate) { tmp = ((fifo->ampdu_pld_size * phy_rate) / ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1; tmp = min_t(u32, tmp, 255); fifo->mcs2ampdu_table[i] = (u8) tmp; } } } /* evaluate the dma transfer rate using the tx underflows as feedback. * If necessary, increase tx fifo preloading. If not enough, * decrease maximum ampdu size for each mcs till underflows stop * Return 1 if pre-loading not active, -1 if not an underflow event, * 0 if pre-loading module took care of the event. 
*/ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid) { struct ampdu_info *ampdu = wlc->ampdu; u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false); u32 txunfl_ratio; u8 max_mpdu; u32 current_ampdu_cnt = 0; u16 max_pld_size; u32 new_txunfl; struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid); uint xmtfifo_sz; u16 cur_txunfl; /* return if we got here for a different reason than underflows */ cur_txunfl = brcms_b_read_shm(wlc->hw, M_UCODE_MACSTAT + offsetof(struct macstat, txfunfl[fid])); new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl); if (new_txunfl == 0) { BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n"); return -1; } fifo->prev_txfunfl = cur_txunfl; if (!ampdu->tx_max_funl) return 1; /* check if fifo is big enough */ if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz)) return -1; if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd) return 1; max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd; fifo->accum_txfunfl += new_txunfl; /* we need to wait for at least 10 underflows */ if (fifo->accum_txfunfl < 10) return 0; BCMMSG(wlc->wiphy, "ampdu_count %d tx_underflows %d\n", current_ampdu_cnt, fifo->accum_txfunfl); /* compute the current ratio of tx unfl per ampdu. When the current ampdu count becomes too big while the ratio remains small, we reset the current count in order to not introduce too big of a latency in detecting a large amount of tx underflows later. */ txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl; if (txunfl_ratio > ampdu->tx_max_funl) { if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT) fifo->accum_txfunfl = 0; return 0; } max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY); /* In case max value max_pdu is already lower than the fifo depth, there is nothing more we can do. 
*/ if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) { fifo->accum_txfunfl = 0; return 0; } if (fifo->ampdu_pld_size < max_pld_size) { /* increment by TX_FIFO_PLD_INC bytes */ fifo->ampdu_pld_size += FFPLD_PLD_INCR; if (fifo->ampdu_pld_size > max_pld_size) fifo->ampdu_pld_size = max_pld_size; /* update scb release size */ brcms_c_scb_ampdu_update_config_all(ampdu); /* * compute a new dma xfer rate for max_mpdu @ max mcs. * This is the minimum dma rate that can achieve no * underflow condition for the current mpdu size. * * note : we divide/multiply by 100 to avoid integer overflows */ fifo->dmaxferrate = (((phy_rate / 100) * (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size)) / (max_mpdu * FFPLD_MPDU_SIZE)) * 100; BCMMSG(wlc->wiphy, "DMA estimated transfer rate %d; " "pre-load size %d\n", fifo->dmaxferrate, fifo->ampdu_pld_size); } else { /* decrease ampdu size */ if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) { if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255) fifo->mcs2ampdu_table[FFPLD_MAX_MCS] = AMPDU_NUM_MPDU_LEGACY - 1; else fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1; /* recompute the table */ brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid); /* update scb release size */ brcms_c_scb_ampdu_update_config_all(ampdu); } } fifo->accum_txfunfl = 0; return 0; } void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid, u8 ba_wsize, /* negotiated ba window size (in pdu) */ uint max_rx_ampdu_bytes) /* from ht_cap in beacon */ { struct scb_ampdu *scb_ampdu; struct scb_ampdu_tid_ini *ini; struct ampdu_info *ampdu = wlc->ampdu; struct scb *scb = &wlc->pri_scb; scb_ampdu = &scb->scb_ampdu; if (!ampdu->ini_enable[tid]) { wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n", __func__, tid); return; } ini = &scb_ampdu->ini[tid]; ini->tid = tid; ini->scb = scb_ampdu->scb; ini->ba_wsize = ba_wsize; scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes; } int brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, struct sk_buff **pdu, int prec) { 
struct brcms_c_info *wlc; struct sk_buff *p, *pkt[AMPDU_MAX_MPDU]; u8 tid, ndelim; int err = 0; u8 preamble_type = BRCMS_GF_PREAMBLE; u8 fbr_preamble_type = BRCMS_GF_PREAMBLE; u8 rts_preamble_type = BRCMS_LONG_PREAMBLE; u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE; bool rr = true, fbr = false; uint i, count = 0, fifo, seg_cnt = 0; u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0; u32 ampdu_len, max_ampdu_bytes = 0; struct d11txh *txh = NULL; u8 *plcp; struct ieee80211_hdr *h; struct scb *scb; struct scb_ampdu *scb_ampdu; struct scb_ampdu_tid_ini *ini; u8 mcs = 0; bool use_rts = false, use_cts = false; u32 rspec = 0, rspec_fallback = 0; u32 rts_rspec = 0, rts_rspec_fallback = 0; u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ; struct ieee80211_rts *rts; u8 rr_retry_limit; struct brcms_fifo_info *f; bool fbr_iscck; struct ieee80211_tx_info *tx_info; u16 qlen; struct wiphy *wiphy; wlc = ampdu->wlc; wiphy = wlc->wiphy; p = *pdu; tid = (u8) (p->priority); f = ampdu->fifo_tb + prio2fifo[tid]; scb = &wlc->pri_scb; scb_ampdu = &scb->scb_ampdu; ini = &scb_ampdu->ini[tid]; /* Let pressure continue to build ... 
*/ qlen = pktq_plen(&qi->q, prec); if (ini->tx_in_transit > 0 && qlen < min(scb_ampdu->max_pdu, ini->ba_wsize)) /* Collect multiple MPDU's to be sent in the next AMPDU */ return -EBUSY; /* at this point we intend to transmit an AMPDU */ rr_retry_limit = ampdu->rr_retry_limit_tid[tid]; ampdu_len = 0; dma_len = 0; while (p) { struct ieee80211_tx_rate *txrate; tx_info = IEEE80211_SKB_CB(p); txrate = tx_info->status.rates; if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { err = brcms_c_prep_pdu(wlc, p, &fifo); } else { wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__); *pdu = NULL; err = 0; break; } if (err) { if (err == -EBUSY) { wiphy_err(wiphy, "wl%d: sendampdu: " "prep_xdu retry; seq 0x%x\n", wlc->pub->unit, seq); *pdu = p; break; } /* error in the packet; reject it */ wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu " "rejected; seq 0x%x\n", wlc->pub->unit, seq); *pdu = NULL; break; } /* pkt is good to be aggregated */ txh = (struct d11txh *) p->data; plcp = (u8 *) (txh + 1); h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN); seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT; index = TX_SEQ_TO_INDEX(seq); /* check mcl fields and test whether it can be agg'd */ mcl = le16_to_cpu(txh->MacTxControlLow); mcl &= ~TXC_AMPDU_MASK; fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3); txh->PreloadSize = 0; /* always default to 0 */ /* Handle retry limits */ if (txrate[0].count <= rr_retry_limit) { txrate[0].count++; rr = true; fbr = false; } else { fbr = true; rr = false; txrate[1].count++; } /* extract the length info */ len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback); /* retrieve null delimiter count */ ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM]; seg_cnt += 1; BCMMSG(wlc->wiphy, "wl%d: mpdu %d plcp_len %d\n", wlc->pub->unit, count, len); /* * aggregateable mpdu. 
For ucode/hw agg, * test whether need to break or change the epoch */ if (count == 0) { mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT); /* refill the bits since might be a retx mpdu */ mcl |= TXC_STARTMSDU; rts = (struct ieee80211_rts *)&txh->rts_frame; if (ieee80211_is_rts(rts->frame_control)) { mcl |= TXC_SENDRTS; use_rts = true; } if (ieee80211_is_cts(rts->frame_control)) { mcl |= TXC_SENDCTS; use_cts = true; } } else { mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT); mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS); } len = roundup(len, 4); ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN); dma_len += (u16) p->len; BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d" " seg_cnt %d null delim %d\n", wlc->pub->unit, ampdu_len, seg_cnt, ndelim); txh->MacTxControlLow = cpu_to_le16(mcl); /* this packet is added */ pkt[count++] = p; /* patch the first MPDU */ if (count == 1) { u8 plcp0, plcp3, is40, sgi; struct ieee80211_sta *sta; sta = tx_info->control.sta; if (rr) { plcp0 = plcp[0]; plcp3 = plcp[3]; } else { plcp0 = txh->FragPLCPFallback[0]; plcp3 = txh->FragPLCPFallback[3]; } is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0; sgi = plcp3_issgi(plcp3) ? 1 : 0; mcs = plcp0 & ~MIMO_PLCP_40MHZ; max_ampdu_bytes = min(scb_ampdu->max_rx_ampdu_bytes, ampdu->max_txlen[mcs][is40][sgi]); if (is40) mimo_ctlchbw = CHSPEC_SB_UPPER(wlc_phy_chanspec_get( wlc->band->pi)) ? 
PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ; /* rebuild the rspec and rspec_fallback */ rspec = RSPEC_MIMORATE; rspec |= plcp[0] & ~MIMO_PLCP_40MHZ; if (plcp[0] & MIMO_PLCP_40MHZ) rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT); if (fbr_iscck) /* CCK */ rspec_fallback = cck_rspec(cck_phy2mac_rate (txh->FragPLCPFallback[0])); else { /* MIMO */ rspec_fallback = RSPEC_MIMORATE; rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ; if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ) rspec_fallback |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT); } if (use_rts || use_cts) { rts_rspec = brcms_c_rspec_to_rts_rspec(wlc, rspec, false, mimo_ctlchbw); rts_rspec_fallback = brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback, false, mimo_ctlchbw); } } /* if (first mpdu for host agg) */ /* test whether to add more */ if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) && (count == f->mcs2ampdu_table[mcs])) { BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping" " ampdu at %d for mcs %d\n", wlc->pub->unit, count, mcs); break; } if (count == scb_ampdu->max_pdu) break; /* * check to see if the next pkt is * a candidate for aggregation */ p = pktq_ppeek(&qi->q, prec); /* tx_info must be checked with current p */ tx_info = IEEE80211_SKB_CB(p); if (p) { if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && ((u8) (p->priority) == tid)) { plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; plen = max(scb_ampdu->min_len, plen); if ((plen + ampdu_len) > max_ampdu_bytes) { p = NULL; continue; } /* * check if there are enough * descriptors available */ if (*wlc->core->txavail[fifo] <= seg_cnt + 1) { wiphy_err(wiphy, "%s: No fifo space " "!!\n", __func__); p = NULL; continue; } p = brcmu_pktq_pdeq(&qi->q, prec); } else { p = NULL; } } } /* end while(p) */ ini->tx_in_transit += count; if (count) { /* patch up the last txh */ txh = (struct d11txh *) pkt[count - 1]->data; mcl = le16_to_cpu(txh->MacTxControlLow); mcl &= ~TXC_AMPDU_MASK; mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT); txh->MacTxControlLow = cpu_to_le16(mcl); /* 
remove the null delimiter after last mpdu */ ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM]; txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0; ampdu_len -= ndelim * AMPDU_DELIMITER_LEN; /* remove the pad len from last mpdu */ fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0); len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback); ampdu_len -= roundup(len, 4) - len; /* patch up the first txh & plcp */ txh = (struct d11txh *) pkt[0]->data; plcp = (u8 *) (txh + 1); BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len); /* mark plcp to indicate ampdu */ BRCMS_SET_MIMO_PLCP_AMPDU(plcp); /* reset the mixed mode header durations */ if (txh->MModeLen) { u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec, ampdu_len); txh->MModeLen = cpu_to_le16(mmodelen); preamble_type = BRCMS_MM_PREAMBLE; } if (txh->MModeFbrLen) { u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback, ampdu_len); txh->MModeFbrLen = cpu_to_le16(mmfbrlen); fbr_preamble_type = BRCMS_MM_PREAMBLE; } /* set the preload length */ if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) { dma_len = min(dma_len, f->ampdu_pld_size); txh->PreloadSize = cpu_to_le16(dma_len); } else txh->PreloadSize = 0; mch = le16_to_cpu(txh->MacTxControlHigh); /* update RTS dur fields */ if (use_rts || use_cts) { u16 durid; rts = (struct ieee80211_rts *)&txh->rts_frame; if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) == TXC_PREAMBLE_RTS_MAIN_SHORT) rts_preamble_type = BRCMS_SHORT_PREAMBLE; if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) == TXC_PREAMBLE_RTS_FB_SHORT) rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE; durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec, rspec, rts_preamble_type, preamble_type, ampdu_len, true); rts->duration = cpu_to_le16(durid); durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec_fallback, rspec_fallback, rts_fbr_preamble_type, fbr_preamble_type, ampdu_len, true); txh->RTSDurFallback = cpu_to_le16(durid); /* set TxFesTimeNormal */ txh->TxFesTimeNormal = 
rts->duration; /* set fallback rate version of TxFesTimeNormal */ txh->TxFesTimeFallback = txh->RTSDurFallback; } /* set flag and plcp for fallback rate */ if (fbr) { mch |= TXC_AMPDU_FBR; txh->MacTxControlHigh = cpu_to_le16(mch); BRCMS_SET_MIMO_PLCP_AMPDU(plcp); BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback); } BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n", wlc->pub->unit, count, ampdu_len); /* inform rate_sel if it this is a rate probe pkt */ frameid = le16_to_cpu(txh->TxFrameID); if (frameid & TXFID_RATE_PROBE_MASK) wiphy_err(wiphy, "%s: XXX what to do with " "TXFID_RATE_PROBE_MASK!?\n", __func__); for (i = 0; i < count; i++) brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1), ampdu->txpkt_weight); } /* endif (count) */ return err; } static void brcms_c_ampdu_rate_status(struct brcms_c_info *wlc, struct ieee80211_tx_info *tx_info, struct tx_status *txs, u8 mcs) { struct ieee80211_tx_rate *txrate = tx_info->status.rates; int i; /* clear the rest of the rates */ for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) { txrate[i].idx = -1; txrate[i].count = 0; } } static void brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p, struct tx_status *txs, u32 s1, u32 s2) { struct scb_ampdu *scb_ampdu; struct brcms_c_info *wlc = ampdu->wlc; struct scb_ampdu_tid_ini *ini; u8 bitmap[8], queue, tid; struct d11txh *txh; u8 *plcp; struct ieee80211_hdr *h; u16 seq, start_seq = 0, bindex, index, mcl; u8 mcs = 0; bool ba_recd = false, ack_recd = false; u8 suc_mpdu = 0, tot_mpdu = 0; uint supr_status; bool update_rate = true, retry = true, tx_error = false; u16 mimoantsel = 0; u8 antselid = 0; u8 retry_limit, rr_retry_limit; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p); struct wiphy *wiphy = wlc->wiphy; #ifdef DEBUG u8 hole[AMPDU_MAX_MPDU]; memset(hole, 0, sizeof(hole)); #endif scb_ampdu = &scb->scb_ampdu; tid = (u8) (p->priority); ini = &scb_ampdu->ini[tid]; retry_limit = ampdu->retry_limit_tid[tid]; rr_retry_limit = 
ampdu->rr_retry_limit_tid[tid]; memset(bitmap, 0, sizeof(bitmap)); queue = txs->frameid & TXFID_QUEUE_MASK; supr_status = txs->status & TX_STATUS_SUPR_MASK; if (txs->status & TX_STATUS_ACK_RCV) { if (TX_STATUS_SUPR_UF == supr_status) update_rate = false; WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE)); start_seq = txs->sequence >> SEQNUM_SHIFT; bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >> TX_STATUS_BA_BMAP03_SHIFT; WARN_ON(s1 & TX_STATUS_INTERMEDIATE); WARN_ON(!(s1 & TX_STATUS_AMPDU)); bitmap[0] |= (s1 & TX_STATUS_BA_BMAP47_MASK) << TX_STATUS_BA_BMAP47_SHIFT; bitmap[1] = (s1 >> 8) & 0xff; bitmap[2] = (s1 >> 16) & 0xff; bitmap[3] = (s1 >> 24) & 0xff; bitmap[4] = s2 & 0xff; bitmap[5] = (s2 >> 8) & 0xff; bitmap[6] = (s2 >> 16) & 0xff; bitmap[7] = (s2 >> 24) & 0xff; ba_recd = true; } else { if (supr_status) { update_rate = false; if (supr_status == TX_STATUS_SUPR_BADCH) { wiphy_err(wiphy, "%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL( wlc->default_bss->chanspec)); } else { if (supr_status != TX_STATUS_SUPR_FRAG) wiphy_err(wiphy, "%s: supr_status 0x%x\n", __func__, supr_status); } /* no need to retry for badch; will fail again */ if (supr_status == TX_STATUS_SUPR_BADCH || supr_status == TX_STATUS_SUPR_EXPTIME) { retry = false; } else if (supr_status == TX_STATUS_SUPR_EXPTIME) { /* TX underflow: * try tuning pre-loading or ampdu size */ } else if (supr_status == TX_STATUS_SUPR_FRAG) { /* * if there were underflows, but pre-loading * is not active, notify rate adaptation. 
*/ if (brcms_c_ffpld_check_txfunfl(wlc, prio2fifo[tid]) > 0) tx_error = true; } } else if (txs->phyerr) { update_rate = false; wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n", __func__, txs->phyerr); if (brcm_msg_level & LOG_ERROR_VAL) { brcmu_prpkt("txpkt (AMPDU)", p); brcms_c_print_txdesc((struct d11txh *) p->data); } brcms_c_print_txstatus(txs); } } /* loop through all pkts and retry if not acked */ while (p) { tx_info = IEEE80211_SKB_CB(p); txh = (struct d11txh *) p->data; mcl = le16_to_cpu(txh->MacTxControlLow); plcp = (u8 *) (txh + 1); h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN); seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT; if (tot_mpdu == 0) { mcs = plcp[0] & MIMO_PLCP_MCS_MASK; mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel); } index = TX_SEQ_TO_INDEX(seq); ack_recd = false; if (ba_recd) { bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX); BCMMSG(wiphy, "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n", tid, seq, start_seq, bindex, isset(bitmap, bindex), index); /* if acked then clear bit and free packet */ if ((bindex < AMPDU_TX_BA_MAX_WSIZE) && isset(bitmap, bindex)) { ini->tx_in_transit--; ini->txretry[index] = 0; /* * ampdu_ack_len: * number of acked aggregated frames */ /* ampdu_len: number of aggregated frames */ brcms_c_ampdu_rate_status(wlc, tx_info, txs, mcs); tx_info->flags |= IEEE80211_TX_STAT_ACK; tx_info->flags |= IEEE80211_TX_STAT_AMPDU; tx_info->status.ampdu_ack_len = tx_info->status.ampdu_len = 1; skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p); ack_recd = true; suc_mpdu++; } } /* either retransmit or send bar if ack not recd */ if (!ack_recd) { if (retry && (ini->txretry[index] < (int)retry_limit)) { ini->txretry[index]++; ini->tx_in_transit--; /* * Use high prededence for retransmit to * give some punch */ brcms_c_txq_enq(wlc, scb, p, BRCMS_PRIO_TO_HI_PREC(tid)); } else { /* Retry timeout */ ini->tx_in_transit--; ieee80211_tx_info_clear_status(tx_info); 
tx_info->status.ampdu_ack_len = 0; tx_info->status.ampdu_len = 1; tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; skb_pull(p, D11_PHY_HDR_LEN); skb_pull(p, D11_TXH_LEN); BCMMSG(wiphy, "BA Timeout, seq %d, in_transit %d\n", seq, ini->tx_in_transit); ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p); } } tot_mpdu++; /* break out if last packet of ampdu */ if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) == TXC_AMPDU_LAST) break; p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); } brcms_c_send_q(wlc); /* update rate state */ antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel); brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight); } void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p, struct tx_status *txs) { struct scb_ampdu *scb_ampdu; struct brcms_c_info *wlc = ampdu->wlc; struct scb_ampdu_tid_ini *ini; u32 s1 = 0, s2 = 0; struct ieee80211_tx_info *tx_info; tx_info = IEEE80211_SKB_CB(p); /* BMAC_NOTE: For the split driver, second level txstatus comes later * So if the ACK was received then wait for the second level else just * call the first one */ if (txs->status & TX_STATUS_ACK_RCV) { u8 status_delay = 0; /* wait till the next 8 bytes of txstatus is available */ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus)); while ((s1 & TXS_V) == 0) { udelay(1); status_delay++; if (status_delay > 10) return; /* error condition */ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus)); } s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2)); } if (scb) { scb_ampdu = &scb->scb_ampdu; ini = &scb_ampdu->ini[p->priority]; brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2); } else { /* loop through all pkts and free */ u8 queue = txs->frameid & TXFID_QUEUE_MASK; struct d11txh *txh; u16 mcl; while (p) { tx_info = IEEE80211_SKB_CB(p); txh = (struct d11txh *) p->data; mcl = le16_to_cpu(txh->MacTxControlLow); brcmu_pkt_buf_free_skb(p); /* break out if last packet of ampdu */ if (((mcl & 
TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) == TXC_AMPDU_LAST) break; p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); } brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight); } } void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc) { char template[T_RAM_ACCESS_SZ * 2]; /* driver needs to write the ta in the template; ta is at offset 16 */ memset(template, 0, sizeof(template)); memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN); brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2), template); } bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid) { return wlc->ampdu->ini_enable[tid]; } void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu) { struct brcms_c_info *wlc = ampdu->wlc; /* * Extend ucode internal watchdog timer to * match larger received frames */ if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) == IEEE80211_HT_MAX_AMPDU_64K) { brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX); brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX); } else { brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF); brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF); } } /* * callback function that helps flushing ampdu packets from a priority queue */ static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu); struct cb_del_ampdu_pars *ampdu_pars = (struct cb_del_ampdu_pars *)arg_a; bool rc; rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? 
true : false; rc = rc && (tx_info->control.sta == NULL || ampdu_pars->sta == NULL || tx_info->control.sta == ampdu_pars->sta); rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid); return rc; } /* * callback function that helps invalidating ampdu packets in a DMA queue */ static void dma_cb_fn_ampdu(void *txi, void *arg_a) { struct ieee80211_sta *sta = arg_a; struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi; if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && (tx_info->control.sta == sta || sta == NULL)) tx_info->control.sta = NULL; } /* * When a remote party is no longer available for ampdu communication, any * pending tx ampdu packets in the driver have to be flushed. */ void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta, u16 tid) { struct brcms_txq_info *qi = wlc->pkt_queue; struct pktq *pq = &qi->q; int prec; struct cb_del_ampdu_pars ampdu_pars; ampdu_pars.sta = sta; ampdu_pars.tid = tid; for (prec = 0; prec < pq->num_prec; prec++) brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt, (void *)&ampdu_pars); brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu); }
gpl-2.0
engine95/navelA-990
drivers/staging/rts_pstor/general.c
8208
1029
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include "general.h"

/*
 * bit1cnt_long - population count: number of '1' bits in a 32-bit word.
 * @data: value to examine
 *
 * Uses Kernighan's trick: "data &= data - 1" clears the lowest set bit,
 * so the loop runs once per set bit instead of once per bit position
 * (the original looped over all 32 positions unconditionally).
 *
 * Returns the number of set bits in @data (0..32).
 */
int bit1cnt_long(u32 data)
{
	int cnt = 0;

	while (data) {
		data &= data - 1;	/* clear least-significant set bit */
		cnt++;
	}

	return cnt;
}
gpl-2.0
ISTweak/android_kernel_toshiba_is11t
drivers/usb/serial/console.c
8464
7577
/* * USB Serial Console driver * * Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * Thanks to Randy Dunlap for the original version of this code. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> static int debug; struct usbcons_info { int magic; int break_flag; struct usb_serial_port *port; }; static struct usbcons_info usbcons_info; static struct console usbcons; /* * ------------------------------------------------------------ * USB Serial console driver * * Much of the code here is copied from drivers/char/serial.c * and implements a phony serial console in the same way that * serial.c does so that in case some software queries it, * it will get the same results. * * Things that are different from the way the serial port code * does things, is that we call the lower level usb-serial * driver code to initialize the device, and we set the initial * console speeds based on the command line arguments. * ------------------------------------------------------------ */ /* * The parsing of the command line works exactly like the * serial.c code, except that the specifier is "ttyUSB" instead * of "ttyS". 
 */
/*
 * usb_console_setup - console ->setup() hook for the "ttyUSB" console.
 * @co: console being configured
 * @options: kernel command line options, e.g. "115200n8" (may be NULL)
 *
 * Parses the option string the same way the serial core does (baud,
 * parity, bits, flow), then opens the underlying usb-serial port and,
 * if the driver supports it, programs the requested termios settings
 * through a temporary fake tty.
 *
 * Returns 0 on success or a negative errno.
 */
static int usb_console_setup(struct console *co, char *options)
{
	struct usbcons_info *info = &usbcons_info;
	int baud = 9600;		/* defaults used when no options given */
	int bits = 8;
	int parity = 'n';
	int doflow = 0;
	int cflag = CREAD | HUPCL | CLOCAL;
	char *s;
	struct usb_serial *serial;
	struct usb_serial_port *port;
	int retval;
	struct tty_struct *tty = NULL;
	struct ktermios dummy;

	dbg("%s", __func__);

	if (options) {
		/* option string format: <baud><parity><bits><flow> */
		baud = simple_strtoul(options, NULL, 10);
		s = options;
		while (*s >= '0' && *s <= '9')
			s++;
		if (*s)
			parity = *s++;
		if (*s)
			bits   = *s++ - '0';
		if (*s)
			doflow = (*s++ == 'r');
	}

	/* Sane default */
	if (baud == 0)
		baud = 9600;

	switch (bits) {
	case 7:
		cflag |= CS7;
		break;
	default:
	case 8:
		cflag |= CS8;
		break;
	}
	switch (parity) {
	case 'o': case 'O':
		cflag |= PARODD;
		break;
	case 'e': case 'E':
		cflag |= PARENB;
		break;
	}
	co->cflag = cflag;

	/*
	 * no need to check the index here: if the index is wrong, console
	 * code won't call us
	 */
	serial = usb_serial_get_by_index(co->index);
	if (serial == NULL) {
		/* no device is connected yet, sorry :( */
		err("No USB device connected to ttyUSB%i", co->index);
		return -ENODEV;
	}

	retval = usb_autopm_get_interface(serial->interface);
	if (retval)
		goto error_get_interface;

	port = serial->port[co->index - serial->minor];
	tty_port_tty_set(&port->port, NULL);

	info->port = port;

	/* bump the count so the open below is treated as a real open */
	++port->port.count;
	if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
		if (serial->type->set_termios) {
			/*
			 * allocate a fake tty so the driver can initialize
			 * the termios structure, then later call set_termios to
			 * configure according to command line arguments
			 */
			tty = kzalloc(sizeof(*tty), GFP_KERNEL);
			if (!tty) {
				retval = -ENOMEM;
				err("no more memory");
				goto reset_open_count;
			}
			kref_init(&tty->kref);
			tty_port_tty_set(&port->port, tty);
			tty->driver = usb_serial_tty_driver;
			tty->index = co->index;
			if (tty_init_termios(tty)) {
				retval = -ENOMEM;
				err("no more memory");
				goto free_tty;
			}
		}

		/* only call the device specific open if this
		 * is the first time the port is opened */
		if (serial->type->open)
			retval = serial->type->open(NULL, port);
		else
			retval = usb_serial_generic_open(NULL, port);

		if (retval) {
			err("could not open USB console port");
			goto fail;
		}

		if (serial->type->set_termios) {
			/* apply the command-line speed/format settings */
			tty->termios->c_cflag = cflag;
			tty_termios_encode_baud_rate(tty->termios, baud, baud);
			memset(&dummy, 0, sizeof(struct ktermios));
			serial->type->set_termios(tty, port, &dummy);

			/* fake tty no longer needed once termios is set */
			tty_port_tty_set(&port->port, NULL);
			kfree(tty);
		}
		set_bit(ASYNCB_INITIALIZED, &port->port.flags);
	}
	/* Now that any required fake tty operations are completed restore
	 * the tty port count */
	--port->port.count;
	/* The console is special in terms of closing the device so
	 * indicate this port is now acting as a system console. */
	port->port.console = 1;

	mutex_unlock(&serial->disc_mutex);
	return retval;

 fail:
	tty_port_tty_set(&port->port, NULL);
 free_tty:
	kfree(tty);
 reset_open_count:
	port->port.count = 0;
	usb_autopm_put_interface(serial->interface);
 error_get_interface:
	usb_serial_put(serial);
	mutex_unlock(&serial->disc_mutex);
	return retval;
}

/*
 * usb_console_write - console ->write() hook: emit kernel output on the
 * USB serial port.
 * @co: console (unused; the port is cached in usbcons_info)
 * @buf: bytes to send
 * @count: number of bytes in @buf
 *
 * Inserts a CR (13) after every LF (10) so terminals see proper line
 * endings.  Silently drops output if the port is gone, the device is
 * detached, or the port was never opened as a console.
 */
static void usb_console_write(struct console *co,
					const char *buf, unsigned count)
{
	static struct usbcons_info *info = &usbcons_info;
	struct usb_serial_port *port = info->port;
	struct usb_serial *serial;
	int retval = -ENODEV;

	if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
		return;
	serial = port->serial;

	if (count == 0)
		return;

	dbg("%s - port %d, %d byte(s)", __func__, port->number, count);

	if (!port->port.console) {
		dbg("%s - port not opened", __func__);
		return;
	}

	while (count) {
		unsigned int i;
		unsigned int lf;
		/* search for LF so we can insert CR if necessary */
		for (i = 0, lf = 0 ; i < count ; i++) {
			if (*(buf + i) == 10) {
				lf = 1;
				i++;
				break;
			}
		}
		/* pass on to the driver specific version of this function
		   if it is available */
		if (serial->type->write)
			retval = serial->type->write(NULL, port, buf, i);
		else
			retval = usb_serial_generic_write(NULL, port, buf, i);
		dbg("%s - return value : %d", __func__, retval);
		if (lf) {
			/* append CR after LF */
			unsigned char cr = 13;
			if (serial->type->write)
				retval = serial->type->write(NULL,
								port, &cr, 1);
			else
				retval = usb_serial_generic_write(NULL,
								port, &cr, 1);
			dbg("%s - return value : %d", __func__, retval);
		}
		buf += i;
		count -= i;
	}
}

/*
 * usb_console_device - console ->device() hook.
 *
 * Hands back the tty driver backing this console (stashed in co->data)
 * so the tty layer can route /dev/console to it.
 */
static struct tty_driver *usb_console_device(struct console *co, int *index)
{
	struct tty_driver **p = (struct tty_driver **)co->data;

	if (!*p)
		return NULL;

	*index = co->index;
	return *p;
}

/* console descriptor registered with the console core for "ttyUSB" */
static struct console usbcons = {
	.name =		"ttyUSB",
	.write =	usb_console_write,
	.device =	usb_console_device,
	.setup =	usb_console_setup,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
	.data = 	&usb_serial_tty_driver,
};

/*
 * usb_serial_console_disconnect - tear down the console when the USB
 * device providing it goes away.  Also drops the usb_serial reference
 * that usb_console_setup() kept on success.
 */
void usb_serial_console_disconnect(struct usb_serial *serial)
{
	if (serial && serial->port && serial->port[0] &&
				serial->port[0] == usbcons_info.port) {
		usb_serial_console_exit();
		usb_serial_put(serial);
	}
}

/*
 * usb_serial_console_init - register the USB serial console once the
 * first usb-serial device (minor 0) appears.
 * @serial_debug: debug flag copied into the module-level 'debug'
 * @minor: minor number of the newly attached device
 */
void usb_serial_console_init(int serial_debug, int minor)
{
	debug = serial_debug;

	if (minor == 0) {
		/*
		 * Call register_console() if this is the first device plugged
		 * in.  If we call it earlier, then the callback to
		 * console_setup() will fail, as there is not a device seen by
		 * the USB subsystem yet.
		 */
		/*
		 * Register console.
		 * NOTES:
		 * console_setup() is called (back) immediately (from
		 * register_console). console_write() is called immediately
		 * from register_console iff CON_PRINTBUFFER is set in flags.
		 */
		dbg("registering the USB serial console.");
		register_console(&usbcons);
	}
}

/*
 * usb_serial_console_exit - unregister the console and forget the port.
 */
void usb_serial_console_exit(void)
{
	if (usbcons_info.port) {
		unregister_console(&usbcons);
		usbcons_info.port->port.console = 0;
		usbcons_info.port = NULL;
	}
}
gpl-2.0